2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/locking/mutex.c
  *
@@ -15,7 +16,7 @@
  * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  * and Sven Dietrich.
  *
- * Also see Documentation/locking/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.rst.
  */
 #include <linux/mutex.h>
 #include <linux/ww_mutex.h>
@@ -34,6 +35,8 @@
 #else
 # include "mutex.h"
 #endif
+
+#include <trace/hooks/dtask.h>
 
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
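
Note: the new <trace/hooks/dtask.h> include pulls in the Android vendor-hook declarations used throughout this patch. As a rough sketch of how such hooks are consumed (assuming an Android common kernel where dtask.h declares them via DECLARE_HOOK, and taking the probe signature from the trace_android_vh_mutex_wait_start(lock) call site added further down), a vendor module might attach like this:

#include <linux/module.h>
#include <trace/hooks/dtask.h>

/* Probe signature mirrors the call site: one struct mutex *lock argument,
 * plus the leading void *data that all vendor-hook probes receive. */
static void probe_mutex_wait_start(void *data, struct mutex *lock)
{
	/* e.g. stamp the current task for long-wait diagnostics */
}

static int __init vh_example_init(void)
{
	/* register_trace_android_vh_*() is generated by DECLARE_HOOK */
	return register_trace_android_vh_mutex_wait_start(probe_mutex_wait_start,
							  NULL);
}
module_init(vh_example_init);
MODULE_LICENSE("GPL");
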
@@ -64,10 +67,36 @@
 
 #define MUTEX_FLAGS		0x07
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
+}
+
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
 }
+
+bool mutex_is_locked(struct mutex *lock)
+{
+	return __mutex_owner(lock) != NULL;
+}
+EXPORT_SYMBOL(mutex_is_locked);
+
+__must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+	if (unlikely(__mutex_owner(lock) == current))
+		return MUTEX_TRYLOCK_RECURSIVE;
+
+	return mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock_recursive);
 
 static inline unsigned long __owner_flags(unsigned long owner)
 {
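
Note: mutex_trylock_recursive() distinguishes three outcomes (MUTEX_TRYLOCK_FAILED, MUTEX_TRYLOCK_SUCCESS, MUTEX_TRYLOCK_RECURSIVE), and exporting it together with mutex_is_locked() makes the out-of-line versions usable from modules. A hedged usage sketch; do_flush() is a hypothetical helper, not part of the patch:

static void do_flush(void);	/* hypothetical work done under the lock */

static void example_flush(struct mutex *lock)
{
	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* Current task already owns the mutex: work, but don't unlock. */
		do_flush();
		break;
	case MUTEX_TRYLOCK_SUCCESS:
		do_flush();
		mutex_unlock(lock);
		break;
	case MUTEX_TRYLOCK_FAILED:
		/* Held by another task; caller decides whether to retry. */
		break;
	}
}
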
@@ -141,8 +170,10 @@
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
-	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
+	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		return true;
+	}
 
 	return false;
 }
@@ -181,9 +212,12 @@
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
+	bool already_on_list = false;
 	debug_mutex_add_waiter(lock, waiter, current);
 
-	list_add_tail(&waiter->list, list);
+	trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
+	if (!already_on_list)
+		list_add_tail(&waiter->list, list);
 	if (__mutex_waiter_is_first(lock, waiter))
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 }
@@ -534,9 +568,16 @@
 		  struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 	bool ret = true;
+	int cnt = 0;
+	bool time_out = false;
 
 	rcu_read_lock();
 	while (__mutex_owner(lock) == owner) {
+		trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
+		if (time_out) {
+			ret = false;
+			break;
+		}
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking lock->owner still matches owner. If that fails,
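
Note: the contract visible in this hunk is that the hook may bump *cnt once per spin iteration and set *time_out to make the spinner give up (ret = false). A minimal sketch of a probe that caps the spin count, registered like the module sketch earlier; the threshold is an arbitrary illustration, not from the patch:

#define VH_MUTEX_SPIN_MAX	4096	/* illustrative bound */

static void probe_mutex_opt_spin_start(void *data, struct mutex *lock,
				       bool *time_out, int *cnt)
{
	if (++(*cnt) > VH_MUTEX_SPIN_MAX)
		*time_out = true;	/* spinner sets ret = false and breaks */
}
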
@@ -587,6 +628,7 @@
 	if (owner)
 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 	rcu_read_unlock();
+	trace_android_vh_mutex_can_spin_on_owner(lock, &retval);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
@@ -668,6 +710,7 @@
 	if (!waiter)
 		osq_unlock(&lock->osq);
 
+	trace_android_vh_mutex_opt_spin_finish(lock, true);
 	return true;
 
 
@@ -676,6 +719,7 @@
 		osq_unlock(&lock->osq);
 
 fail:
+	trace_android_vh_mutex_opt_spin_finish(lock, false);
 	/*
 	 * If we fell out of the spin path because of need_resched(),
 	 * reschedule now, before we try-lock the mutex. This avoids getting
@@ -717,10 +761,13 @@
 void __sched mutex_unlock(struct mutex *lock)
 {
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
-	if (__mutex_unlock_fast(lock))
+	if (__mutex_unlock_fast(lock)) {
+		trace_android_vh_record_mutex_lock_starttime(current, 0);
 		return;
+	}
 #endif
 	__mutex_unlock_slowpath(lock, _RET_IP_);
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL(mutex_unlock);
 
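
Note: every acquire path in this patch reports jiffies through trace_android_vh_record_mutex_lock_starttime() and every unlock path reports 0, so a probe can measure per-task mutex hold time. A hedged sketch under two assumptions: the probe signature is inferred from the call sites, and slot 0 of the android_vendor_data1 scratch array that Android adds to task_struct is free for this use:

static void probe_record_starttime(void *data, struct task_struct *tsk,
				   unsigned long settime_jiffies)
{
	u64 *start = &tsk->android_vendor_data1[0];	/* assumed free slot */

	if (settime_jiffies) {
		*start = settime_jiffies;		/* acquire: stamp */
	} else if (*start) {
		if (jiffies - *start > HZ)		/* held > 1s: report */
			pr_warn_ratelimited("%s held a mutex for %u ms\n",
					    tsk->comm,
					    jiffies_to_msecs(jiffies - *start));
		*start = 0;				/* release: clear */
	}
}
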
@@ -919,6 +966,10 @@
 
 	might_sleep();
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
 	ww = container_of(lock, struct ww_mutex, base);
 	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -942,6 +993,7 @@
 		lock_acquired(&lock->dep_map, ip);
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		preempt_enable();
 		return 0;
 	}
@@ -983,6 +1035,7 @@
 
 	waiter.task = current;
 
+	trace_android_vh_mutex_wait_start(lock);
 	set_current_state(state);
 	for (;;) {
 		bool first;
@@ -1001,7 +1054,7 @@
 		 * wait_lock. This ensures the lock cancellation is ordered
 		 * against mutex_unlock() and wake-ups do not go missing.
 		 */
-		if (unlikely(signal_pending_state(state, current))) {
+		if (signal_pending_state(state, current)) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -1034,6 +1087,7 @@
 	spin_lock(&lock->wait_lock);
 acquired:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 
 	if (ww_ctx) {
 		/*
@@ -1058,15 +1112,17 @@
 
 	spin_unlock(&lock->wait_lock);
 	preempt_enable();
+	trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 	return 0;
 
 err:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
-	mutex_release(&lock->dep_map, 1, ip);
+	mutex_release(&lock->dep_map, ip);
 	preempt_enable();
 	return ret;
 }
@@ -1200,7 +1256,7 @@
 	DEFINE_WAKE_Q(wake_q);
 	unsigned long owner;
 
-	mutex_release(&lock->dep_map, 1, ip);
+	mutex_release(&lock->dep_map, ip);
 
 	/*
 	 * Release the lock before (potentially) taking the spinlock such that
@@ -1250,9 +1306,11 @@
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
+	trace_android_vh_mutex_unlock_slowpath(lock);
 	spin_unlock(&lock->wait_lock);
 
 	wake_up_q(&wake_q);
+	trace_android_vh_mutex_unlock_slowpath_end(lock, next);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -1384,10 +1442,17 @@
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	bool locked = __mutex_trylock(lock);
+	bool locked;
 
-	if (locked)
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
+	locked = __mutex_trylock(lock);
+	if (locked) {
+		trace_android_vh_record_mutex_lock_starttime(current, jiffies);
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
 
 	return locked;
 }