2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/kernel/locking/mutex.c
@@ -568,9 +568,16 @@
 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 	bool ret = true;
+	int cnt = 0;
+	bool time_out = false;
 
 	rcu_read_lock();
 	while (__mutex_owner(lock) == owner) {
+		trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
+		if (time_out) {
+			ret = false;
+			break;
+		}
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking lock->owner still matches owner. If that fails,
@@ -621,6 +628,7 @@
 	if (owner)
 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 	rcu_read_unlock();
+	trace_android_vh_mutex_can_spin_on_owner(lock, &retval);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
@@ -702,6 +710,7 @@
 	if (!waiter)
 		osq_unlock(&lock->osq);
 
+	trace_android_vh_mutex_opt_spin_finish(lock, true);
 	return true;
 
 
@@ -710,6 +719,7 @@
 		osq_unlock(&lock->osq);
 
 fail:
+	trace_android_vh_mutex_opt_spin_finish(lock, false);
 	/*
 	 * If we fell out of the spin path because of need_resched(),
 	 * reschedule now, before we try-lock the mutex. This avoids getting
@@ -750,12 +760,14 @@
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
-	trace_android_vh_record_mutex_lock_starttime(current, 0);
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
-	if (__mutex_unlock_fast(lock))
+	if (__mutex_unlock_fast(lock)) {
+		trace_android_vh_record_mutex_lock_starttime(current, 0);
 		return;
+	}
 #endif
 	__mutex_unlock_slowpath(lock, _RET_IP_);
+	trace_android_vh_record_mutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL(mutex_unlock);
 
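
Note: the trace_android_vh_* calls above are Android vendor hooks; the core mutex code only emits them, and the policy comes from whichever vendor module registers a handler. As a rough illustration, a module could use the opt-spin-start hook to cap how long a waiter busy-spins on the lock owner. This is a minimal sketch, assuming the hook declarations in the Android common kernel's include/trace/hooks/dtask.h match the call sites in this patch; SPIN_LIMIT and the function names are illustrative only, not part of the patch.

/* Sketch of a vendor module consuming the new opt-spin-start hook. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <trace/hooks/dtask.h>

#define SPIN_LIMIT	10000	/* illustrative iteration cap */

/*
 * Runs on every pass of the owner-spin loop patched above; once the
 * per-acquisition counter exceeds the cap, ask the core loop to give up
 * and fall back to the sleeping slow path.
 */
static void spin_start_handler(void *unused, struct mutex *lock,
			       bool *time_out, int *cnt)
{
	if (++(*cnt) > SPIN_LIMIT)
		*time_out = true;
}

static int __init mutex_spin_limit_init(void)
{
	/* Tracepoint-style registration generated by the hook declaration. */
	return register_trace_android_vh_mutex_opt_spin_start(
			spin_start_handler, NULL);
}
module_init(mutex_spin_limit_init);
MODULE_LICENSE("GPL");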