| .. | .. | 
|---|
 | 1 | +// SPDX-License-Identifier: GPL-2.0-only  | 
|---|
| 1 | 2 |  /* | 
|---|
| 2 | 3 |   * RT-Mutexes: simple blocking mutual exclusion locks with PI support | 
|---|
| 3 | 4 |   * | 
|---|
| .. | .. | 
|---|
| 7 | 8 |   *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> | 
|---|
| 8 | 9 |   *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt | 
|---|
| 9 | 10 |   *  Copyright (C) 2006 Esben Nielsen | 
|---|
 | 11 | + * Adaptive Spinlocks:  | 
|---|
 | 12 | + *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,  | 
|---|
 | 13 | + *				     and Peter Morreale,  | 
|---|
 | 14 | + * Adaptive Spinlocks simplification:  | 
|---|
 | 15 | + *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>  | 
|---|
| 10 | 16 |   * | 
|---|
| 11 |  | - *  See Documentation/locking/rt-mutex-design.txt for details.  | 
|---|
 | 17 | + *  See Documentation/locking/rt-mutex-design.rst for details.  | 
|---|
| 12 | 18 |   */ | 
|---|
| 13 | 19 |  #include <linux/spinlock.h> | 
|---|
| 14 | 20 |  #include <linux/export.h> | 
|---|
| .. | .. | 
|---|
| 18 | 24 |  #include <linux/sched/wake_q.h> | 
|---|
| 19 | 25 |  #include <linux/sched/debug.h> | 
|---|
| 20 | 26 |  #include <linux/timer.h> | 
|---|
 | 27 | +#include <trace/hooks/dtask.h>  | 
|---|
 | 28 | +#include <linux/ww_mutex.h>  | 
|---|
| 21 | 29 |   | 
|---|
| 22 | 30 |  #include "rtmutex_common.h" | 
|---|
| 23 | 31 |   | 
|---|
| .. | .. | 
|---|
| 56 | 64 |  	if (rt_mutex_has_waiters(lock)) | 
|---|
| 57 | 65 |  		val |= RT_MUTEX_HAS_WAITERS; | 
|---|
| 58 | 66 |   | 
|---|
| 59 |  | -	lock->owner = (struct task_struct *)val;  | 
|---|
 | 67 | +	WRITE_ONCE(lock->owner, (struct task_struct *)val);  | 
|---|
| 60 | 68 |  } | 
|---|
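
The plain store to lock->owner becomes a WRITE_ONCE() here; the owner word is also inspected locklessly (for example by the adaptive-wait code added further down), so the write should be a single, untorn access. A user-space analogue of that writer/lockless-reader pairing, using relaxed C11 atomics in place of WRITE_ONCE()/READ_ONCE() (struct and value names are illustrative, not the kernel's):

```c
#include <stdatomic.h>
#include <stdio.h>

/* Owner word written under the lock's internal lock, read locklessly. */
struct lock_owner { _Atomic(unsigned long) owner; };

#define HAS_WAITERS 1UL

static void set_owner(struct lock_owner *l, unsigned long task, int waiters)
{
	unsigned long val = task | (waiters ? HAS_WAITERS : 0);

	/* like WRITE_ONCE(lock->owner, val): one untorn store */
	atomic_store_explicit(&l->owner, val, memory_order_relaxed);
}

static unsigned long peek_owner(struct lock_owner *l)
{
	/* like READ_ONCE() on the lockless reader side */
	return atomic_load_explicit(&l->owner, memory_order_relaxed) & ~HAS_WAITERS;
}

int main(void)
{
	struct lock_owner l = { .owner = 0 };

	set_owner(&l, 0x1000, 1);
	printf("owner %#lx\n", peek_owner(&l));	/* prints 0x1000 */
	return 0;
}
```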
| 61 | 69 |   | 
|---|
| 62 | 70 |  static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) | 
|---|
| .. | .. | 
|---|
| 135 | 143 |  		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); | 
|---|
| 136 | 144 |  } | 
|---|
| 137 | 145 |   | 
|---|
 | 146 | +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)  | 
|---|
 | 147 | +{  | 
|---|
 | 148 | +	return waiter && waiter != PI_WAKEUP_INPROGRESS &&  | 
|---|
 | 149 | +		waiter != PI_REQUEUE_INPROGRESS;  | 
|---|
 | 150 | +}  | 
|---|
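
rt_mutex_real_waiter() exists because pi_blocked_on may also hold the PI_WAKEUP_INPROGRESS / PI_REQUEUE_INPROGRESS markers (defined in rtmutex_common.h), which flag a transient futex-requeue state rather than a real, dereferenceable waiter. A minimal user-space sketch of the same sentinel-pointer pattern (the marker values below are local stand-ins, not the kernel's definitions):

```c
#include <stdio.h>

struct waiter { int prio; };

/* Distinct, never-dereferenced marker addresses (illustrative only). */
static char wakeup_inprogress_marker;
static char requeue_inprogress_marker;
#define WAKEUP_INPROGRESS  ((struct waiter *)&wakeup_inprogress_marker)
#define REQUEUE_INPROGRESS ((struct waiter *)&requeue_inprogress_marker)

/* Mirrors rt_mutex_real_waiter(): only a non-NULL, non-marker pointer
 * refers to a waiter that may actually be dereferenced. */
static int is_real_waiter(struct waiter *w)
{
	return w && w != WAKEUP_INPROGRESS && w != REQUEUE_INPROGRESS;
}

int main(void)
{
	struct waiter w = { .prio = 10 };

	printf("%d %d %d %d\n",
	       is_real_waiter(NULL),               /* 0 */
	       is_real_waiter(WAKEUP_INPROGRESS),  /* 0 */
	       is_real_waiter(REQUEUE_INPROGRESS), /* 0 */
	       is_real_waiter(&w));                /* 1 */
	return 0;
}
```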
 | 151 | +  | 
|---|
| 138 | 152 |  /* | 
|---|
| 139 | 153 |   * We can speed up the acquire/release, if there's no debugging state to be | 
|---|
| 140 | 154 |   * set up. | 
|---|
| 141 | 155 |   */ | 
|---|
| 142 | 156 |  #ifndef CONFIG_DEBUG_RT_MUTEXES | 
|---|
| 143 |  | -# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)  | 
|---|
| 144 | 157 |  # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) | 
|---|
| 145 | 158 |  # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) | 
|---|
| 146 | 159 |   | 
|---|
| .. | .. | 
|---|
| 201 | 214 |  } | 
|---|
| 202 | 215 |   | 
|---|
| 203 | 216 |  #else | 
|---|
| 204 |  | -# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)  | 
|---|
| 205 | 217 |  # define rt_mutex_cmpxchg_acquire(l,c,n)	(0) | 
|---|
| 206 | 218 |  # define rt_mutex_cmpxchg_release(l,c,n)	(0) | 
|---|
| 207 | 219 |   | 
|---|
| .. | .. | 
|---|
| 228 | 240 |   * Only use with rt_mutex_waiter_{less,equal}() | 
|---|
| 229 | 241 |   */ | 
|---|
| 230 | 242 |  #define task_to_waiter(p)	\ | 
|---|
| 231 |  | -	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }  | 
|---|
 | 243 | +	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }  | 
|---|
| 232 | 244 |   | 
|---|
| 233 | 245 |  static inline int | 
|---|
| 234 | 246 |  rt_mutex_waiter_less(struct rt_mutex_waiter *left, | 
|---|
| .. | .. | 
|---|
| 266 | 278 |  		return left->deadline == right->deadline; | 
|---|
| 267 | 279 |   | 
|---|
| 268 | 280 |  	return 1; | 
|---|
 | 281 | +}  | 
|---|
 | 282 | +  | 
|---|
 | 283 | +#define STEAL_NORMAL  0  | 
|---|
 | 284 | +#define STEAL_LATERAL 1  | 
|---|
 | 285 | +  | 
|---|
 | 286 | +static inline int  | 
|---|
 | 287 | +rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)  | 
|---|
 | 288 | +{  | 
|---|
 | 289 | +	struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);  | 
|---|
 | 290 | +  | 
|---|
 | 291 | +	if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))  | 
|---|
 | 292 | +		return 1;  | 
|---|
 | 293 | +  | 
|---|
 | 294 | +	/*  | 
|---|
 | 295 | +	 * Note that RT tasks are excluded from lateral-steals  | 
|---|
 | 296 | +	 * to prevent the introduction of an unbounded latency.  | 
|---|
 | 297 | +	 */  | 
|---|
 | 298 | +	if (mode == STEAL_NORMAL || rt_task(waiter->task))  | 
|---|
 | 299 | +		return 0;  | 
|---|
 | 300 | +  | 
|---|
 | 301 | +	return rt_mutex_waiter_equal(waiter, top_waiter);  | 
|---|
| 269 | 302 |  } | 
|---|
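
What STEAL_LATERAL adds over STEAL_NORMAL: a waiter of equal priority may take the lock ahead of the current top waiter, but only if it is not an RT task, since an RT-task steal could introduce unbounded latency (per the comment above). A stripped-down model of that decision (simplified fields; the real rt_mutex_waiter_less()/rt_mutex_waiter_equal() also compare SCHED_DEADLINE deadlines):

```c
#include <stdio.h>

#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

/* Stand-in for rt_mutex_waiter: lower ->prio means higher priority. */
struct waiter { int prio; int rt_task; };

static int waiter_less(struct waiter *l, struct waiter *r)
{
	return l->prio < r->prio;
}

static int waiter_equal(struct waiter *l, struct waiter *r)
{
	return l->prio == r->prio;
}

/* Same shape as rt_mutex_steal(): equal-priority steals are allowed only
 * in lateral mode and only for non-RT waiters. */
static int can_steal(struct waiter *w, struct waiter *top, int mode)
{
	if (w == top || waiter_less(w, top))
		return 1;
	if (mode == STEAL_NORMAL || w->rt_task)
		return 0;
	return waiter_equal(w, top);
}

int main(void)
{
	struct waiter top_fair  = { .prio = 120, .rt_task = 0 };
	struct waiter peer_fair = { .prio = 120, .rt_task = 0 };
	struct waiter top_rt    = { .prio = 50,  .rt_task = 1 };
	struct waiter peer_rt   = { .prio = 50,  .rt_task = 1 };

	printf("fair peer, normal : %d\n", can_steal(&peer_fair, &top_fair, STEAL_NORMAL));  /* 0 */
	printf("fair peer, lateral: %d\n", can_steal(&peer_fair, &top_fair, STEAL_LATERAL)); /* 1 */
	printf("rt peer,   lateral: %d\n", can_steal(&peer_rt, &top_rt, STEAL_LATERAL));     /* 0 */
	return 0;
}
```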
| 270 | 303 |   | 
|---|
| 271 | 304 |  static void | 
|---|
| .. | .. | 
|---|
| 372 | 405 |  	return debug_rt_mutex_detect_deadlock(waiter, chwalk); | 
|---|
| 373 | 406 |  } | 
|---|
| 374 | 407 |   | 
|---|
 | 408 | +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)  | 
|---|
 | 409 | +{  | 
|---|
 | 410 | +	if (waiter->savestate)  | 
|---|
 | 411 | +		wake_up_lock_sleeper(waiter->task);  | 
|---|
 | 412 | +	else  | 
|---|
 | 413 | +		wake_up_process(waiter->task);  | 
|---|
 | 414 | +}  | 
|---|
 | 415 | +  | 
|---|
| 375 | 416 |  /* | 
|---|
| 376 | 417 |   * Max number of times we'll walk the boosting chain: | 
|---|
| 377 | 418 |   */ | 
|---|
| .. | .. | 
|---|
| 379 | 420 |   | 
|---|
| 380 | 421 |  static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) | 
|---|
| 381 | 422 |  { | 
|---|
| 382 |  | -	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;  | 
|---|
 | 423 | +	return rt_mutex_real_waiter(p->pi_blocked_on) ?  | 
|---|
 | 424 | +		p->pi_blocked_on->lock : NULL;  | 
|---|
| 383 | 425 |  } | 
|---|
| 384 | 426 |   | 
|---|
| 385 | 427 |  /* | 
|---|
| .. | .. | 
|---|
| 515 | 557 |  	 * reached or the state of the chain has changed while we | 
|---|
| 516 | 558 |  	 * dropped the locks. | 
|---|
| 517 | 559 |  	 */ | 
|---|
| 518 |  | -	if (!waiter)  | 
|---|
 | 560 | +	if (!rt_mutex_real_waiter(waiter))  | 
|---|
| 519 | 561 |  		goto out_unlock_pi; | 
|---|
| 520 | 562 |   | 
|---|
| 521 | 563 |  	/* | 
|---|
| .. | .. | 
|---|
| 598 | 640 |  	 * walk, we detected a deadlock. | 
|---|
| 599 | 641 |  	 */ | 
|---|
| 600 | 642 |  	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { | 
|---|
| 601 |  | -		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);  | 
|---|
| 602 | 643 |  		raw_spin_unlock(&lock->wait_lock); | 
|---|
| 603 | 644 |  		ret = -EDEADLK; | 
|---|
| 604 | 645 |  		goto out_unlock_pi; | 
|---|
| .. | .. | 
|---|
| 627 | 668 |  		} | 
|---|
| 628 | 669 |   | 
|---|
| 629 | 670 |  		/* [10] Grab the next task, i.e. owner of @lock */ | 
|---|
| 630 |  | -		task = rt_mutex_owner(lock);  | 
|---|
| 631 |  | -		get_task_struct(task);  | 
|---|
 | 671 | +		task = get_task_struct(rt_mutex_owner(lock));  | 
|---|
| 632 | 672 |  		raw_spin_lock(&task->pi_lock); | 
|---|
| 633 | 673 |   | 
|---|
| 634 | 674 |  		/* | 
|---|
| .. | .. | 
|---|
| 696 | 736 |  	 * follow here. This is the end of the chain we are walking. | 
|---|
| 697 | 737 |  	 */ | 
|---|
| 698 | 738 |  	if (!rt_mutex_owner(lock)) { | 
|---|
 | 739 | +		struct rt_mutex_waiter *lock_top_waiter;  | 
|---|
 | 740 | +  | 
|---|
| 699 | 741 |  		/* | 
|---|
| 700 | 742 |  		 * If the requeue [7] above changed the top waiter, | 
|---|
| 701 | 743 |  		 * then we need to wake the new top waiter up to try | 
|---|
| 702 | 744 |  		 * to get the lock. | 
|---|
| 703 | 745 |  		 */ | 
|---|
| 704 |  | -		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))  | 
|---|
| 705 |  | -			wake_up_process(rt_mutex_top_waiter(lock)->task);  | 
|---|
 | 746 | +		lock_top_waiter = rt_mutex_top_waiter(lock);  | 
|---|
 | 747 | +		if (prerequeue_top_waiter != lock_top_waiter)  | 
|---|
 | 748 | +			rt_mutex_wake_waiter(lock_top_waiter);  | 
|---|
| 706 | 749 |  		raw_spin_unlock_irq(&lock->wait_lock); | 
|---|
| 707 | 750 |  		return 0; | 
|---|
| 708 | 751 |  	} | 
|---|
| 709 | 752 |   | 
|---|
| 710 | 753 |  	/* [10] Grab the next task, i.e. the owner of @lock */ | 
|---|
| 711 |  | -	task = rt_mutex_owner(lock);  | 
|---|
| 712 |  | -	get_task_struct(task);  | 
|---|
 | 754 | +	task = get_task_struct(rt_mutex_owner(lock));  | 
|---|
| 713 | 755 |  	raw_spin_lock(&task->pi_lock); | 
|---|
| 714 | 756 |   | 
|---|
| 715 | 757 |  	/* [11] requeue the pi waiters if necessary */ | 
|---|
| .. | .. | 
|---|
| 804 | 846 |   * @task:   The task which wants to acquire the lock | 
|---|
| 805 | 847 |   * @waiter: The waiter that is queued to the lock's wait tree if the | 
|---|
| 806 | 848 |   *	    callsite called task_blocked_on_lock(), otherwise NULL | 
|---|
 | 849 | + * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)  | 
|---|
| 807 | 850 |   */ | 
|---|
| 808 |  | -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,  | 
|---|
| 809 |  | -				struct rt_mutex_waiter *waiter)  | 
|---|
 | 851 | +static int __try_to_take_rt_mutex(struct rt_mutex *lock,  | 
|---|
 | 852 | +				  struct task_struct *task,  | 
|---|
 | 853 | +				  struct rt_mutex_waiter *waiter, int mode)  | 
|---|
| 810 | 854 |  { | 
|---|
| 811 | 855 |  	lockdep_assert_held(&lock->wait_lock); | 
|---|
| 812 | 856 |   | 
|---|
| .. | .. | 
|---|
| 842 | 886 |  	 */ | 
|---|
| 843 | 887 |  	if (waiter) { | 
|---|
| 844 | 888 |  		/* | 
|---|
| 845 |  | -		 * If waiter is not the highest priority waiter of  | 
|---|
| 846 |  | -		 * @lock, give up.  | 
|---|
 | 889 | +		 * If waiter is not the highest priority waiter of @lock,  | 
|---|
 | 890 | +		 * or its peer when lateral steal is allowed, give up.  | 
|---|
| 847 | 891 |  		 */ | 
|---|
| 848 |  | -		if (waiter != rt_mutex_top_waiter(lock))  | 
|---|
 | 892 | +		if (!rt_mutex_steal(lock, waiter, mode))  | 
|---|
| 849 | 893 |  			return 0; | 
|---|
| 850 |  | -  | 
|---|
| 851 | 894 |  		/* | 
|---|
| 852 | 895 |  		 * We can acquire the lock. Remove the waiter from the | 
|---|
| 853 | 896 |  		 * lock waiters tree. | 
|---|
| .. | .. | 
|---|
| 865 | 908 |  		 */ | 
|---|
| 866 | 909 |  		if (rt_mutex_has_waiters(lock)) { | 
|---|
| 867 | 910 |  			/* | 
|---|
| 868 |  | -			 * If @task->prio is greater than or equal to  | 
|---|
| 869 |  | -			 * the top waiter priority (kernel view),  | 
|---|
| 870 |  | -			 * @task lost.  | 
|---|
 | 911 | +			 * If @task->prio is greater than the top waiter  | 
|---|
 | 912 | +			 * priority (kernel view), or equal to it when a  | 
|---|
 | 913 | +			 * lateral steal is forbidden, @task lost.  | 
|---|
| 871 | 914 |  			 */ | 
|---|
| 872 |  | -			if (!rt_mutex_waiter_less(task_to_waiter(task),  | 
|---|
| 873 |  | -						  rt_mutex_top_waiter(lock)))  | 
|---|
 | 915 | +			if (!rt_mutex_steal(lock, task_to_waiter(task), mode))  | 
|---|
| 874 | 916 |  				return 0; | 
|---|
| 875 |  | -  | 
|---|
| 876 | 917 |  			/* | 
|---|
| 877 | 918 |  			 * The current top waiter stays enqueued. We | 
|---|
| 878 | 919 |  			 * don't have to change anything in the lock | 
|---|
| .. | .. | 
|---|
| 919 | 960 |  	return 1; | 
|---|
| 920 | 961 |  } | 
|---|
| 921 | 962 |   | 
|---|
 | 963 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 964 | +/*  | 
|---|
 | 965 | + * preemptible spin_lock functions:  | 
|---|
 | 966 | + */  | 
|---|
 | 967 | +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,  | 
|---|
 | 968 | +					 void  (*slowfn)(struct rt_mutex *lock))  | 
|---|
 | 969 | +{  | 
|---|
 | 970 | +	might_sleep_no_state_check();  | 
|---|
 | 971 | +  | 
|---|
 | 972 | +	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))  | 
|---|
 | 973 | +		return;  | 
|---|
 | 974 | +	else  | 
|---|
 | 975 | +		slowfn(lock);  | 
|---|
 | 976 | +}  | 
|---|
 | 977 | +  | 
|---|
 | 978 | +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,  | 
|---|
 | 979 | +					   void  (*slowfn)(struct rt_mutex *lock))  | 
|---|
 | 980 | +{  | 
|---|
 | 981 | +	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))  | 
|---|
 | 982 | +		return;  | 
|---|
 | 983 | +	else  | 
|---|
 | 984 | +		slowfn(lock);  | 
|---|
 | 985 | +}  | 
|---|
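
Both fast paths above are a single compare-and-swap on lock->owner: NULL -> current on acquire and current -> NULL on release, deferring to the slow path whenever the exchange fails (waiters queued, or the lock already owned). A user-space sketch of that shape with C11 atomics, modelled on the rt_mutex_cmpxchg_acquire()/release() macros shown earlier in the diff (the slow-path stubs are placeholders):

```c
#include <stdatomic.h>
#include <stdio.h>

struct fastlock { _Atomic(void *) owner; };

static void slowpath_lock(struct fastlock *l)   { (void)l; /* queue, boost, block... */ }
static void slowpath_unlock(struct fastlock *l) { (void)l; /* wake the top waiter... */ }

static void fast_lock(struct fastlock *l, void *self)
{
	void *expected = NULL;

	/* acquire ordering on success, like rt_mutex_cmpxchg_acquire() */
	if (!atomic_compare_exchange_strong_explicit(&l->owner, &expected, self,
						     memory_order_acquire,
						     memory_order_relaxed))
		slowpath_lock(l);
}

static void fast_unlock(struct fastlock *l, void *self)
{
	void *expected = self;

	/* release ordering on success, like rt_mutex_cmpxchg_release() */
	if (!atomic_compare_exchange_strong_explicit(&l->owner, &expected, NULL,
						     memory_order_release,
						     memory_order_relaxed))
		slowpath_unlock(l);
}

int main(void)
{
	struct fastlock l = { .owner = NULL };
	int me;

	fast_lock(&l, &me);
	printf("owner taken: %d\n", atomic_load(&l.owner) == (void *)&me);
	fast_unlock(&l, &me);
	printf("owner clear: %d\n", atomic_load(&l.owner) == NULL);
	return 0;
}
```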
 | 986 | +#ifdef CONFIG_SMP  | 
|---|
 | 987 | +/*  | 
|---|
 | 988 | + * Note that owner is a speculative pointer and dereferencing relies  | 
|---|
 | 989 | + * on rcu_read_lock() and the check against the lock owner.  | 
|---|
 | 990 | + */  | 
|---|
 | 991 | +static int adaptive_wait(struct rt_mutex *lock,  | 
|---|
 | 992 | +			 struct task_struct *owner)  | 
|---|
 | 993 | +{  | 
|---|
 | 994 | +	int res = 0;  | 
|---|
 | 995 | +  | 
|---|
 | 996 | +	rcu_read_lock();  | 
|---|
 | 997 | +	for (;;) {  | 
|---|
 | 998 | +		if (owner != rt_mutex_owner(lock))  | 
|---|
 | 999 | +			break;  | 
|---|
 | 1000 | +		/*  | 
|---|
 | 1001 | +		 * Ensure that owner->on_cpu is dereferenced _after_  | 
|---|
 | 1002 | +		 * checking the above to be valid.  | 
|---|
 | 1003 | +		 */  | 
|---|
 | 1004 | +		barrier();  | 
|---|
 | 1005 | +		if (!owner->on_cpu) {  | 
|---|
 | 1006 | +			res = 1;  | 
|---|
 | 1007 | +			break;  | 
|---|
 | 1008 | +		}  | 
|---|
 | 1009 | +		cpu_relax();  | 
|---|
 | 1010 | +	}  | 
|---|
 | 1011 | +	rcu_read_unlock();  | 
|---|
 | 1012 | +	return res;  | 
|---|
 | 1013 | +}  | 
|---|
 | 1014 | +#else  | 
|---|
 | 1015 | +static int adaptive_wait(struct rt_mutex *lock,  | 
|---|
 | 1016 | +			 struct task_struct *orig_owner)  | 
|---|
 | 1017 | +{  | 
|---|
 | 1018 | +	return 1;  | 
|---|
 | 1019 | +}  | 
|---|
 | 1020 | +#endif  | 
|---|
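
adaptive_wait() is what makes the spinlock "adaptive": keep spinning only while the very same owner is still the owner and is running on a CPU; once the owner is preempted or ownership changes hands, stop spinning. The rcu_read_lock() section keeps the speculatively loaded owner task_struct alive while ->on_cpu is read. A rough user-space analogue with C11 atomics (no RCU here, so the owner object is simply assumed to stay valid):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu_task { _Atomic bool on_cpu; };
struct spinnable_lock { _Atomic(struct cpu_task *) owner; };

/*
 * Returns true if the caller should stop spinning and block:
 * either the owner went to sleep, or ownership changed hands
 * (in which case false means "retry taking the lock").
 */
static bool should_block(struct spinnable_lock *lock, struct cpu_task *owner)
{
	for (;;) {
		if (atomic_load(&lock->owner) != owner)
			return false;	/* owner changed: go try the lock again */
		if (!atomic_load(&owner->on_cpu))
			return true;	/* owner not running: sleep instead of spin */
		/* a cpu_relax() equivalent would go here */
	}
}

int main(void)
{
	struct cpu_task owner = { .on_cpu = false };
	struct spinnable_lock lock = { .owner = &owner };

	/* Owner is off-CPU, so the waiter should block rather than spin. */
	printf("block: %d\n", should_block(&lock, &owner));
	return 0;
}
```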
 | 1021 | +  | 
|---|
 | 1022 | +static int task_blocks_on_rt_mutex(struct rt_mutex *lock,  | 
|---|
 | 1023 | +				   struct rt_mutex_waiter *waiter,  | 
|---|
 | 1024 | +				   struct task_struct *task,  | 
|---|
 | 1025 | +				   enum rtmutex_chainwalk chwalk);  | 
|---|
 | 1026 | +/*  | 
|---|
 | 1027 | + * Slow path lock function spin_lock style: this variant is very  | 
|---|
 | 1028 | + * careful not to miss any non-lock wakeups.  | 
|---|
 | 1029 | + *  | 
|---|
 | 1030 | + * We store the current state under p->pi_lock in p->saved_state and  | 
|---|
 | 1031 | + * the try_to_wake_up() code handles this accordingly.  | 
|---|
 | 1032 | + */  | 
|---|
 | 1033 | +void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,  | 
|---|
 | 1034 | +					  struct rt_mutex_waiter *waiter,  | 
|---|
 | 1035 | +					  unsigned long flags)  | 
|---|
 | 1036 | +{  | 
|---|
 | 1037 | +	struct task_struct *lock_owner, *self = current;  | 
|---|
 | 1038 | +	struct rt_mutex_waiter *top_waiter;  | 
|---|
 | 1039 | +	int ret;  | 
|---|
 | 1040 | +  | 
|---|
 | 1041 | +	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))  | 
|---|
 | 1042 | +		return;  | 
|---|
 | 1043 | +  | 
|---|
 | 1044 | +	BUG_ON(rt_mutex_owner(lock) == self);  | 
|---|
 | 1045 | +  | 
|---|
 | 1046 | +	/*  | 
|---|
 | 1047 | +	 * We save whatever state the task is in and we'll restore it  | 
|---|
 | 1048 | +	 * after acquiring the lock taking real wakeups into account  | 
|---|
 | 1049 | +	 * as well. We are serialized via pi_lock against wakeups. See  | 
|---|
 | 1050 | +	 * try_to_wake_up().  | 
|---|
 | 1051 | +	 */  | 
|---|
 | 1052 | +	raw_spin_lock(&self->pi_lock);  | 
|---|
 | 1053 | +	self->saved_state = self->state;  | 
|---|
 | 1054 | +	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);  | 
|---|
 | 1055 | +	raw_spin_unlock(&self->pi_lock);  | 
|---|
 | 1056 | +  | 
|---|
 | 1057 | +	ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);  | 
|---|
 | 1058 | +	BUG_ON(ret);  | 
|---|
 | 1059 | +  | 
|---|
 | 1060 | +	for (;;) {  | 
|---|
 | 1061 | +		/* Try to acquire the lock again. */  | 
|---|
 | 1062 | +		if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))  | 
|---|
 | 1063 | +			break;  | 
|---|
 | 1064 | +  | 
|---|
 | 1065 | +		top_waiter = rt_mutex_top_waiter(lock);  | 
|---|
 | 1066 | +		lock_owner = rt_mutex_owner(lock);  | 
|---|
 | 1067 | +  | 
|---|
 | 1068 | +		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);  | 
|---|
 | 1069 | +  | 
|---|
 | 1070 | +		if (top_waiter != waiter || adaptive_wait(lock, lock_owner))  | 
|---|
 | 1071 | +			preempt_schedule_lock();  | 
|---|
 | 1072 | +  | 
|---|
 | 1073 | +		raw_spin_lock_irqsave(&lock->wait_lock, flags);  | 
|---|
 | 1074 | +  | 
|---|
 | 1075 | +		raw_spin_lock(&self->pi_lock);  | 
|---|
 | 1076 | +		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);  | 
|---|
 | 1077 | +		raw_spin_unlock(&self->pi_lock);  | 
|---|
 | 1078 | +	}  | 
|---|
 | 1079 | +  | 
|---|
 | 1080 | +	/*  | 
|---|
 | 1081 | +	 * Restore the task state to current->saved_state. We set it  | 
|---|
 | 1082 | +	 * to the original state above and the try_to_wake_up() code  | 
|---|
 | 1083 | +	 * has possibly updated it when a real (non-rtmutex) wakeup  | 
|---|
 | 1084 | +	 * happened while we were blocked. Clear saved_state so  | 
|---|
 | 1085 | + * try_to_wake_up() does not get confused.  | 
|---|
 | 1086 | +	 */  | 
|---|
 | 1087 | +	raw_spin_lock(&self->pi_lock);  | 
|---|
 | 1088 | +	__set_current_state_no_track(self->saved_state);  | 
|---|
 | 1089 | +	self->saved_state = TASK_RUNNING;  | 
|---|
 | 1090 | +	raw_spin_unlock(&self->pi_lock);  | 
|---|
 | 1091 | +  | 
|---|
 | 1092 | +	/*  | 
|---|
 | 1093 | +	 * try_to_take_rt_mutex() sets the waiter bit  | 
|---|
 | 1094 | +	 * unconditionally. We might have to fix that up:  | 
|---|
 | 1095 | +	 */  | 
|---|
 | 1096 | +	fixup_rt_mutex_waiters(lock);  | 
|---|
 | 1097 | +  | 
|---|
 | 1098 | +	BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));  | 
|---|
 | 1099 | +	BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));  | 
|---|
 | 1100 | +}  | 
|---|
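
The heart of the spin_lock slow path is the saved_state bookkeeping described in the comment above: park the task's current state, block in TASK_UNINTERRUPTIBLE on the lock, let try_to_wake_up() record any "real" (non-lock) wakeup in saved_state, and restore it once the lock is held. A toy, single-threaded model of just that bookkeeping (not the scheduler's actual logic, and with made-up helper names):

```c
#include <stdio.h>

enum state { RUNNING, INTERRUPTIBLE, UNINTERRUPTIBLE };

struct task {
	enum state state;
	enum state saved_state;
};

/* Entering the sleeping-spinlock slow path: park the current state. */
static void save_state(struct task *t)
{
	t->saved_state = t->state;
	t->state = UNINTERRUPTIBLE;
}

/* A non-lock ("real") wakeup arriving while we block on the lock:
 * it must not be lost, so it is applied to saved_state. */
static void real_wakeup(struct task *t)
{
	if (t->saved_state == INTERRUPTIBLE)
		t->saved_state = RUNNING;
}

/* Lock acquired: restore whatever state the task should now be in. */
static void restore_state(struct task *t)
{
	t->state = t->saved_state;
	t->saved_state = RUNNING;
}

int main(void)
{
	struct task t = { .state = INTERRUPTIBLE };

	save_state(&t);     /* blocked on the sleeping spinlock */
	real_wakeup(&t);    /* e.g. a signal-style wakeup arrives meanwhile */
	restore_state(&t);  /* lock acquired: the wakeup is not lost */

	printf("final state running: %d\n", t.state == RUNNING);
	return 0;
}
```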
 | 1101 | +  | 
|---|
 | 1102 | +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)  | 
|---|
 | 1103 | +{  | 
|---|
 | 1104 | +	struct rt_mutex_waiter waiter;  | 
|---|
 | 1105 | +	unsigned long flags;  | 
|---|
 | 1106 | +  | 
|---|
 | 1107 | +	rt_mutex_init_waiter(&waiter, true);  | 
|---|
 | 1108 | +  | 
|---|
 | 1109 | +	raw_spin_lock_irqsave(&lock->wait_lock, flags);  | 
|---|
 | 1110 | +	rt_spin_lock_slowlock_locked(lock, &waiter, flags);  | 
|---|
 | 1111 | +	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);  | 
|---|
 | 1112 | +	debug_rt_mutex_free_waiter(&waiter);  | 
|---|
 | 1113 | +}  | 
|---|
 | 1114 | +  | 
|---|
 | 1115 | +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,  | 
|---|
 | 1116 | +					     struct wake_q_head *wake_q,  | 
|---|
 | 1117 | +					     struct wake_q_head *wq_sleeper);  | 
|---|
 | 1118 | +/*  | 
|---|
 | 1119 | + * Slow path to release a rt_mutex spin_lock style  | 
|---|
 | 1120 | + */  | 
|---|
 | 1121 | +void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)  | 
|---|
 | 1122 | +{  | 
|---|
 | 1123 | +	unsigned long flags;  | 
|---|
 | 1124 | +	DEFINE_WAKE_Q(wake_q);  | 
|---|
 | 1125 | +	DEFINE_WAKE_Q(wake_sleeper_q);  | 
|---|
 | 1126 | +	bool postunlock;  | 
|---|
 | 1127 | +  | 
|---|
 | 1128 | +	raw_spin_lock_irqsave(&lock->wait_lock, flags);  | 
|---|
 | 1129 | +	postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);  | 
|---|
 | 1130 | +	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);  | 
|---|
 | 1131 | +  | 
|---|
 | 1132 | +	if (postunlock)  | 
|---|
 | 1133 | +		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);  | 
|---|
 | 1134 | +}  | 
|---|
 | 1135 | +  | 
|---|
 | 1136 | +void __lockfunc rt_spin_lock(spinlock_t *lock)  | 
|---|
 | 1137 | +{  | 
|---|
 | 1138 | +	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);  | 
|---|
 | 1139 | +	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);  | 
|---|
 | 1140 | +	rcu_read_lock();  | 
|---|
 | 1141 | +	migrate_disable();  | 
|---|
 | 1142 | +}  | 
|---|
 | 1143 | +EXPORT_SYMBOL(rt_spin_lock);  | 
|---|
 | 1144 | +  | 
|---|
 | 1145 | +void __lockfunc __rt_spin_lock(struct rt_mutex *lock)  | 
|---|
 | 1146 | +{  | 
|---|
 | 1147 | +	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);  | 
|---|
 | 1148 | +}  | 
|---|
 | 1149 | +  | 
|---|
 | 1150 | +#ifdef CONFIG_DEBUG_LOCK_ALLOC  | 
|---|
 | 1151 | +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)  | 
|---|
 | 1152 | +{  | 
|---|
 | 1153 | +	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  | 
|---|
 | 1154 | +	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);  | 
|---|
 | 1155 | +	rcu_read_lock();  | 
|---|
 | 1156 | +	migrate_disable();  | 
|---|
 | 1157 | +}  | 
|---|
 | 1158 | +EXPORT_SYMBOL(rt_spin_lock_nested);  | 
|---|
 | 1159 | +  | 
|---|
 | 1160 | +void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,  | 
|---|
 | 1161 | +				       struct lockdep_map *nest_lock)  | 
|---|
 | 1162 | +{  | 
|---|
 | 1163 | +	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);  | 
|---|
 | 1164 | +	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);  | 
|---|
 | 1165 | +	rcu_read_lock();  | 
|---|
 | 1166 | +	migrate_disable();  | 
|---|
 | 1167 | +}  | 
|---|
 | 1168 | +EXPORT_SYMBOL(rt_spin_lock_nest_lock);  | 
|---|
 | 1169 | +#endif  | 
|---|
 | 1170 | +  | 
|---|
 | 1171 | +void __lockfunc rt_spin_unlock(spinlock_t *lock)  | 
|---|
 | 1172 | +{  | 
|---|
 | 1173 | +	/* NOTE: we always pass in '1' for nested, for simplicity */  | 
|---|
 | 1174 | +	spin_release(&lock->dep_map, _RET_IP_);  | 
|---|
 | 1175 | +	migrate_enable();  | 
|---|
 | 1176 | +	rcu_read_unlock();  | 
|---|
 | 1177 | +	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);  | 
|---|
 | 1178 | +}  | 
|---|
 | 1179 | +EXPORT_SYMBOL(rt_spin_unlock);  | 
|---|
 | 1180 | +  | 
|---|
 | 1181 | +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)  | 
|---|
 | 1182 | +{  | 
|---|
 | 1183 | +	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);  | 
|---|
 | 1184 | +}  | 
|---|
 | 1185 | +EXPORT_SYMBOL(__rt_spin_unlock);  | 
|---|
 | 1186 | +  | 
|---|
 | 1187 | +/*  | 
|---|
 | 1188 | + * Wait for the lock to get unlocked: instead of polling for an unlock  | 
|---|
 | 1189 | + * (like raw spinlocks do), we lock and unlock, to force the kernel to  | 
|---|
 | 1190 | + * schedule if there's contention:  | 
|---|
 | 1191 | + */  | 
|---|
 | 1192 | +void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)  | 
|---|
 | 1193 | +{  | 
|---|
 | 1194 | +	spin_lock(lock);  | 
|---|
 | 1195 | +	spin_unlock(lock);  | 
|---|
 | 1196 | +}  | 
|---|
 | 1197 | +EXPORT_SYMBOL(rt_spin_lock_unlock);  | 
|---|
 | 1198 | +  | 
|---|
 | 1199 | +int __lockfunc rt_spin_trylock(spinlock_t *lock)  | 
|---|
 | 1200 | +{  | 
|---|
 | 1201 | +	int ret;  | 
|---|
 | 1202 | +  | 
|---|
 | 1203 | +	ret = __rt_mutex_trylock(&lock->lock);  | 
|---|
 | 1204 | +	if (ret) {  | 
|---|
 | 1205 | +		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);  | 
|---|
 | 1206 | +		rcu_read_lock();  | 
|---|
 | 1207 | +		migrate_disable();  | 
|---|
 | 1208 | +	}  | 
|---|
 | 1209 | +	return ret;  | 
|---|
 | 1210 | +}  | 
|---|
 | 1211 | +EXPORT_SYMBOL(rt_spin_trylock);  | 
|---|
 | 1212 | +  | 
|---|
 | 1213 | +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)  | 
|---|
 | 1214 | +{  | 
|---|
 | 1215 | +	int ret;  | 
|---|
 | 1216 | +  | 
|---|
 | 1217 | +	local_bh_disable();  | 
|---|
 | 1218 | +	ret = __rt_mutex_trylock(&lock->lock);  | 
|---|
 | 1219 | +	if (ret) {  | 
|---|
 | 1220 | +		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);  | 
|---|
 | 1221 | +		rcu_read_lock();  | 
|---|
 | 1222 | +		migrate_disable();  | 
|---|
 | 1223 | +	} else {  | 
|---|
 | 1224 | +		local_bh_enable();  | 
|---|
 | 1225 | +	}  | 
|---|
 | 1226 | +	return ret;  | 
|---|
 | 1227 | +}  | 
|---|
 | 1228 | +EXPORT_SYMBOL(rt_spin_trylock_bh);  | 
|---|
 | 1229 | +  | 
|---|
 | 1230 | +void  | 
|---|
 | 1231 | +__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)  | 
|---|
 | 1232 | +{  | 
|---|
 | 1233 | +#ifdef CONFIG_DEBUG_LOCK_ALLOC  | 
|---|
 | 1234 | +	/*  | 
|---|
 | 1235 | +	 * Make sure we are not reinitializing a held lock:  | 
|---|
 | 1236 | +	 */  | 
|---|
 | 1237 | +	debug_check_no_locks_freed((void *)lock, sizeof(*lock));  | 
|---|
 | 1238 | +	lockdep_init_map(&lock->dep_map, name, key, 0);  | 
|---|
 | 1239 | +#endif  | 
|---|
 | 1240 | +}  | 
|---|
 | 1241 | +EXPORT_SYMBOL(__rt_spin_lock_init);  | 
|---|
 | 1242 | +  | 
|---|
 | 1243 | +#endif /* PREEMPT_RT */  | 
|---|
 | 1244 | +  | 
|---|
 | 1245 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 1246 | +static inline int __sched  | 
|---|
 | 1247 | +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)  | 
|---|
 | 1248 | +{  | 
|---|
 | 1249 | +	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);  | 
|---|
 | 1250 | +	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);  | 
|---|
 | 1251 | +  | 
|---|
 | 1252 | +	if (!hold_ctx)  | 
|---|
 | 1253 | +		return 0;  | 
|---|
 | 1254 | +  | 
|---|
 | 1255 | +	if (unlikely(ctx == hold_ctx))  | 
|---|
 | 1256 | +		return -EALREADY;  | 
|---|
 | 1257 | +  | 
|---|
 | 1258 | +	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&  | 
|---|
 | 1259 | +	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {  | 
|---|
 | 1260 | +#ifdef CONFIG_DEBUG_MUTEXES  | 
|---|
 | 1261 | +		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);  | 
|---|
 | 1262 | +		ctx->contending_lock = ww;  | 
|---|
 | 1263 | +#endif  | 
|---|
 | 1264 | +		return -EDEADLK;  | 
|---|
 | 1265 | +	}  | 
|---|
 | 1266 | +  | 
|---|
 | 1267 | +	return 0;  | 
|---|
 | 1268 | +}  | 
|---|
 | 1269 | +#else  | 
|---|
 | 1270 | +static inline int __sched  | 
|---|
 | 1271 | +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)  | 
|---|
 | 1272 | +{  | 
|---|
 | 1273 | +	BUG();  | 
|---|
 | 1274 | +	return 0;  | 
|---|
 | 1275 | +}  | 
|---|
 | 1276 | +  | 
|---|
 | 1277 | +#endif  | 
|---|
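
__mutex_lock_check_stamp() implements the wait/wound back-off rule: if the lock is already held under an older acquire context (smaller stamp, with a wraparound-safe comparison and the context pointer as a tie-breaker), the younger context gets -EDEADLK and must back off. The comparison in isolation, as a hypothetical stand-alone helper with unsigned stamps:

```c
#include <limits.h>
#include <stdio.h>

struct acquire_ctx { unsigned long stamp; };

/*
 * Returns 1 when @ctx must back off because @hold_ctx is older
 * (wound/wait: the older transaction wins). The unsigned subtraction
 * wraps modulo ULONG_MAX + 1, so the test stays correct when the
 * stamp counter itself wraps around.
 */
static int must_back_off(const struct acquire_ctx *ctx,
			 const struct acquire_ctx *hold_ctx)
{
	return ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	       (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx);
}

int main(void)
{
	struct acquire_ctx older    = { .stamp = 100 };
	struct acquire_ctx newer    = { .stamp = 101 };
	struct acquire_ctx wrap_old = { .stamp = ULONG_MAX };
	struct acquire_ctx wrap_new = { .stamp = 1 };	/* counter wrapped */

	printf("newer vs older : %d\n", must_back_off(&newer, &older));      /* 1 */
	printf("older vs newer : %d\n", must_back_off(&older, &newer));      /* 0 */
	printf("post-wrap check: %d\n", must_back_off(&wrap_new, &wrap_old)); /* 1 */
	return 0;
}
```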
 | 1278 | +  | 
|---|
 | 1279 | +static inline int  | 
|---|
 | 1280 | +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,  | 
|---|
 | 1281 | +		     struct rt_mutex_waiter *waiter)  | 
|---|
 | 1282 | +{  | 
|---|
 | 1283 | +	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);  | 
|---|
 | 1284 | +}  | 
|---|
 | 1285 | +  | 
|---|
| 922 | 1286 |  /* | 
|---|
| 923 | 1287 |   * Task blocks on lock. | 
|---|
| 924 | 1288 |   * | 
|---|
| .. | .. | 
|---|
| 951 | 1315 |  		return -EDEADLK; | 
|---|
| 952 | 1316 |   | 
|---|
| 953 | 1317 |  	raw_spin_lock(&task->pi_lock); | 
|---|
 | 1318 | +	/*  | 
|---|
 | 1319 | +	 * In the case of futex requeue PI, this will be a proxy  | 
|---|
 | 1320 | + * lock. The task will wake unaware that it is enqueued on  | 
|---|
 | 1321 | +	 * this lock. Avoid blocking on two locks and corrupting  | 
|---|
 | 1322 | +	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS  | 
|---|
 | 1323 | +	 * flag. futex_wait_requeue_pi() sets this when it wakes up  | 
|---|
 | 1324 | +	 * before requeue (due to a signal or timeout). Do not enqueue  | 
|---|
 | 1325 | +	 * the task if PI_WAKEUP_INPROGRESS is set.  | 
|---|
 | 1326 | +	 */  | 
|---|
 | 1327 | +	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {  | 
|---|
 | 1328 | +		raw_spin_unlock(&task->pi_lock);  | 
|---|
 | 1329 | +		return -EAGAIN;  | 
|---|
 | 1330 | +	}  | 
|---|
 | 1331 | +  | 
|---|
 | 1332 | +	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));  | 
|---|
 | 1333 | +  | 
|---|
| 954 | 1334 |  	waiter->task = task; | 
|---|
| 955 | 1335 |  	waiter->lock = lock; | 
|---|
| 956 | 1336 |  	waiter->prio = task->prio; | 
|---|
| .. | .. | 
|---|
| 974 | 1354 |  		rt_mutex_enqueue_pi(owner, waiter); | 
|---|
| 975 | 1355 |   | 
|---|
| 976 | 1356 |  		rt_mutex_adjust_prio(owner); | 
|---|
| 977 |  | -		if (owner->pi_blocked_on)  | 
|---|
 | 1357 | +		if (rt_mutex_real_waiter(owner->pi_blocked_on))  | 
|---|
| 978 | 1358 |  			chain_walk = 1; | 
|---|
| 979 | 1359 |  	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { | 
|---|
| 980 | 1360 |  		chain_walk = 1; | 
|---|
| .. | .. | 
|---|
| 1016 | 1396 |   * Called with lock->wait_lock held and interrupts disabled. | 
|---|
| 1017 | 1397 |   */ | 
|---|
| 1018 | 1398 |  static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, | 
|---|
 | 1399 | +				    struct wake_q_head *wake_sleeper_q,  | 
|---|
| 1019 | 1400 |  				    struct rt_mutex *lock) | 
|---|
| 1020 | 1401 |  { | 
|---|
| 1021 | 1402 |  	struct rt_mutex_waiter *waiter; | 
|---|
| .. | .. | 
|---|
| 1055 | 1436 |  	 * Pairs with preempt_enable() in rt_mutex_postunlock(); | 
|---|
| 1056 | 1437 |  	 */ | 
|---|
| 1057 | 1438 |  	preempt_disable(); | 
|---|
| 1058 |  | -	wake_q_add(wake_q, waiter->task);  | 
|---|
 | 1439 | +	if (waiter->savestate)  | 
|---|
 | 1440 | +		wake_q_add_sleeper(wake_sleeper_q, waiter->task);  | 
|---|
 | 1441 | +	else  | 
|---|
 | 1442 | +		wake_q_add(wake_q, waiter->task);  | 
|---|
| 1059 | 1443 |  	raw_spin_unlock(&current->pi_lock); | 
|---|
| 1060 | 1444 |  } | 
|---|
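
mark_wakeup_next_waiter() never wakes anything directly; it only queues the top waiter on wake_q (or on the separate sleeper queue when the waiter used the saved_state path), and the wakeup itself happens in rt_mutex_postunlock() after wait_lock has been dropped. The deferred-wakeup pattern, reduced to a user-space sketch (hypothetical names, printf standing in for the actual wakeup):

```c
#include <stdio.h>

struct wq_task {
	const char *name;
	struct wq_task *next;
};

struct wake_queue { struct wq_task *head, **tail; };

#define WAKE_QUEUE_INIT(q) { .head = NULL, .tail = &(q).head }

/* Called with the "lock" held: only record who must be woken. */
static void wake_queue_add(struct wake_queue *q, struct wq_task *t)
{
	t->next = NULL;
	*q->tail = t;
	q->tail = &t->next;
}

/* Called after the lock is dropped: perform the deferred wakeups. */
static void wake_queue_drain(struct wake_queue *q)
{
	for (struct wq_task *t = q->head; t; t = t->next)
		printf("wake %s\n", t->name);	/* stands in for wake_up_process() */
	q->head = NULL;
	q->tail = &q->head;
}

int main(void)
{
	struct wake_queue q = WAKE_QUEUE_INIT(q);
	struct wq_task a = { .name = "top-waiter" }, b = { .name = "sleeper" };

	/* ... under wait_lock ... */
	wake_queue_add(&q, &a);
	wake_queue_add(&q, &b);
	/* ... wait_lock released ... */
	wake_queue_drain(&q);
	return 0;
}
```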
| 1061 | 1445 |   | 
|---|
| .. | .. | 
|---|
| 1070 | 1454 |  { | 
|---|
| 1071 | 1455 |  	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); | 
|---|
| 1072 | 1456 |  	struct task_struct *owner = rt_mutex_owner(lock); | 
|---|
| 1073 |  | -	struct rt_mutex *next_lock;  | 
|---|
 | 1457 | +	struct rt_mutex *next_lock = NULL;  | 
|---|
| 1074 | 1458 |   | 
|---|
| 1075 | 1459 |  	lockdep_assert_held(&lock->wait_lock); | 
|---|
| 1076 | 1460 |   | 
|---|
| .. | .. | 
|---|
| 1096 | 1480 |  	rt_mutex_adjust_prio(owner); | 
|---|
| 1097 | 1481 |   | 
|---|
| 1098 | 1482 |  	/* Store the lock on which owner is blocked or NULL */ | 
|---|
| 1099 |  | -	next_lock = task_blocked_on_lock(owner);  | 
|---|
 | 1483 | +	if (rt_mutex_real_waiter(owner->pi_blocked_on))  | 
|---|
 | 1484 | +		next_lock = task_blocked_on_lock(owner);  | 
|---|
| 1100 | 1485 |   | 
|---|
| 1101 | 1486 |  	raw_spin_unlock(&owner->pi_lock); | 
|---|
| 1102 | 1487 |   | 
|---|
| .. | .. | 
|---|
| 1132 | 1517 |  	raw_spin_lock_irqsave(&task->pi_lock, flags); | 
|---|
| 1133 | 1518 |   | 
|---|
| 1134 | 1519 |  	waiter = task->pi_blocked_on; | 
|---|
| 1135 |  | -	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {  | 
|---|
 | 1520 | +	if (!rt_mutex_real_waiter(waiter) ||  | 
|---|
 | 1521 | +	    rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {  | 
|---|
| 1136 | 1522 |  		raw_spin_unlock_irqrestore(&task->pi_lock, flags); | 
|---|
| 1137 | 1523 |  		return; | 
|---|
| 1138 | 1524 |  	} | 
|---|
| 1139 | 1525 |  	next_lock = waiter->lock; | 
|---|
| 1140 |  | -	raw_spin_unlock_irqrestore(&task->pi_lock, flags);  | 
|---|
| 1141 | 1526 |   | 
|---|
| 1142 | 1527 |  	/* gets dropped in rt_mutex_adjust_prio_chain()! */ | 
|---|
| 1143 | 1528 |  	get_task_struct(task); | 
|---|
| 1144 | 1529 |   | 
|---|
 | 1530 | +	raw_spin_unlock_irqrestore(&task->pi_lock, flags);  | 
|---|
| 1145 | 1531 |  	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, | 
|---|
| 1146 | 1532 |  				   next_lock, NULL, task); | 
|---|
| 1147 | 1533 |  } | 
|---|
| 1148 | 1534 |   | 
|---|
| 1149 |  | -void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)  | 
|---|
 | 1535 | +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)  | 
|---|
| 1150 | 1536 |  { | 
|---|
| 1151 | 1537 |  	debug_rt_mutex_init_waiter(waiter); | 
|---|
| 1152 | 1538 |  	RB_CLEAR_NODE(&waiter->pi_tree_entry); | 
|---|
| 1153 | 1539 |  	RB_CLEAR_NODE(&waiter->tree_entry); | 
|---|
| 1154 | 1540 |  	waiter->task = NULL; | 
|---|
 | 1541 | +	waiter->savestate = savestate;  | 
|---|
| 1155 | 1542 |  } | 
|---|
| 1156 | 1543 |   | 
|---|
| 1157 | 1544 |  /** | 
|---|
| .. | .. | 
|---|
| 1167 | 1554 |  static int __sched | 
|---|
| 1168 | 1555 |  __rt_mutex_slowlock(struct rt_mutex *lock, int state, | 
|---|
| 1169 | 1556 |  		    struct hrtimer_sleeper *timeout, | 
|---|
| 1170 |  | -		    struct rt_mutex_waiter *waiter)  | 
|---|
 | 1557 | +		    struct rt_mutex_waiter *waiter,  | 
|---|
 | 1558 | +		    struct ww_acquire_ctx *ww_ctx)  | 
|---|
| 1171 | 1559 |  { | 
|---|
| 1172 | 1560 |  	int ret = 0; | 
|---|
| 1173 | 1561 |   | 
|---|
 | 1562 | +	trace_android_vh_rtmutex_wait_start(lock);  | 
|---|
| 1174 | 1563 |  	for (;;) { | 
|---|
| 1175 | 1564 |  		/* Try to acquire the lock: */ | 
|---|
| 1176 | 1565 |  		if (try_to_take_rt_mutex(lock, current, waiter)) | 
|---|
| 1177 | 1566 |  			break; | 
|---|
| 1178 | 1567 |   | 
|---|
| 1179 |  | -		/*  | 
|---|
| 1180 |  | -		 * TASK_INTERRUPTIBLE checks for signals and  | 
|---|
| 1181 |  | -		 * timeout. Ignored otherwise.  | 
|---|
| 1182 |  | -		 */  | 
|---|
| 1183 |  | -		if (likely(state == TASK_INTERRUPTIBLE)) {  | 
|---|
| 1184 |  | -			/* Signal pending? */  | 
|---|
| 1185 |  | -			if (signal_pending(current))  | 
|---|
| 1186 |  | -				ret = -EINTR;  | 
|---|
| 1187 |  | -			if (timeout && !timeout->task)  | 
|---|
| 1188 |  | -				ret = -ETIMEDOUT;  | 
|---|
 | 1568 | +		if (timeout && !timeout->task) {  | 
|---|
 | 1569 | +			ret = -ETIMEDOUT;  | 
|---|
 | 1570 | +			break;  | 
|---|
 | 1571 | +		}  | 
|---|
 | 1572 | +		if (signal_pending_state(state, current)) {  | 
|---|
 | 1573 | +			ret = -EINTR;  | 
|---|
 | 1574 | +			break;  | 
|---|
 | 1575 | +		}  | 
|---|
 | 1576 | +  | 
|---|
 | 1577 | +		if (ww_ctx && ww_ctx->acquired > 0) {  | 
|---|
 | 1578 | +			ret = __mutex_lock_check_stamp(lock, ww_ctx);  | 
|---|
| 1189 | 1579 |  			if (ret) | 
|---|
| 1190 | 1580 |  				break; | 
|---|
| 1191 | 1581 |  		} | 
|---|
| 1192 | 1582 |   | 
|---|
| 1193 | 1583 |  		raw_spin_unlock_irq(&lock->wait_lock); | 
|---|
| 1194 |  | -  | 
|---|
| 1195 |  | -		debug_rt_mutex_print_deadlock(waiter);  | 
|---|
| 1196 | 1584 |   | 
|---|
| 1197 | 1585 |  		schedule(); | 
|---|
| 1198 | 1586 |   | 
|---|
| .. | .. | 
|---|
| 1200 | 1588 |  		set_current_state(state); | 
|---|
| 1201 | 1589 |  	} | 
|---|
| 1202 | 1590 |   | 
|---|
 | 1591 | +	trace_android_vh_rtmutex_wait_finish(lock);  | 
|---|
| 1203 | 1592 |  	__set_current_state(TASK_RUNNING); | 
|---|
| 1204 | 1593 |  	return ret; | 
|---|
| 1205 | 1594 |  } | 
|---|
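
The rewritten wait loop above drops the explicit TASK_INTERRUPTIBLE special case and relies on signal_pending_state(), so a pending signal only terminates the sleep when the sleep state permits it. A sketch of that predicate (flag values and helper names are made up for the example; see signal_pending_state() in the scheduler headers for the real thing):

```c
#include <stdio.h>

/* Illustrative flag values, not the kernel's. */
#define TASK_INTERRUPTIBLE	0x01
#define TASK_UNINTERRUPTIBLE	0x02
#define TASK_WAKEKILL		0x04
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

/*
 * A pending signal only interrupts the sleep when the sleep state
 * allows it: any signal for interruptible sleeps, fatal signals
 * for killable sleeps.
 */
static int breaks_sleep(unsigned int state, int signal_pending, int fatal_pending)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending)
		return 0;
	return (state & TASK_INTERRUPTIBLE) || fatal_pending;
}

int main(void)
{
	printf("%d\n", breaks_sleep(TASK_INTERRUPTIBLE, 1, 0));   /* 1 */
	printf("%d\n", breaks_sleep(TASK_UNINTERRUPTIBLE, 1, 0)); /* 0 */
	printf("%d\n", breaks_sleep(TASK_KILLABLE, 1, 1));        /* 1 */
	return 0;
}
```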
| .. | .. | 
|---|
| 1214 | 1603 |  	if (res != -EDEADLOCK || detect_deadlock) | 
|---|
| 1215 | 1604 |  		return; | 
|---|
| 1216 | 1605 |   | 
|---|
| 1217 |  | -	/*  | 
|---|
| 1218 |  | -	 * Yell lowdly and stop the task right here.  | 
|---|
| 1219 |  | -	 */  | 
|---|
| 1220 |  | -	rt_mutex_print_deadlock(w);  | 
|---|
| 1221 | 1606 |  	while (1) { | 
|---|
| 1222 | 1607 |  		set_current_state(TASK_INTERRUPTIBLE); | 
|---|
| 1223 | 1608 |  		schedule(); | 
|---|
| 1224 | 1609 |  	} | 
|---|
 | 1610 | +}  | 
|---|
 | 1611 | +  | 
|---|
 | 1612 | +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,  | 
|---|
 | 1613 | +						   struct ww_acquire_ctx *ww_ctx)  | 
|---|
 | 1614 | +{  | 
|---|
 | 1615 | +#ifdef CONFIG_DEBUG_MUTEXES  | 
|---|
 | 1616 | +	/*  | 
|---|
 | 1617 | +	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,  | 
|---|
 | 1618 | +	 * but released with a normal mutex_unlock in this call.  | 
|---|
 | 1619 | +	 *  | 
|---|
 | 1620 | +	 * This should never happen, always use ww_mutex_unlock.  | 
|---|
 | 1621 | +	 */  | 
|---|
 | 1622 | +	DEBUG_LOCKS_WARN_ON(ww->ctx);  | 
|---|
 | 1623 | +  | 
|---|
 | 1624 | +	/*  | 
|---|
 | 1625 | +	 * Not quite done after calling ww_acquire_done() ?  | 
|---|
 | 1626 | +	 */  | 
|---|
 | 1627 | +	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);  | 
|---|
 | 1628 | +  | 
|---|
 | 1629 | +	if (ww_ctx->contending_lock) {  | 
|---|
 | 1630 | +		/*  | 
|---|
 | 1631 | +		 * After -EDEADLK you tried to  | 
|---|
 | 1632 | +		 * acquire a different ww_mutex? Bad!  | 
|---|
 | 1633 | +		 */  | 
|---|
 | 1634 | +		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);  | 
|---|
 | 1635 | +  | 
|---|
 | 1636 | +		/*  | 
|---|
 | 1637 | +		 * You called ww_mutex_lock after receiving -EDEADLK,  | 
|---|
 | 1638 | +		 * but 'forgot' to unlock everything else first?  | 
|---|
 | 1639 | +		 */  | 
|---|
 | 1640 | +		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);  | 
|---|
 | 1641 | +		ww_ctx->contending_lock = NULL;  | 
|---|
 | 1642 | +	}  | 
|---|
 | 1643 | +  | 
|---|
 | 1644 | +	/*  | 
|---|
 | 1645 | +	 * Naughty, using a different class will lead to undefined behavior!  | 
|---|
 | 1646 | +	 */  | 
|---|
 | 1647 | +	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);  | 
|---|
 | 1648 | +#endif  | 
|---|
 | 1649 | +	ww_ctx->acquired++;  | 
|---|
 | 1650 | +}  | 
|---|
 | 1651 | +  | 
|---|
 | 1652 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 1653 | +static void ww_mutex_account_lock(struct rt_mutex *lock,  | 
|---|
 | 1654 | +				  struct ww_acquire_ctx *ww_ctx)  | 
|---|
 | 1655 | +{  | 
|---|
 | 1656 | +	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);  | 
|---|
 | 1657 | +	struct rt_mutex_waiter *waiter, *n;  | 
|---|
 | 1658 | +  | 
|---|
 | 1659 | +	/*  | 
|---|
 | 1660 | +	 * This branch gets optimized out for the common case,  | 
|---|
 | 1661 | +	 * and is only important for ww_mutex_lock.  | 
|---|
 | 1662 | +	 */  | 
|---|
 | 1663 | +	ww_mutex_lock_acquired(ww, ww_ctx);  | 
|---|
 | 1664 | +	ww->ctx = ww_ctx;  | 
|---|
 | 1665 | +  | 
|---|
 | 1666 | +	/*  | 
|---|
 | 1667 | +	 * Give any possible sleeping processes the chance to wake up,  | 
|---|
 | 1668 | +	 * so they can recheck if they have to back off.  | 
|---|
 | 1669 | +	 */  | 
|---|
 | 1670 | +	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,  | 
|---|
 | 1671 | +					     tree_entry) {  | 
|---|
 | 1672 | +		/* XXX debug rt mutex waiter wakeup */  | 
|---|
 | 1673 | +  | 
|---|
 | 1674 | +		BUG_ON(waiter->lock != lock);  | 
|---|
 | 1675 | +		rt_mutex_wake_waiter(waiter);  | 
|---|
 | 1676 | +	}  | 
|---|
 | 1677 | +}  | 
|---|
 | 1678 | +  | 
|---|
 | 1679 | +#else  | 
|---|
 | 1680 | +  | 
|---|
 | 1681 | +static void ww_mutex_account_lock(struct rt_mutex *lock,  | 
|---|
 | 1682 | +				  struct ww_acquire_ctx *ww_ctx)  | 
|---|
 | 1683 | +{  | 
|---|
 | 1684 | +	BUG();  | 
|---|
 | 1685 | +}  | 
|---|
 | 1686 | +#endif  | 
|---|
 | 1687 | +  | 
|---|
 | 1688 | +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,  | 
|---|
 | 1689 | +				     struct hrtimer_sleeper *timeout,  | 
|---|
 | 1690 | +				     enum rtmutex_chainwalk chwalk,  | 
|---|
 | 1691 | +				     struct ww_acquire_ctx *ww_ctx,  | 
|---|
 | 1692 | +				     struct rt_mutex_waiter *waiter)  | 
|---|
 | 1693 | +{  | 
|---|
 | 1694 | +	int ret;  | 
|---|
 | 1695 | +  | 
|---|
 | 1696 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 1697 | +	if (ww_ctx) {  | 
|---|
 | 1698 | +		struct ww_mutex *ww;  | 
|---|
 | 1699 | +  | 
|---|
 | 1700 | +		ww = container_of(lock, struct ww_mutex, base.lock);  | 
|---|
 | 1701 | +		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))  | 
|---|
 | 1702 | +			return -EALREADY;  | 
|---|
 | 1703 | +	}  | 
|---|
 | 1704 | +#endif  | 
|---|
 | 1705 | +  | 
|---|
 | 1706 | +	/* Try to acquire the lock again: */  | 
|---|
 | 1707 | +	if (try_to_take_rt_mutex(lock, current, NULL)) {  | 
|---|
 | 1708 | +		if (ww_ctx)  | 
|---|
 | 1709 | +			ww_mutex_account_lock(lock, ww_ctx);  | 
|---|
 | 1710 | +		return 0;  | 
|---|
 | 1711 | +	}  | 
|---|
 | 1712 | +  | 
|---|
 | 1713 | +	set_current_state(state);  | 
|---|
 | 1714 | +  | 
|---|
 | 1715 | +	/* Setup the timer, when timeout != NULL */  | 
|---|
 | 1716 | +	if (unlikely(timeout))  | 
|---|
 | 1717 | +		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);  | 
|---|
 | 1718 | +  | 
|---|
 | 1719 | +	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);  | 
|---|
 | 1720 | +  | 
|---|
 | 1721 | +	if (likely(!ret)) {  | 
|---|
 | 1722 | +		/* sleep on the mutex */  | 
|---|
 | 1723 | +		ret = __rt_mutex_slowlock(lock, state, timeout, waiter,  | 
|---|
 | 1724 | +					  ww_ctx);  | 
|---|
 | 1725 | +	} else if (ww_ctx) {  | 
|---|
 | 1726 | +		/* ww_mutex received EDEADLK, let it become EALREADY */  | 
|---|
 | 1727 | +		ret = __mutex_lock_check_stamp(lock, ww_ctx);  | 
|---|
 | 1728 | +		BUG_ON(!ret);  | 
|---|
 | 1729 | +	}  | 
|---|
 | 1730 | +  | 
|---|
 | 1731 | +	if (unlikely(ret)) {  | 
|---|
 | 1732 | +		__set_current_state(TASK_RUNNING);  | 
|---|
 | 1733 | +		remove_waiter(lock, waiter);  | 
|---|
 | 1734 | +		/* ww_mutex wants to report EDEADLK/EALREADY, let it */  | 
|---|
 | 1735 | +		if (!ww_ctx)  | 
|---|
 | 1736 | +			rt_mutex_handle_deadlock(ret, chwalk, waiter);  | 
|---|
 | 1737 | +	} else if (ww_ctx) {  | 
|---|
 | 1738 | +		ww_mutex_account_lock(lock, ww_ctx);  | 
|---|
 | 1739 | +	}  | 
|---|
 | 1740 | +  | 
|---|
 | 1741 | +	/*  | 
|---|
 | 1742 | +	 * try_to_take_rt_mutex() sets the waiter bit  | 
|---|
 | 1743 | +	 * unconditionally. We might have to fix that up.  | 
|---|
 | 1744 | +	 */  | 
|---|
 | 1745 | +	fixup_rt_mutex_waiters(lock);  | 
|---|
 | 1746 | +	return ret;  | 
|---|
| 1225 | 1747 |  } | 
|---|
| 1226 | 1748 |   | 
|---|
| 1227 | 1749 |  /* | 
|---|
| .. | .. | 
|---|
| 1230 | 1752 |  static int __sched | 
|---|
| 1231 | 1753 |  rt_mutex_slowlock(struct rt_mutex *lock, int state, | 
|---|
| 1232 | 1754 |  		  struct hrtimer_sleeper *timeout, | 
|---|
| 1233 |  | -		  enum rtmutex_chainwalk chwalk)  | 
|---|
 | 1755 | +		  enum rtmutex_chainwalk chwalk,  | 
|---|
 | 1756 | +		  struct ww_acquire_ctx *ww_ctx)  | 
|---|
| 1234 | 1757 |  { | 
|---|
| 1235 | 1758 |  	struct rt_mutex_waiter waiter; | 
|---|
| 1236 | 1759 |  	unsigned long flags; | 
|---|
| 1237 | 1760 |  	int ret = 0; | 
|---|
| 1238 | 1761 |   | 
|---|
| 1239 |  | -	rt_mutex_init_waiter(&waiter);  | 
|---|
 | 1762 | +	rt_mutex_init_waiter(&waiter, false);  | 
|---|
| 1240 | 1763 |   | 
|---|
| 1241 | 1764 |  	/* | 
|---|
| 1242 | 1765 |  	 * Technically we could use raw_spin_[un]lock_irq() here, but this can | 
|---|
| .. | .. | 
|---|
| 1248 | 1771 |  	 */ | 
|---|
| 1249 | 1772 |  	raw_spin_lock_irqsave(&lock->wait_lock, flags); | 
|---|
| 1250 | 1773 |   | 
|---|
| 1251 |  | -	/* Try to acquire the lock again: */  | 
|---|
| 1252 |  | -	if (try_to_take_rt_mutex(lock, current, NULL)) {  | 
|---|
| 1253 |  | -		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);  | 
|---|
| 1254 |  | -		return 0;  | 
|---|
| 1255 |  | -	}  | 
|---|
| 1256 |  | -  | 
|---|
| 1257 |  | -	set_current_state(state);  | 
|---|
| 1258 |  | -  | 
|---|
| 1259 |  | -	/* Setup the timer, when timeout != NULL */  | 
|---|
| 1260 |  | -	if (unlikely(timeout))  | 
|---|
| 1261 |  | -		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);  | 
|---|
| 1262 |  | -  | 
|---|
| 1263 |  | -	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);  | 
|---|
| 1264 |  | -  | 
|---|
| 1265 |  | -	if (likely(!ret))  | 
|---|
| 1266 |  | -		/* sleep on the mutex */  | 
|---|
| 1267 |  | -		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);  | 
|---|
| 1268 |  | -  | 
|---|
| 1269 |  | -	if (unlikely(ret)) {  | 
|---|
| 1270 |  | -		__set_current_state(TASK_RUNNING);  | 
|---|
| 1271 |  | -		remove_waiter(lock, &waiter);  | 
|---|
| 1272 |  | -		rt_mutex_handle_deadlock(ret, chwalk, &waiter);  | 
|---|
| 1273 |  | -	}  | 
|---|
| 1274 |  | -  | 
|---|
| 1275 |  | -	/*  | 
|---|
| 1276 |  | -	 * try_to_take_rt_mutex() sets the waiter bit  | 
|---|
| 1277 |  | -	 * unconditionally. We might have to fix that up.  | 
|---|
| 1278 |  | -	 */  | 
|---|
| 1279 |  | -	fixup_rt_mutex_waiters(lock);  | 
|---|
 | 1774 | +	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,  | 
|---|
 | 1775 | +				       &waiter);  | 
|---|
| 1280 | 1776 |   | 
|---|
| 1281 | 1777 |  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | 
|---|
| 1282 | 1778 |   | 
|---|
| .. | .. | 
|---|
| 1337 | 1833 |   * Return whether the current task needs to call rt_mutex_postunlock(). | 
|---|
| 1338 | 1834 |   */ | 
|---|
| 1339 | 1835 |  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, | 
|---|
| 1340 |  | -					struct wake_q_head *wake_q)  | 
|---|
 | 1836 | +					struct wake_q_head *wake_q,  | 
|---|
 | 1837 | +					struct wake_q_head *wake_sleeper_q)  | 
|---|
| 1341 | 1838 |  { | 
|---|
| 1342 | 1839 |  	unsigned long flags; | 
|---|
| 1343 | 1840 |   | 
|---|
| .. | .. | 
|---|
| 1391 | 1888 |  	 * | 
|---|
| 1392 | 1889 |  	 * Queue the next waiter for wakeup once we release the wait_lock. | 
|---|
| 1393 | 1890 |  	 */ | 
|---|
| 1394 |  | -	mark_wakeup_next_waiter(wake_q, lock);  | 
|---|
 | 1891 | +	mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);  | 
|---|
| 1395 | 1892 |  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | 
|---|
| 1396 | 1893 |   | 
|---|
| 1397 | 1894 |  	return true; /* call rt_mutex_postunlock() */ | 
|---|
| .. | .. | 
|---|
| 1405 | 1902 |   */ | 
|---|
| 1406 | 1903 |  static inline int | 
|---|
| 1407 | 1904 |  rt_mutex_fastlock(struct rt_mutex *lock, int state, | 
|---|
 | 1905 | +		  struct ww_acquire_ctx *ww_ctx,  | 
|---|
| 1408 | 1906 |  		  int (*slowfn)(struct rt_mutex *lock, int state, | 
|---|
| 1409 | 1907 |  				struct hrtimer_sleeper *timeout, | 
|---|
| 1410 |  | -				enum rtmutex_chainwalk chwalk))  | 
|---|
 | 1908 | +				enum rtmutex_chainwalk chwalk,  | 
|---|
 | 1909 | +				struct ww_acquire_ctx *ww_ctx))  | 
|---|
| 1411 | 1910 |  { | 
|---|
| 1412 | 1911 |  	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) | 
|---|
| 1413 | 1912 |  		return 0; | 
|---|
| 1414 | 1913 |   | 
|---|
| 1415 |  | -	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);  | 
|---|
| 1416 |  | -}  | 
|---|
| 1417 |  | -  | 
|---|
| 1418 |  | -static inline int  | 
|---|
| 1419 |  | -rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,  | 
|---|
| 1420 |  | -			struct hrtimer_sleeper *timeout,  | 
|---|
| 1421 |  | -			enum rtmutex_chainwalk chwalk,  | 
|---|
| 1422 |  | -			int (*slowfn)(struct rt_mutex *lock, int state,  | 
|---|
| 1423 |  | -				      struct hrtimer_sleeper *timeout,  | 
|---|
| 1424 |  | -				      enum rtmutex_chainwalk chwalk))  | 
|---|
| 1425 |  | -{  | 
|---|
| 1426 |  | -	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&  | 
|---|
| 1427 |  | -	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))  | 
|---|
| 1428 |  | -		return 0;  | 
|---|
| 1429 |  | -  | 
|---|
| 1430 |  | -	return slowfn(lock, state, timeout, chwalk);  | 
|---|
 | 1914 | +	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);  | 
|---|
| 1431 | 1915 |  } | 
|---|
| 1432 | 1916 |   | 
|---|
| 1433 | 1917 |  static inline int | 
|---|
| .. | .. | 
|---|
| 1441 | 1925 |  } | 
|---|
| 1442 | 1926 |   | 
|---|
| 1443 | 1927 |  /* | 
|---|
| 1444 |  | - * Performs the wakeup of the the top-waiter and re-enables preemption.  | 
|---|
 | 1928 | + * Performs the wakeup of the top-waiter and re-enables preemption.  | 
|---|
| 1445 | 1929 |   */ | 
|---|
| 1446 |  | -void rt_mutex_postunlock(struct wake_q_head *wake_q)  | 
|---|
 | 1930 | +void rt_mutex_postunlock(struct wake_q_head *wake_q,  | 
|---|
 | 1931 | +			 struct wake_q_head *wake_sleeper_q)  | 
|---|
| 1447 | 1932 |  { | 
|---|
| 1448 | 1933 |  	wake_up_q(wake_q); | 
|---|
 | 1934 | +	wake_up_q_sleeper(wake_sleeper_q);  | 
|---|
| 1449 | 1935 |   | 
|---|
| 1450 | 1936 |  	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */ | 
|---|
| 1451 | 1937 |  	preempt_enable(); | 
|---|
| .. | .. | 
|---|
| 1454 | 1940 |  static inline void | 
|---|
| 1455 | 1941 |  rt_mutex_fastunlock(struct rt_mutex *lock, | 
|---|
| 1456 | 1942 |  		    bool (*slowfn)(struct rt_mutex *lock, | 
|---|
| 1457 |  | -				   struct wake_q_head *wqh))  | 
|---|
 | 1943 | +				   struct wake_q_head *wqh,  | 
|---|
 | 1944 | +				   struct wake_q_head *wq_sleeper))  | 
|---|
| 1458 | 1945 |  { | 
|---|
| 1459 | 1946 |  	DEFINE_WAKE_Q(wake_q); | 
|---|
 | 1947 | +	DEFINE_WAKE_Q(wake_sleeper_q);  | 
|---|
| 1460 | 1948 |   | 
|---|
| 1461 | 1949 |  	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) | 
|---|
| 1462 | 1950 |  		return; | 
|---|
| 1463 | 1951 |   | 
|---|
| 1464 |  | -	if (slowfn(lock, &wake_q))  | 
|---|
| 1465 |  | -		rt_mutex_postunlock(&wake_q);  | 
|---|
 | 1952 | +	if (slowfn(lock, &wake_q, &wake_sleeper_q))  | 
|---|
 | 1953 | +		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);  | 
|---|
 | 1954 | +}  | 
|---|
 | 1955 | +  | 
|---|
 | 1956 | +int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)  | 
|---|
 | 1957 | +{  | 
|---|
 | 1958 | +	might_sleep();  | 
|---|
 | 1959 | +	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);  | 
|---|
 | 1960 | +}  | 
|---|
 | 1961 | +  | 
|---|
 | 1962 | +/**  | 
|---|
 | 1963 | + * rt_mutex_lock_state - lock a rt_mutex with a given state  | 
|---|
 | 1964 | + *  | 
|---|
 | 1965 | + * @lock:      The rt_mutex to be locked  | 
|---|
 | 1966 | + * @state:     The state to set when blocking on the rt_mutex  | 
|---|
 | 1967 | + */  | 
|---|
 | 1968 | +static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,  | 
|---|
 | 1969 | +					      unsigned int subclass, int state)  | 
|---|
 | 1970 | +{  | 
|---|
 | 1971 | +	int ret;  | 
|---|
 | 1972 | +  | 
|---|
 | 1973 | +	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  | 
|---|
 | 1974 | +	ret = __rt_mutex_lock_state(lock, state);  | 
|---|
 | 1975 | +	if (ret)  | 
|---|
 | 1976 | +		mutex_release(&lock->dep_map, _RET_IP_);  | 
|---|
 | 1977 | +	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);  | 
|---|
 | 1978 | +  | 
|---|
 | 1979 | +	return ret;  | 
|---|
| 1466 | 1980 |  } | 
|---|
| 1467 | 1981 |   | 
|---|
| 1468 | 1982 |  static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) | 
|---|
| 1469 | 1983 |  { | 
|---|
| 1470 |  | -	might_sleep();  | 
|---|
| 1471 |  | -  | 
|---|
| 1472 |  | -	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);  | 
|---|
| 1473 |  | -	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);  | 
|---|
 | 1984 | +	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);  | 
|---|
| 1474 | 1985 |  } | 
|---|
| 1475 | 1986 |   | 
|---|
| 1476 | 1987 |  #ifdef CONFIG_DEBUG_LOCK_ALLOC | 
|---|
| .. | .. | 
|---|
| 1485 | 1996 |  	__rt_mutex_lock(lock, subclass); | 
|---|
| 1486 | 1997 |  } | 
|---|
| 1487 | 1998 |  EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); | 
|---|
| 1488 |  | -#endif  | 
|---|
| 1489 | 1999 |   | 
|---|
| 1490 |  | -#ifndef CONFIG_DEBUG_LOCK_ALLOC  | 
|---|
 | 2000 | +#else /* !CONFIG_DEBUG_LOCK_ALLOC */  | 
|---|
 | 2001 | +  | 
|---|
| 1491 | 2002 |  /** | 
|---|
| 1492 | 2003 |   * rt_mutex_lock - lock a rt_mutex | 
|---|
| 1493 | 2004 |   * | 
|---|
| .. | .. | 
|---|
| 1511 | 2022 |   */ | 
|---|
| 1512 | 2023 |  int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) | 
|---|
| 1513 | 2024 |  { | 
|---|
| 1514 |  | -	int ret;  | 
|---|
| 1515 |  | -  | 
|---|
| 1516 |  | -	might_sleep();  | 
|---|
| 1517 |  | -  | 
|---|
| 1518 |  | -	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);  | 
|---|
| 1519 |  | -	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);  | 
|---|
| 1520 |  | -	if (ret)  | 
|---|
| 1521 |  | -		mutex_release(&lock->dep_map, 1, _RET_IP_);  | 
|---|
| 1522 |  | -  | 
|---|
| 1523 |  | -	return ret;  | 
|---|
 | 2025 | +	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);  | 
|---|
| 1524 | 2026 |  } | 
|---|
| 1525 | 2027 |  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); | 
|---|
| 1526 | 2028 |   | 
|---|
| .. | .. | 
|---|
| 1537 | 2039 |  	return __rt_mutex_slowtrylock(lock); | 
|---|
| 1538 | 2040 |  } | 
|---|
| 1539 | 2041 |   | 
|---|
| 1540 |  | -/**  | 
|---|
| 1541 |  | - * rt_mutex_timed_lock - lock a rt_mutex interruptible  | 
|---|
| 1542 |  | - *			the timeout structure is provided  | 
|---|
| 1543 |  | - *			by the caller  | 
|---|
| 1544 |  | - *  | 
|---|
| 1545 |  | - * @lock:		the rt_mutex to be locked  | 
|---|
| 1546 |  | - * @timeout:		timeout structure or NULL (no timeout)  | 
|---|
| 1547 |  | - *  | 
|---|
| 1548 |  | - * Returns:  | 
|---|
| 1549 |  | - *  0		on success  | 
|---|
| 1550 |  | - * -EINTR	when interrupted by a signal  | 
|---|
| 1551 |  | - * -ETIMEDOUT	when the timeout expired  | 
|---|
| 1552 |  | - */  | 
|---|
| 1553 |  | -int  | 
|---|
| 1554 |  | -rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)  | 
|---|
 | 2042 | +int __sched __rt_mutex_trylock(struct rt_mutex *lock)  | 
|---|
| 1555 | 2043 |  { | 
|---|
| 1556 |  | -	int ret;  | 
|---|
 | 2044 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 2045 | +	if (WARN_ON_ONCE(in_irq() || in_nmi()))  | 
|---|
 | 2046 | +#else  | 
|---|
 | 2047 | +	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))  | 
|---|
 | 2048 | +#endif  | 
|---|
 | 2049 | +		return 0;  | 
|---|
| 1557 | 2050 |   | 
|---|
| 1558 |  | -	might_sleep();  | 
|---|
| 1559 |  | -  | 
|---|
| 1560 |  | -	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);  | 
|---|
| 1561 |  | -	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,  | 
|---|
| 1562 |  | -				       RT_MUTEX_MIN_CHAINWALK,  | 
|---|
| 1563 |  | -				       rt_mutex_slowlock);  | 
|---|
| 1564 |  | -	if (ret)  | 
|---|
| 1565 |  | -		mutex_release(&lock->dep_map, 1, _RET_IP_);  | 
|---|
| 1566 |  | -  | 
|---|
| 1567 |  | -	return ret;  | 
|---|
 | 2051 | +	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);  | 
|---|
| 1568 | 2052 |  } | 
|---|
| 1569 |  | -EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);  | 
|---|
| 1570 | 2053 |   | 
|---|
| 1571 | 2054 |  /** | 
|---|
| 1572 | 2055 |   * rt_mutex_trylock - try to lock a rt_mutex | 
|---|
| .. | .. | 
|---|
| 1583 | 2066 |  { | 
|---|
| 1584 | 2067 |  	int ret; | 
|---|
| 1585 | 2068 |   | 
|---|
| 1586 |  | -	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))  | 
|---|
| 1587 |  | -		return 0;  | 
|---|
| 1588 |  | -  | 
|---|
| 1589 |  | -	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);  | 
|---|
 | 2069 | +	ret = __rt_mutex_trylock(lock);  | 
|---|
| 1590 | 2070 |  	if (ret) | 
|---|
| 1591 | 2071 |  		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); | 
|---|
 | 2072 | +	else  | 
|---|
 | 2073 | +		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);  | 
|---|
| 1592 | 2074 |   | 
|---|
| 1593 | 2075 |  	return ret; | 
|---|
| 1594 | 2076 |  } | 
|---|
| 1595 |  | -EXPORT_SYMBOL_GPL(rt_mutex_trylock);  | 
|---|
 | 2077 | +  | 
|---|
 | 2078 | +void __sched __rt_mutex_unlock(struct rt_mutex *lock)  | 
|---|
 | 2079 | +{  | 
|---|
 | 2080 | +	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);  | 
|---|
 | 2081 | +}  | 
|---|
| 1596 | 2082 |   | 
|---|
| 1597 | 2083 |  /** | 
|---|
| 1598 | 2084 |   * rt_mutex_unlock - unlock a rt_mutex | 
|---|
| .. | .. | 
|---|
| 1601 | 2087 |   */ | 
|---|
| 1602 | 2088 |  void __sched rt_mutex_unlock(struct rt_mutex *lock) | 
|---|
| 1603 | 2089 |  { | 
|---|
| 1604 |  | -	mutex_release(&lock->dep_map, 1, _RET_IP_);  | 
|---|
| 1605 |  | -	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);  | 
|---|
 | 2090 | +	mutex_release(&lock->dep_map, _RET_IP_);  | 
|---|
 | 2091 | +	__rt_mutex_unlock(lock);  | 
|---|
 | 2092 | +	trace_android_vh_record_rtmutex_lock_starttime(current, 0);  | 
|---|
| 1606 | 2093 |  } | 
|---|
| 1607 | 2094 |  EXPORT_SYMBOL_GPL(rt_mutex_unlock); | 
|---|
| 1608 | 2095 |   | 
|---|
| 1609 |  | -/**  | 
|---|
| 1610 |  | - * Futex variant, that since futex variants do not use the fast-path, can be  | 
|---|
| 1611 |  | - * simple and will not need to retry.  | 
|---|
| 1612 |  | - */  | 
|---|
| 1613 |  | -bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,  | 
|---|
| 1614 |  | -				    struct wake_q_head *wake_q)  | 
|---|
 | 2096 | +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,  | 
|---|
 | 2097 | +					     struct wake_q_head *wake_q,  | 
|---|
 | 2098 | +					     struct wake_q_head *wq_sleeper)  | 
|---|
| 1615 | 2099 |  { | 
|---|
| 1616 | 2100 |  	lockdep_assert_held(&lock->wait_lock); | 
|---|
| 1617 | 2101 |   | 
|---|
| .. | .. | 
|---|
| 1628 | 2112 |  	 * avoid inversion prior to the wakeup.  preempt_disable() | 
|---|
| 1629 | 2113 |  	 * therein pairs with rt_mutex_postunlock(). | 
|---|
| 1630 | 2114 |  	 */ | 
|---|
| 1631 |  | -	mark_wakeup_next_waiter(wake_q, lock);  | 
|---|
 | 2115 | +	mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);  | 
|---|
| 1632 | 2116 |   | 
|---|
| 1633 | 2117 |  	return true; /* call postunlock() */ | 
|---|
 | 2118 | +}  | 
|---|
 | 2119 | +  | 
|---|
 | 2120 | +/**  | 
|---|
 | 2121 | + * Futex variant, that since futex variants do not use the fast-path, can be  | 
|---|
 | 2122 | + * simple and will not need to retry.  | 
|---|
 | 2123 | + */  | 
|---|
 | 2124 | +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,  | 
|---|
 | 2125 | +				     struct wake_q_head *wake_q,  | 
|---|
 | 2126 | +				     struct wake_q_head *wq_sleeper)  | 
|---|
 | 2127 | +{  | 
|---|
 | 2128 | +	return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);  | 
|---|
| 1634 | 2129 |  } | 
|---|
| 1635 | 2130 |   | 
|---|
| 1636 | 2131 |  void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) | 
|---|
| 1637 | 2132 |  { | 
|---|
| 1638 | 2133 |  	DEFINE_WAKE_Q(wake_q); | 
|---|
 | 2134 | +	DEFINE_WAKE_Q(wake_sleeper_q);  | 
|---|
| 1639 | 2135 |  	unsigned long flags; | 
|---|
| 1640 | 2136 |  	bool postunlock; | 
|---|
| 1641 | 2137 |   | 
|---|
| 1642 | 2138 |  	raw_spin_lock_irqsave(&lock->wait_lock, flags); | 
|---|
| 1643 |  | -	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);  | 
|---|
 | 2139 | +	postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);  | 
|---|
| 1644 | 2140 |  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags); | 
|---|
| 1645 | 2141 |   | 
|---|
| 1646 | 2142 |  	if (postunlock) | 
|---|
| 1647 |  | -		rt_mutex_postunlock(&wake_q);  | 
|---|
 | 2143 | +		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);  | 
|---|
| 1648 | 2144 |  } | 
|---|
| 1649 | 2145 |   | 
|---|
| 1650 | 2146 |  /** | 
|---|
| .. | .. | 
|---|
| 1658 | 2154 |  void rt_mutex_destroy(struct rt_mutex *lock) | 
|---|
| 1659 | 2155 |  { | 
|---|
| 1660 | 2156 |  	WARN_ON(rt_mutex_is_locked(lock)); | 
|---|
| 1661 |  | -#ifdef CONFIG_DEBUG_RT_MUTEXES  | 
|---|
| 1662 |  | -	lock->magic = NULL;  | 
|---|
| 1663 |  | -#endif  | 
|---|
| 1664 | 2157 |  } | 
|---|
| 1665 | 2158 |  EXPORT_SYMBOL_GPL(rt_mutex_destroy); | 
|---|
| 1666 | 2159 |   | 
|---|
| .. | .. | 
|---|
| 1683 | 2176 |  	if (name && key) | 
|---|
| 1684 | 2177 |  		debug_rt_mutex_init(lock, name, key); | 
|---|
| 1685 | 2178 |  } | 
|---|
| 1686 |  | -EXPORT_SYMBOL_GPL(__rt_mutex_init);  | 
|---|
 | 2179 | +EXPORT_SYMBOL(__rt_mutex_init);  | 
|---|
| 1687 | 2180 |   | 
|---|
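As a usage note (illustrative, not part of the patch): callers normally reach the export above through the rt_mutex_init() wrapper, which supplies a per-call-site lock_class_key so each dynamically allocated lock gets its own lockdep class. The structure and allocator below are hypothetical.

```c
/* Illustrative only -- not part of this patch. */
#include <linux/rtmutex.h>
#include <linux/slab.h>

struct example_object {			/* hypothetical container */
	struct rt_mutex lock;
	int value;
};

static struct example_object *example_object_alloc(void)
{
	struct example_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* expands to __rt_mutex_init(&obj->lock, __func__, &__key) */
	rt_mutex_init(&obj->lock);
	return obj;
}
```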
| 1688 | 2181 |  /** | 
|---|
| 1689 | 2182 |   * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a | 
|---|
| .. | .. | 
|---|
| 1703 | 2196 |  				struct task_struct *proxy_owner) | 
|---|
| 1704 | 2197 |  { | 
|---|
| 1705 | 2198 |  	__rt_mutex_init(lock, NULL, NULL); | 
|---|
 | 2199 | +#ifdef CONFIG_DEBUG_SPINLOCK  | 
|---|
 | 2200 | +	/*  | 
|---|
 | 2201 | +	 * Get another key class for the wait_lock. LOCK_PI and UNLOCK_PI  | 
|---|
 | 2202 | +	 * hold the ->wait_lock of the proxy_lock while unlocking a sleeping  | 
|---|
 | 2203 | +	 * lock.  | 
|---|
 | 2204 | +	 */  | 
|---|
 | 2205 | +	raw_spin_lock_init(&lock->wait_lock);  | 
|---|
 | 2206 | +#endif  | 
|---|
| 1706 | 2207 |  	debug_rt_mutex_proxy_lock(lock, proxy_owner); | 
|---|
| 1707 | 2208 |  	rt_mutex_set_owner(lock, proxy_owner); | 
|---|
| 1708 | 2209 |  } | 
|---|
| .. | .. | 
|---|
| 1723 | 2224 |  { | 
|---|
| 1724 | 2225 |  	debug_rt_mutex_proxy_unlock(lock); | 
|---|
| 1725 | 2226 |  	rt_mutex_set_owner(lock, NULL); | 
|---|
 | 2227 | +}  | 
|---|
 | 2228 | +  | 
|---|
 | 2229 | +static void fixup_rt_mutex_blocked(struct rt_mutex *lock)  | 
|---|
 | 2230 | +{  | 
|---|
 | 2231 | +	struct task_struct *tsk = current;  | 
|---|
 | 2232 | +	/*  | 
|---|
 | 2233 | +	 * RT has a problem here when the wait got interrupted by a timeout  | 
|---|
 | 2234 | +	 * or a signal. task->pi_blocked_on is still set. The task must  | 
|---|
 | 2235 | +	 * acquire the hash bucket lock when returning from this function.  | 
|---|
 | 2236 | +	 *  | 
|---|
 | 2237 | +	 * If the hash bucket lock is contended then the  | 
|---|
 | 2238 | +	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in  | 
|---|
 | 2239 | +	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by  | 
|---|
 | 2240 | +	 * clearing task->pi_blocked_on which removes the task from the  | 
|---|
 | 2241 | +	 * boosting chain of the rtmutex. That's correct because the task  | 
|---|
 | 2242 | +	 * is no longer blocked on it.  | 
|---|
 | 2243 | +	 */  | 
|---|
 | 2244 | +	raw_spin_lock(&tsk->pi_lock);  | 
|---|
 | 2245 | +	tsk->pi_blocked_on = NULL;  | 
|---|
 | 2246 | +	raw_spin_unlock(&tsk->pi_lock);  | 
|---|
| 1726 | 2247 |  } | 
|---|
| 1727 | 2248 |   | 
|---|
| 1728 | 2249 |  /** | 
|---|
| .. | .. | 
|---|
| 1755 | 2276 |  	if (try_to_take_rt_mutex(lock, task, NULL)) | 
|---|
| 1756 | 2277 |  		return 1; | 
|---|
| 1757 | 2278 |   | 
|---|
 | 2279 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 2280 | +	/*  | 
|---|
 | 2281 | +	 * In PREEMPT_RT there's an added race.  | 
|---|
 | 2282 | +	 * If the task that we are about to requeue times out, it can  | 
|---|
 | 2283 | +	 * set pi_blocked_on to PI_WAKEUP_INPROGRESS. This tells the requeue  | 
|---|
 | 2284 | +	 * to skip this task. But right after the task sets  | 
|---|
 | 2285 | +	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then  | 
|---|
 | 2286 | +	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.  | 
|---|
 | 2287 | +	 * This will replace the PI_WAKEUP_INPROGRESS with the actual  | 
|---|
 | 2288 | +	 * lock that it blocks on. We *must not* place this task  | 
|---|
 | 2289 | +	 * on this proxy lock in that case.  | 
|---|
 | 2290 | +	 *  | 
|---|
 | 2291 | +	 * To prevent this race, we first take the task's pi_lock  | 
|---|
 | 2292 | +	 * and check if it has updated its pi_blocked_on. If it has,  | 
|---|
 | 2293 | +	 * we assume that it woke up and we return -EAGAIN.  | 
|---|
 | 2294 | +	 * Otherwise, we set the task's pi_blocked_on to  | 
|---|
 | 2295 | +	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up  | 
|---|
 | 2296 | +	 * it will know that we are in the process of requeuing it.  | 
|---|
 | 2297 | +	 */  | 
|---|
 | 2298 | +	raw_spin_lock(&task->pi_lock);  | 
|---|
 | 2299 | +	if (task->pi_blocked_on) {  | 
|---|
 | 2300 | +		raw_spin_unlock(&task->pi_lock);  | 
|---|
 | 2301 | +		return -EAGAIN;  | 
|---|
 | 2302 | +	}  | 
|---|
 | 2303 | +	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;  | 
|---|
 | 2304 | +	raw_spin_unlock(&task->pi_lock);  | 
|---|
 | 2305 | +#endif  | 
|---|
 | 2306 | +  | 
|---|
| 1758 | 2307 |  	/* We enforce deadlock detection for futexes */ | 
|---|
| 1759 | 2308 |  	ret = task_blocks_on_rt_mutex(lock, waiter, task, | 
|---|
| 1760 | 2309 |  				      RT_MUTEX_FULL_CHAINWALK); | 
|---|
| .. | .. | 
|---|
| 1769 | 2318 |  		ret = 0; | 
|---|
| 1770 | 2319 |  	} | 
|---|
| 1771 | 2320 |   | 
|---|
| 1772 |  | -	debug_rt_mutex_print_deadlock(waiter);  | 
|---|
 | 2321 | +	if (ret)  | 
|---|
 | 2322 | +		fixup_rt_mutex_blocked(lock);  | 
|---|
| 1773 | 2323 |   | 
|---|
| 1774 | 2324 |  	return ret; | 
|---|
| 1775 | 2325 |  } | 
|---|
| .. | .. | 
|---|
| 1835 | 2385 |   *			been started. | 
|---|
| 1836 | 2386 |   * @waiter:		the pre-initialized rt_mutex_waiter | 
|---|
| 1837 | 2387 |   * | 
|---|
| 1838 |  | - * Wait for the the lock acquisition started on our behalf by  | 
|---|
 | 2388 | + * Wait for the lock acquisition started on our behalf by  | 
|---|
| 1839 | 2389 |   * rt_mutex_start_proxy_lock(). Upon failure, the caller must call | 
|---|
| 1840 | 2390 |   * rt_mutex_cleanup_proxy_lock(). | 
|---|
| 1841 | 2391 |   * | 
|---|
| .. | .. | 
|---|
| 1854 | 2404 |  	raw_spin_lock_irq(&lock->wait_lock); | 
|---|
| 1855 | 2405 |  	/* sleep on the mutex */ | 
|---|
| 1856 | 2406 |  	set_current_state(TASK_INTERRUPTIBLE); | 
|---|
| 1857 |  | -	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);  | 
|---|
 | 2407 | +	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);  | 
|---|
| 1858 | 2408 |  	/* | 
|---|
| 1859 | 2409 |  	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | 
|---|
| 1860 | 2410 |  	 * have to fix that up. | 
|---|
| 1861 | 2411 |  	 */ | 
|---|
| 1862 | 2412 |  	fixup_rt_mutex_waiters(lock); | 
|---|
 | 2413 | +	if (ret)  | 
|---|
 | 2414 | +		fixup_rt_mutex_blocked(lock);  | 
|---|
 | 2415 | +  | 
|---|
| 1863 | 2416 |  	raw_spin_unlock_irq(&lock->wait_lock); | 
|---|
| 1864 | 2417 |   | 
|---|
| 1865 | 2418 |  	return ret; | 
|---|
| .. | .. | 
|---|
| 1921 | 2474 |   | 
|---|
| 1922 | 2475 |  	return cleanup; | 
|---|
| 1923 | 2476 |  } | 
|---|
 | 2477 | +  | 
|---|
 | 2478 | +static inline int  | 
|---|
 | 2479 | +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)  | 
|---|
 | 2480 | +{  | 
|---|
 | 2481 | +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH  | 
|---|
 | 2482 | +	unsigned int tmp;  | 
|---|
 | 2483 | +  | 
|---|
 | 2484 | +	if (ctx->deadlock_inject_countdown-- == 0) {  | 
|---|
 | 2485 | +		tmp = ctx->deadlock_inject_interval;  | 
|---|
 | 2486 | +		if (tmp > UINT_MAX/4)  | 
|---|
 | 2487 | +			tmp = UINT_MAX;  | 
|---|
 | 2488 | +		else  | 
|---|
 | 2489 | +			tmp = tmp*2 + tmp + tmp/2;  | 
|---|
 | 2490 | +  | 
|---|
 | 2491 | +		ctx->deadlock_inject_interval = tmp;  | 
|---|
 | 2492 | +		ctx->deadlock_inject_countdown = tmp;  | 
|---|
 | 2493 | +		ctx->contending_lock = lock;  | 
|---|
 | 2494 | +  | 
|---|
 | 2495 | +		ww_mutex_unlock(lock);  | 
|---|
 | 2496 | +  | 
|---|
 | 2497 | +		return -EDEADLK;  | 
|---|
 | 2498 | +	}  | 
|---|
 | 2499 | +#endif  | 
|---|
 | 2500 | +  | 
|---|
 | 2501 | +	return 0;  | 
|---|
 | 2502 | +}  | 
|---|
 | 2503 | +  | 
|---|
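To spell out the arithmetic in the branch above (an explanatory sketch, not part of the patch): `tmp*2 + tmp + tmp/2` grows the injection interval by roughly 3.5x in integer arithmetic, and the UINT_MAX/4 check saturates it instead of letting it overflow. Assuming the usual ww_acquire_init() seed of 1 under CONFIG_DEBUG_WW_MUTEX_SLOWPATH, successive intervals run 1, 3, 10, 35, 122, ...

```c
/* Illustrative only -- not part of this patch. */
#include <linux/limits.h>

/* The saturating ~3.5x interval growth applied by the code above. */
static inline unsigned int example_next_inject_interval(unsigned int tmp)
{
	if (tmp > UINT_MAX / 4)
		return UINT_MAX;		/* saturate rather than overflow */
	return tmp * 2 + tmp + tmp / 2;		/* 1 -> 3 -> 10 -> 35 -> 122 ... */
}
```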
 | 2504 | +#ifdef CONFIG_PREEMPT_RT  | 
|---|
 | 2505 | +int __sched  | 
|---|
 | 2506 | +ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)  | 
|---|
 | 2507 | +{  | 
|---|
 | 2508 | +	int ret;  | 
|---|
 | 2509 | +  | 
|---|
 | 2510 | +	might_sleep();  | 
|---|
 | 2511 | +  | 
|---|
 | 2512 | +	mutex_acquire_nest(&lock->base.dep_map, 0, 0,  | 
|---|
 | 2513 | +			   ctx ? &ctx->dep_map : NULL, _RET_IP_);  | 
|---|
 | 2514 | +	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,  | 
|---|
 | 2515 | +				ctx);  | 
|---|
 | 2516 | +	if (ret)  | 
|---|
 | 2517 | +		mutex_release(&lock->base.dep_map, _RET_IP_);  | 
|---|
 | 2518 | +	else if (!ret && ctx && ctx->acquired > 1)  | 
|---|
 | 2519 | +		return ww_mutex_deadlock_injection(lock, ctx);  | 
|---|
 | 2520 | +  | 
|---|
 | 2521 | +	return ret;  | 
|---|
 | 2522 | +}  | 
|---|
 | 2523 | +EXPORT_SYMBOL(ww_mutex_lock_interruptible);  | 
|---|
 | 2524 | +  | 
|---|
 | 2525 | +int __sched  | 
|---|
 | 2526 | +ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)  | 
|---|
 | 2527 | +{  | 
|---|
 | 2528 | +	int ret;  | 
|---|
 | 2529 | +  | 
|---|
 | 2530 | +	might_sleep();  | 
|---|
 | 2531 | +  | 
|---|
 | 2532 | +	mutex_acquire_nest(&lock->base.dep_map, 0, 0,  | 
|---|
 | 2533 | +			   ctx ? &ctx->dep_map : NULL, _RET_IP_);  | 
|---|
 | 2534 | +	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,  | 
|---|
 | 2535 | +				ctx);  | 
|---|
 | 2536 | +	if (ret)  | 
|---|
 | 2537 | +		mutex_release(&lock->base.dep_map, _RET_IP_);  | 
|---|
 | 2538 | +	else if (!ret && ctx && ctx->acquired > 1)  | 
|---|
 | 2539 | +		return ww_mutex_deadlock_injection(lock, ctx);  | 
|---|
 | 2540 | +  | 
|---|
 | 2541 | +	return ret;  | 
|---|
 | 2542 | +}  | 
|---|
 | 2543 | +EXPORT_SYMBOL(ww_mutex_lock);  | 
|---|
 | 2544 | +  | 
|---|
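As a usage reminder (illustrative, not part of the patch): the -EDEADLK these PREEMPT_RT variants can return is meant to be consumed by the caller's standard ww_mutex backoff loop. A two-lock sketch of that loop follows; every example_* name is hypothetical.

```c
/* Illustrative only -- not part of this patch. */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_class);	/* hypothetical ww_class */

/* Take two ww_mutexes of example_class without risking deadlock. */
static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &example_class);

	/* nothing acquired in this context yet, so no -EDEADLK expected */
	ww_mutex_lock(a, &ctx);

	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		struct ww_mutex *tmp;

		/* back off: drop what we hold, then sleep on the blocker */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);

		/* we now hold the old 'b'; retry the remaining lock */
		tmp = a;
		a = b;
		b = tmp;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
```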
 | 2545 | +void __sched ww_mutex_unlock(struct ww_mutex *lock)  | 
|---|
 | 2546 | +{  | 
|---|
 | 2547 | +	/*  | 
|---|
 | 2548 | +	 * The unlocking fastpath is the 0->1 transition from 'locked'  | 
|---|
 | 2549 | +	 * into 'unlocked' state:  | 
|---|
 | 2550 | +	 */  | 
|---|
 | 2551 | +	if (lock->ctx) {  | 
|---|
 | 2552 | +#ifdef CONFIG_DEBUG_MUTEXES  | 
|---|
 | 2553 | +		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);  | 
|---|
 | 2554 | +#endif  | 
|---|
 | 2555 | +		if (lock->ctx->acquired > 0)  | 
|---|
 | 2556 | +			lock->ctx->acquired--;  | 
|---|
 | 2557 | +		lock->ctx = NULL;  | 
|---|
 | 2558 | +	}  | 
|---|
 | 2559 | +  | 
|---|
 | 2560 | +	mutex_release(&lock->base.dep_map, _RET_IP_);  | 
|---|
 | 2561 | +	__rt_mutex_unlock(&lock->base.lock);  | 
|---|
 | 2562 | +}  | 
|---|
 | 2563 | +EXPORT_SYMBOL(ww_mutex_unlock);  | 
|---|
 | 2564 | +  | 
|---|
 | 2565 | +int __rt_mutex_owner_current(struct rt_mutex *lock)  | 
|---|
 | 2566 | +{  | 
|---|
 | 2567 | +	return rt_mutex_owner(lock) == current;  | 
|---|
 | 2568 | +}  | 
|---|
 | 2569 | +EXPORT_SYMBOL(__rt_mutex_owner_current);  | 
|---|
 | 2570 | +#endif  | 
|---|
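Finally, a small sketch (illustrative, not part of the patch) of the kind of ownership assertion the exported __rt_mutex_owner_current() enables on PREEMPT_RT; the helper name is made up, and the declaration is assumed to be visible through the RT rtmutex headers.

```c
/* Illustrative only -- not part of this patch. */
#include <linux/bug.h>
#include <linux/rtmutex.h>

/* Complain if the current task does not own the rtmutex-backed lock. */
static inline void example_assert_rt_mutex_held(struct rt_mutex *lock)
{
	WARN_ON_ONCE(!__rt_mutex_owner_current(lock));
}
```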