From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: remove PREEMPT_RT support from rtmutex.c

Restore the stock (non-RT) kernel 5.10 rtmutex implementation: drop the
adaptive-spinlock based rt_spin_lock*() family, the ww_mutex-on-rtmutex
support, the sleeper wake queues and the PI_WAKEUP_INPROGRESS /
PI_REQUEUE_INPROGRESS proxy-lock handling, and bring back
rt_mutex_timed_lock() and the plain slow-path code.
---
 kernel/kernel/locking/rtmutex.c |  965 +++++++++-----------------------------------------------
 1 file changed, 163 insertions(+), 802 deletions(-)

diff --git a/kernel/kernel/locking/rtmutex.c b/kernel/kernel/locking/rtmutex.c
index 47d59f9..419cc66 100644
--- a/kernel/kernel/locking/rtmutex.c
+++ b/kernel/kernel/locking/rtmutex.c
@@ -8,11 +8,6 @@
  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
- * Adaptive Spinlocks:
- *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
- *				     and Peter Morreale,
- * Adaptive Spinlocks simplification:
- *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
  *  See Documentation/locking/rt-mutex-design.rst for details.
  */
@@ -25,7 +20,6 @@
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
 #include <trace/hooks/dtask.h>
-#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
@@ -143,12 +137,6 @@
 		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-{
-	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
-		waiter != PI_REQUEUE_INPROGRESS;
-}
-
 /*
  * We can speed up the acquire/release, if there's no debugging state to be
  * set up.
@@ -240,7 +228,7 @@
  * Only use with rt_mutex_waiter_{less,equal}()
  */
 #define task_to_waiter(p)	\
-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
 
 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -278,27 +266,6 @@
 		return left->deadline == right->deadline;
 
 	return 1;
-}
-
-#define STEAL_NORMAL  0
-#define STEAL_LATERAL 1
-
-static inline int
-rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
-{
-	struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
-
-	if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
-		return 1;
-
-	/*
-	 * Note that RT tasks are excluded from lateral-steals
-	 * to prevent the introduction of an unbounded latency.
-	 */
-	if (mode == STEAL_NORMAL || rt_task(waiter->task))
-		return 0;
-
-	return rt_mutex_waiter_equal(waiter, top_waiter);
 }
 
 static void
@@ -405,14 +372,6 @@
 	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
 }
 
-static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
-{
-	if (waiter->savestate)
-		wake_up_lock_sleeper(waiter->task);
-	else
-		wake_up_process(waiter->task);
-}
-
 /*
  * Max number of times we'll walk the boosting chain:
  */
@@ -420,8 +379,7 @@
 
 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
-	return rt_mutex_real_waiter(p->pi_blocked_on) ?
-		p->pi_blocked_on->lock : NULL;
+	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 }
 
 /*
@@ -557,7 +515,7 @@
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!rt_mutex_real_waiter(waiter))
+	if (!waiter)
 		goto out_unlock_pi;
 
 	/*
@@ -640,6 +598,7 @@
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
@@ -736,16 +695,13 @@
 	 * follow here. This is the end of the chain we are walking.
 	 */
 	if (!rt_mutex_owner(lock)) {
-		struct rt_mutex_waiter *lock_top_waiter;
-
 		/*
 		 * If the requeue [7] above changed the top waiter,
 		 * then we need to wake the new top waiter up to try
 		 * to get the lock.
 		 */
-		lock_top_waiter = rt_mutex_top_waiter(lock);
-		if (prerequeue_top_waiter != lock_top_waiter)
-			rt_mutex_wake_waiter(lock_top_waiter);
+		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+			wake_up_process(rt_mutex_top_waiter(lock)->task);
 		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
@@ -846,11 +802,9 @@
  * @task:   The task which wants to acquire the lock
  * @waiter: The waiter that is queued to the lock's wait tree if the
  *	    callsite called task_blocked_on_lock(), otherwise NULL
- * @mode:   Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
  */
-static int __try_to_take_rt_mutex(struct rt_mutex *lock,
-				  struct task_struct *task,
-				  struct rt_mutex_waiter *waiter, int mode)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+				struct rt_mutex_waiter *waiter)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -886,11 +840,12 @@
 	 */
 	if (waiter) {
 		/*
-		 * If waiter is not the highest priority waiter of @lock,
-		 * or its peer when lateral steal is allowed, give up.
+		 * If waiter is not the highest priority waiter of
+		 * @lock, give up.
 		 */
-		if (!rt_mutex_steal(lock, waiter, mode))
+		if (waiter != rt_mutex_top_waiter(lock))
 			return 0;
+
 		/*
 		 * We can acquire the lock. Remove the waiter from the
 		 * lock waiters tree.
@@ -908,12 +863,14 @@
 		 */
 		if (rt_mutex_has_waiters(lock)) {
 			/*
-			 * If @task->prio is greater than the top waiter
-			 * priority (kernel view), or equal to it when a
-			 * lateral steal is forbidden, @task lost.
+			 * If @task->prio is greater than or equal to
+			 * the top waiter priority (kernel view),
+			 * @task lost.
 			 */
-			if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
+			if (!rt_mutex_waiter_less(task_to_waiter(task),
+						  rt_mutex_top_waiter(lock)))
 				return 0;
+
 			/*
 			 * The current top waiter stays enqueued. We
 			 * don't have to change anything in the lock
@@ -960,329 +917,6 @@
 	return 1;
 }
 
-#ifdef CONFIG_PREEMPT_RT
-/*
- * preemptible spin_lock functions:
- */
-static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
-					 void  (*slowfn)(struct rt_mutex *lock))
-{
-	might_sleep_no_state_check();
-
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return;
-	else
-		slowfn(lock);
-}
-
-static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-					   void  (*slowfn)(struct rt_mutex *lock))
-{
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-		return;
-	else
-		slowfn(lock);
-}
-#ifdef CONFIG_SMP
-/*
- * Note that owner is a speculative pointer and dereferencing relies
- * on rcu_read_lock() and the check against the lock owner.
- */
-static int adaptive_wait(struct rt_mutex *lock,
-			 struct task_struct *owner)
-{
-	int res = 0;
-
-	rcu_read_lock();
-	for (;;) {
-		if (owner != rt_mutex_owner(lock))
-			break;
-		/*
-		 * Ensure that owner->on_cpu is dereferenced _after_
-		 * checking the above to be valid.
-		 */
-		barrier();
-		if (!owner->on_cpu) {
-			res = 1;
-			break;
-		}
-		cpu_relax();
-	}
-	rcu_read_unlock();
-	return res;
-}
-#else
-static int adaptive_wait(struct rt_mutex *lock,
-			 struct task_struct *orig_owner)
-{
-	return 1;
-}
-#endif
-
-static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
-				   struct rt_mutex_waiter *waiter,
-				   struct task_struct *task,
-				   enum rtmutex_chainwalk chwalk);
-/*
- * Slow path lock function spin_lock style: this variant is very
- * careful not to miss any non-lock wakeups.
- *
- * We store the current state under p->pi_lock in p->saved_state and
- * the try_to_wake_up() code handles this accordingly.
- */
-void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
-					  struct rt_mutex_waiter *waiter,
-					  unsigned long flags)
-{
-	struct task_struct *lock_owner, *self = current;
-	struct rt_mutex_waiter *top_waiter;
-	int ret;
-
-	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
-		return;
-
-	BUG_ON(rt_mutex_owner(lock) == self);
-
-	/*
-	 * We save whatever state the task is in and we'll restore it
-	 * after acquiring the lock taking real wakeups into account
-	 * as well. We are serialized via pi_lock against wakeups. See
-	 * try_to_wake_up().
-	 */
-	raw_spin_lock(&self->pi_lock);
-	self->saved_state = self->state;
-	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-	raw_spin_unlock(&self->pi_lock);
-
-	ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
-	BUG_ON(ret);
-
-	for (;;) {
-		/* Try to acquire the lock again. */
-		if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
-			break;
-
-		top_waiter = rt_mutex_top_waiter(lock);
-		lock_owner = rt_mutex_owner(lock);
-
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-		if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
-			preempt_schedule_lock();
-
-		raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-		raw_spin_lock(&self->pi_lock);
-		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-		raw_spin_unlock(&self->pi_lock);
-	}
-
-	/*
-	 * Restore the task state to current->saved_state. We set it
-	 * to the original state above and the try_to_wake_up() code
-	 * has possibly updated it when a real (non-rtmutex) wakeup
-	 * happened while we were blocked. Clear saved_state so
-	 * try_to_wakeup() does not get confused.
-	 */
-	raw_spin_lock(&self->pi_lock);
-	__set_current_state_no_track(self->saved_state);
-	self->saved_state = TASK_RUNNING;
-	raw_spin_unlock(&self->pi_lock);
-
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit
-	 * unconditionally. We might have to fix that up:
-	 */
-	fixup_rt_mutex_waiters(lock);
-
-	BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
-	BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
-}
-
-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
-{
-	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-
-	rt_mutex_init_waiter(&waiter, true);
-
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	rt_spin_lock_slowlock_locked(lock, &waiter, flags);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	debug_rt_mutex_free_waiter(&waiter);
-}
-
-static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
-					     struct wake_q_head *wake_q,
-					     struct wake_q_head *wq_sleeper);
-/*
- * Slow path to release a rt_mutex spin_lock style
- */
-void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
-{
-	unsigned long flags;
-	DEFINE_WAKE_Q(wake_q);
-	DEFINE_WAKE_Q(wake_sleeper_q);
-	bool postunlock;
-
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-	if (postunlock)
-		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-}
-
-void __lockfunc rt_spin_lock(spinlock_t *lock)
-{
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-	rcu_read_lock();
-	migrate_disable();
-}
-EXPORT_SYMBOL(rt_spin_lock);
-
-void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
-{
-	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
-{
-	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-	rcu_read_lock();
-	migrate_disable();
-}
-EXPORT_SYMBOL(rt_spin_lock_nested);
-
-void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
-				       struct lockdep_map *nest_lock)
-{
-	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-	rcu_read_lock();
-	migrate_disable();
-}
-EXPORT_SYMBOL(rt_spin_lock_nest_lock);
-#endif
-
-void __lockfunc rt_spin_unlock(spinlock_t *lock)
-{
-	/* NOTE: we always pass in '1' for nested, for simplicity */
-	spin_release(&lock->dep_map, _RET_IP_);
-	migrate_enable();
-	rcu_read_unlock();
-	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(rt_spin_unlock);
-
-void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
-{
-	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(__rt_spin_unlock);
-
-/*
- * Wait for the lock to get unlocked: instead of polling for an unlock
- * (like raw spinlocks do), we lock and unlock, to force the kernel to
- * schedule if there's contention:
- */
-void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)
-{
-	spin_lock(lock);
-	spin_unlock(lock);
-}
-EXPORT_SYMBOL(rt_spin_lock_unlock);
-
-int __lockfunc rt_spin_trylock(spinlock_t *lock)
-{
-	int ret;
-
-	ret = __rt_mutex_trylock(&lock->lock);
-	if (ret) {
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		rcu_read_lock();
-		migrate_disable();
-	}
-	return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock);
-
-int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
-{
-	int ret;
-
-	local_bh_disable();
-	ret = __rt_mutex_trylock(&lock->lock);
-	if (ret) {
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		rcu_read_lock();
-		migrate_disable();
-	} else {
-		local_bh_enable();
-	}
-	return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-void
-__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
-EXPORT_SYMBOL(__rt_spin_lock_init);
-
-#endif /* PREEMPT_RT */
-
-#ifdef CONFIG_PREEMPT_RT
-	static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
-	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
-
-	if (!hold_ctx)
-		return 0;
-
-	if (unlikely(ctx == hold_ctx))
-		return -EALREADY;
-
-	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
-	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-		ctx->contending_lock = ww;
-#endif
-		return -EDEADLK;
-	}
-
-	return 0;
-}
-#else
-	static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	BUG();
-	return 0;
-}
-
-#endif
-
-static inline int
-try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-		     struct rt_mutex_waiter *waiter)
-{
-	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
-}
-
 /*
  * Task blocks on lock.
  *
@@ -1315,22 +949,6 @@
 		return -EDEADLK;
 
 	raw_spin_lock(&task->pi_lock);
-	/*
-	 * In the case of futex requeue PI, this will be a proxy
-	 * lock. The task will wake unaware that it is enqueueed on
-	 * this lock. Avoid blocking on two locks and corrupting
-	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
-	 * flag. futex_wait_requeue_pi() sets this when it wakes up
-	 * before requeue (due to a signal or timeout). Do not enqueue
-	 * the task if PI_WAKEUP_INPROGRESS is set.
-	 */
-	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-		raw_spin_unlock(&task->pi_lock);
-		return -EAGAIN;
-	}
-
-       BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-
 	waiter->task = task;
 	waiter->lock = lock;
 	waiter->prio = task->prio;
@@ -1354,7 +972,7 @@
 		rt_mutex_enqueue_pi(owner, waiter);
 
 		rt_mutex_adjust_prio(owner);
-		if (rt_mutex_real_waiter(owner->pi_blocked_on))
+		if (owner->pi_blocked_on)
 			chain_walk = 1;
 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
@@ -1396,7 +1014,6 @@
  * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
-				    struct wake_q_head *wake_sleeper_q,
 				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
@@ -1436,10 +1053,7 @@
 	 * Pairs with preempt_enable() in rt_mutex_postunlock();
 	 */
 	preempt_disable();
-	if (waiter->savestate)
-		wake_q_add_sleeper(wake_sleeper_q, waiter->task);
-	else
-		wake_q_add(wake_q, waiter->task);
+	wake_q_add(wake_q, waiter->task);
 	raw_spin_unlock(&current->pi_lock);
 }
 
@@ -1454,7 +1068,7 @@
 {
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
-	struct rt_mutex *next_lock = NULL;
+	struct rt_mutex *next_lock;
 
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -1480,8 +1094,7 @@
 	rt_mutex_adjust_prio(owner);
 
 	/* Store the lock on which owner is blocked or NULL */
-	if (rt_mutex_real_waiter(owner->pi_blocked_on))
-		next_lock = task_blocked_on_lock(owner);
+	next_lock = task_blocked_on_lock(owner);
 
 	raw_spin_unlock(&owner->pi_lock);
 
@@ -1517,28 +1130,26 @@
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
-	if (!rt_mutex_real_waiter(waiter) ||
-	    rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 	next_lock = waiter->lock;
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
 
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
 				   next_lock, NULL, task);
 }
 
-void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 {
 	debug_rt_mutex_init_waiter(waiter);
 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
 	RB_CLEAR_NODE(&waiter->tree_entry);
 	waiter->task = NULL;
-	waiter->savestate = savestate;
 }
 
 /**
@@ -1554,8 +1165,7 @@
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct hrtimer_sleeper *timeout,
-		    struct rt_mutex_waiter *waiter,
-		    struct ww_acquire_ctx *ww_ctx)
+		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
 
@@ -1565,22 +1175,23 @@
 		if (try_to_take_rt_mutex(lock, current, waiter))
 			break;
 
-		if (timeout && !timeout->task) {
-			ret = -ETIMEDOUT;
-			break;
-		}
-		if (signal_pending_state(state, current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		if (ww_ctx && ww_ctx->acquired > 0) {
-			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		/*
+		 * TASK_INTERRUPTIBLE checks for signals and
+		 * timeout. Ignored otherwise.
+		 */
+		if (likely(state == TASK_INTERRUPTIBLE)) {
+			/* Signal pending? */
+			if (signal_pending(current))
+				ret = -EINTR;
+			if (timeout && !timeout->task)
+				ret = -ETIMEDOUT;
 			if (ret)
 				break;
 		}
 
 		raw_spin_unlock_irq(&lock->wait_lock);
+
+		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule();
 
@@ -1603,147 +1214,14 @@
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;
 
+	/*
+	 * Yell loudly and stop the task right here.
+	 */
+	rt_mutex_print_deadlock(w);
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 	}
-}
-
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
-						   struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	/*
-	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
-	 * but released with a normal mutex_unlock in this call.
-	 *
-	 * This should never happen, always use ww_mutex_unlock.
-	 */
-	DEBUG_LOCKS_WARN_ON(ww->ctx);
-
-	/*
-	 * Not quite done after calling ww_acquire_done() ?
-	 */
-	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-
-	if (ww_ctx->contending_lock) {
-		/*
-		 * After -EDEADLK you tried to
-		 * acquire a different ww_mutex? Bad!
-		 */
-		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
-		/*
-		 * You called ww_mutex_lock after receiving -EDEADLK,
-		 * but 'forgot' to unlock everything else first?
-		 */
-		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
-		ww_ctx->contending_lock = NULL;
-	}
-
-	/*
-	 * Naughty, using a different class will lead to undefined behavior!
-	 */
-	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
-	ww_ctx->acquired++;
-}
-
-#ifdef CONFIG_PREEMPT_RT
-static void ww_mutex_account_lock(struct rt_mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx)
-{
-	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
-	struct rt_mutex_waiter *waiter, *n;
-
-	/*
-	 * This branch gets optimized out for the common case,
-	 * and is only important for ww_mutex_lock.
-	 */
-	ww_mutex_lock_acquired(ww, ww_ctx);
-	ww->ctx = ww_ctx;
-
-	/*
-	 * Give any possible sleeping processes the chance to wake up,
-	 * so they can recheck if they have to back off.
-	 */
-	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
-					     tree_entry) {
-		/* XXX debug rt mutex waiter wakeup */
-
-		BUG_ON(waiter->lock != lock);
-		rt_mutex_wake_waiter(waiter);
-	}
-}
-
-#else
-
-static void ww_mutex_account_lock(struct rt_mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx)
-{
-	BUG();
-}
-#endif
-
-int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk,
-				     struct ww_acquire_ctx *ww_ctx,
-				     struct rt_mutex_waiter *waiter)
-{
-	int ret;
-
-#ifdef CONFIG_PREEMPT_RT
-	if (ww_ctx) {
-		struct ww_mutex *ww;
-
-		ww = container_of(lock, struct ww_mutex, base.lock);
-		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
-			return -EALREADY;
-	}
-#endif
-
-	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		if (ww_ctx)
-			ww_mutex_account_lock(lock, ww_ctx);
-		return 0;
-	}
-
-	set_current_state(state);
-
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
-	if (likely(!ret)) {
-		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
-					  ww_ctx);
-	} else if (ww_ctx) {
-		/* ww_mutex received EDEADLK, let it become EALREADY */
-		ret = __mutex_lock_check_stamp(lock, ww_ctx);
-		BUG_ON(!ret);
-	}
-
-	if (unlikely(ret)) {
-		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, waiter);
-		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
-		if (!ww_ctx)
-			rt_mutex_handle_deadlock(ret, chwalk, waiter);
-	} else if (ww_ctx) {
-		ww_mutex_account_lock(lock, ww_ctx);
-	}
-
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit
-	 * unconditionally. We might have to fix that up.
-	 */
-	fixup_rt_mutex_waiters(lock);
-	return ret;
 }
 
 /*
@@ -1752,14 +1230,13 @@
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  enum rtmutex_chainwalk chwalk,
-		  struct ww_acquire_ctx *ww_ctx)
+		  enum rtmutex_chainwalk chwalk)
 {
 	struct rt_mutex_waiter waiter;
 	unsigned long flags;
 	int ret = 0;
 
-	rt_mutex_init_waiter(&waiter, false);
+	rt_mutex_init_waiter(&waiter);
 
 	/*
 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
@@ -1771,8 +1248,35 @@
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
-				       &waiter);
+	/* Try to acquire the lock again: */
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+		return 0;
+	}
+
+	set_current_state(state);
+
+	/* Setup the timer, when timeout != NULL */
+	if (unlikely(timeout))
+		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+
+	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+
+	if (likely(!ret))
+		/* sleep on the mutex */
+		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+
+	if (unlikely(ret)) {
+		__set_current_state(TASK_RUNNING);
+		remove_waiter(lock, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+	}
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit
+	 * unconditionally. We might have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
 
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1833,8 +1337,7 @@
  * Return whether the current task needs to call rt_mutex_postunlock().
  */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-					struct wake_q_head *wake_q,
-					struct wake_q_head *wake_sleeper_q)
+					struct wake_q_head *wake_q)
 {
 	unsigned long flags;
 
@@ -1888,7 +1391,7 @@
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
+	mark_wakeup_next_waiter(wake_q, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return true; /* call rt_mutex_postunlock() */
@@ -1902,16 +1405,29 @@
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
-		  struct ww_acquire_ctx *ww_ctx,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk,
-				struct ww_acquire_ctx *ww_ctx))
+				enum rtmutex_chainwalk chwalk))
 {
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
+	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+}
+
+static inline int
+rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+			struct hrtimer_sleeper *timeout,
+			enum rtmutex_chainwalk chwalk,
+			int (*slowfn)(struct rt_mutex *lock, int state,
+				      struct hrtimer_sleeper *timeout,
+				      enum rtmutex_chainwalk chwalk))
+{
+	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+		return 0;
+
+	return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
@@ -1927,11 +1443,9 @@
 /*
  * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void rt_mutex_postunlock(struct wake_q_head *wake_q,
-			 struct wake_q_head *wake_sleeper_q)
+void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
 	wake_up_q(wake_q);
-	wake_up_q_sleeper(wake_sleeper_q);
 
 	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
 	preempt_enable();
@@ -1940,48 +1454,24 @@
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
 		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh,
-				   struct wake_q_head *wq_sleeper))
+				   struct wake_q_head *wqh))
 {
 	DEFINE_WAKE_Q(wake_q);
-	DEFINE_WAKE_Q(wake_sleeper_q);
 
 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 		return;
 
-	if (slowfn(lock, &wake_q, &wake_sleeper_q))
-		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-}
-
-int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-{
-	might_sleep();
-	return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
-}
-
-/**
- * rt_mutex_lock_state - lock a rt_mutex with a given state
- *
- * @lock:      The rt_mutex to be locked
- * @state:     The state to set when blocking on the rt_mutex
- */
-static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
-					      unsigned int subclass, int state)
-{
-	int ret;
-
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	ret = __rt_mutex_lock_state(lock, state);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
-
-	return ret;
+	if (slowfn(lock, &wake_q))
+		rt_mutex_postunlock(&wake_q);
 }
 
 static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
 {
-	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
+	might_sleep();
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -2022,7 +1512,18 @@
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -2039,17 +1540,38 @@
 	return __rt_mutex_slowtrylock(lock);
 }
 
-int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ *			the timeout structure is provided
+ *			by the caller
+ *
+ * @lock:		the rt_mutex to be locked
+ * @timeout:		timeout structure or NULL (no timeout)
+ *
+ * Returns:
+ *  0		on success
+ * -EINTR	when interrupted by a signal
+ * -ETIMEDOUT	when the timeout expired
+ */
+int
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
-#ifdef CONFIG_PREEMPT_RT
-	if (WARN_ON_ONCE(in_irq() || in_nmi()))
-#else
-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
-		return 0;
+	int ret;
 
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	might_sleep();
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_MIN_CHAINWALK,
+				       rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
+	return ret;
 }
+EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
@@ -2066,7 +1588,10 @@
 {
 	int ret;
 
-	ret = __rt_mutex_trylock(lock);
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+		return 0;
+
+	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	else
@@ -2074,11 +1599,7 @@
 
 	return ret;
 }
-
-void __sched __rt_mutex_unlock(struct rt_mutex *lock)
-{
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
-}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
 /**
  * rt_mutex_unlock - unlock a rt_mutex
@@ -2088,14 +1609,17 @@
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
-	__rt_mutex_unlock(lock);
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
-static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
-					     struct wake_q_head *wake_q,
-					     struct wake_q_head *wq_sleeper)
+/**
+ * Futex variant, that since futex variants do not use the fast-path, can be
+ * simple and will not need to retry.
+ */
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+				    struct wake_q_head *wake_q)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -2112,35 +1636,23 @@
 	 * avoid inversion prior to the wakeup.  preempt_disable()
 	 * therein pairs with rt_mutex_postunlock().
 	 */
-	mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
+	mark_wakeup_next_waiter(wake_q, lock);
 
 	return true; /* call postunlock() */
-}
-
-/**
- * Futex variant, that since futex variants do not use the fast-path, can be
- * simple and will not need to retry.
- */
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
-				     struct wake_q_head *wake_q,
-				     struct wake_q_head *wq_sleeper)
-{
-	return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
 }
 
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
-	DEFINE_WAKE_Q(wake_sleeper_q);
 	unsigned long flags;
 	bool postunlock;
 
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
+	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
-		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+		rt_mutex_postunlock(&wake_q);
 }
 
 /**
@@ -2154,6 +1666,9 @@
 void rt_mutex_destroy(struct rt_mutex *lock)
 {
 	WARN_ON(rt_mutex_is_locked(lock));
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	lock->magic = NULL;
+#endif
 }
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
@@ -2176,7 +1691,7 @@
 	if (name && key)
 		debug_rt_mutex_init(lock, name, key);
 }
-EXPORT_SYMBOL(__rt_mutex_init);
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
 /**
  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -2196,14 +1711,6 @@
 				struct task_struct *proxy_owner)
 {
 	__rt_mutex_init(lock, NULL, NULL);
-#ifdef CONFIG_DEBUG_SPINLOCK
-	/*
-	 * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is
-	 * holding the ->wait_lock of the proxy_lock while unlocking a sleeping
-	 * lock.
-	 */
-	raw_spin_lock_init(&lock->wait_lock);
-#endif
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 }
@@ -2224,26 +1731,6 @@
 {
 	debug_rt_mutex_proxy_unlock(lock);
 	rt_mutex_set_owner(lock, NULL);
-}
-
-static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
-{
-	struct task_struct *tsk = current;
-	/*
-	 * RT has a problem here when the wait got interrupted by a timeout
-	 * or a signal. task->pi_blocked_on is still set. The task must
-	 * acquire the hash bucket lock when returning from this function.
-	 *
-	 * If the hash bucket lock is contended then the
-	 * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-	 * task_blocks_on_rt_mutex() will trigger. This can be avoided by
-	 * clearing task->pi_blocked_on which removes the task from the
-	 * boosting chain of the rtmutex. That's correct because the task
-	 * is not longer blocked on it.
-	 */
-	raw_spin_lock(&tsk->pi_lock);
-	tsk->pi_blocked_on = NULL;
-	raw_spin_unlock(&tsk->pi_lock);
 }
 
 /**
@@ -2276,34 +1763,6 @@
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;
 
-#ifdef CONFIG_PREEMPT_RT
-	/*
-	 * In PREEMPT_RT there's an added race.
-	 * If the task, that we are about to requeue, times out,
-	 * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
-	 * to skip this task. But right after the task sets
-	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
-	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
-	 * This will replace the PI_WAKEUP_INPROGRESS with the actual
-	 * lock that it blocks on. We *must not* place this task
-	 * on this proxy lock in that case.
-	 *
-	 * To prevent this race, we first take the task's pi_lock
-	 * and check if it has updated its pi_blocked_on. If it has,
-	 * we assume that it woke up and we return -EAGAIN.
-	 * Otherwise, we set the task's pi_blocked_on to
-	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
-	 * it will know that we are in the process of requeuing it.
-	 */
-	raw_spin_lock(&task->pi_lock);
-	if (task->pi_blocked_on) {
-		raw_spin_unlock(&task->pi_lock);
-		return -EAGAIN;
-	}
-	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-	raw_spin_unlock(&task->pi_lock);
-#endif
-
 	/* We enforce deadlock detection for futexes */
 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
 				      RT_MUTEX_FULL_CHAINWALK);
@@ -2318,8 +1777,7 @@
 		ret = 0;
 	}
 
-	if (ret)
-		fixup_rt_mutex_blocked(lock);
+	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
 }
@@ -2404,15 +1862,12 @@
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
-	if (ret)
-		fixup_rt_mutex_blocked(lock);
-
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -2474,97 +1929,3 @@
 
 	return cleanup;
 }
-
-static inline int
-ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-	unsigned int tmp;
-
-	if (ctx->deadlock_inject_countdown-- == 0) {
-		tmp = ctx->deadlock_inject_interval;
-		if (tmp > UINT_MAX/4)
-			tmp = UINT_MAX;
-		else
-			tmp = tmp*2 + tmp + tmp/2;
-
-		ctx->deadlock_inject_interval = tmp;
-		ctx->deadlock_inject_countdown = tmp;
-		ctx->contending_lock = lock;
-
-		ww_mutex_unlock(lock);
-
-		return -EDEADLK;
-	}
-#endif
-
-	return 0;
-}
-
-#ifdef CONFIG_PREEMPT_RT
-int __sched
-ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
-			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
-	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
-				ctx);
-	if (ret)
-		mutex_release(&lock->base.dep_map, _RET_IP_);
-	else if (!ret && ctx && ctx->acquired > 1)
-		return ww_mutex_deadlock_injection(lock, ctx);
-
-	return ret;
-}
-EXPORT_SYMBOL(ww_mutex_lock_interruptible);
-
-int __sched
-ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire_nest(&lock->base.dep_map, 0, 0,
-			   ctx ? &ctx->dep_map : NULL, _RET_IP_);
-	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
-				ctx);
-	if (ret)
-		mutex_release(&lock->base.dep_map, _RET_IP_);
-	else if (!ret && ctx && ctx->acquired > 1)
-		return ww_mutex_deadlock_injection(lock, ctx);
-
-	return ret;
-}
-EXPORT_SYMBOL(ww_mutex_lock);
-
-void __sched ww_mutex_unlock(struct ww_mutex *lock)
-{
-	/*
-	 * The unlocking fastpath is the 0->1 transition from 'locked'
-	 * into 'unlocked' state:
-	 */
-	if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
-		if (lock->ctx->acquired > 0)
-			lock->ctx->acquired--;
-		lock->ctx = NULL;
-	}
-
-	mutex_release(&lock->base.dep_map, _RET_IP_);
-	__rt_mutex_unlock(&lock->base.lock);
-}
-EXPORT_SYMBOL(ww_mutex_unlock);
-
-int __rt_mutex_owner_current(struct rt_mutex *lock)
-{
-	return rt_mutex_owner(lock) == current;
-}
-EXPORT_SYMBOL(__rt_mutex_owner_current);
-#endif

--
Gitblit v1.6.2