From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] locking/rtmutex: update for PREEMPT_RT and add Android vendor hooks

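Bring kernel/locking/rtmutex.c in line with the PREEMPT_RT config rename
and hook it up to the Android vendor tracepoints. In summary, this patch:

- Replaces CONFIG_PREEMPT_RT_FULL with CONFIG_PREEMPT_RT throughout.
- Drops the rt_mutex deadlock debug print calls and the unused
  rt_mutex_cmpxchg_relaxed() wrapper.
- Removes rt_mutex_lock_killable(), rt_mutex_timed_lock(),
  rt_mutex_timed_fastlock() and rt_spin_trylock_irqsave(); renames
  rt_spin_unlock_wait() to rt_spin_lock_unlock() and adds
  rt_spin_lock_nest_lock().
- Takes rcu_read_lock()/migrate_disable() only after the lock is
  acquired, drops the sleeping_lock_inc()/dec() accounting, and uses
  preempt_schedule_lock() in the rt_spin_lock slow path.
- Adapts to the mutex_release()/spin_release() API that no longer
  takes a "nested" argument, and uses WRITE_ONCE() for the owner store.
- Adds the trace_android_vh_rtmutex_wait_start/finish and
  trace_android_vh_record_rtmutex_lock_starttime hooks via
  <trace/hooks/dtask.h>, and drops the no longer needed
  <linux/blkdev.h> include and blk_schedule_flush_plug() call.
- Adds the SPDX license identifier and points the design document
  reference at rt-mutex-design.rst.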
---
 kernel/kernel/locking/rtmutex.c |  219 ++++++++++++++----------------------------------------
 1 file changed, 58 insertions(+), 161 deletions(-)

diff --git a/kernel/kernel/locking/rtmutex.c b/kernel/kernel/locking/rtmutex.c
index fe5153f..47d59f9 100644
--- a/kernel/kernel/locking/rtmutex.c
+++ b/kernel/kernel/locking/rtmutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
  *
@@ -7,13 +8,13 @@
  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
- *  Adaptive Spinlocks:
+ * Adaptive Spinlocks:
  *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
  *				     and Peter Morreale,
  * Adaptive Spinlocks simplification:
  *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
- *  See Documentation/locking/rt-mutex-design.txt for details.
+ *  See Documentation/locking/rt-mutex-design.rst for details.
  */
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -23,8 +24,8 @@
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
+#include <trace/hooks/dtask.h>
 #include <linux/ww_mutex.h>
-#include <linux/blkdev.h>
 
 #include "rtmutex_common.h"
 
@@ -63,7 +64,7 @@
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	lock->owner = (struct task_struct *)val;
+	WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }
 
 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
@@ -153,7 +154,6 @@
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
 
@@ -214,7 +214,6 @@
 }
 
 #else
-# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
 # define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
 # define rt_mutex_cmpxchg_release(l,c,n)	(0)
 
@@ -641,7 +640,6 @@
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
@@ -670,8 +668,7 @@
 		}
 
 		/* [10] Grab the next task, i.e. owner of @lock */
-		task = rt_mutex_owner(lock);
-		get_task_struct(task);
+		task = get_task_struct(rt_mutex_owner(lock));
 		raw_spin_lock(&task->pi_lock);
 
 		/*
@@ -754,8 +751,7 @@
 	}
 
 	/* [10] Grab the next task, i.e. the owner of @lock */
-	task = rt_mutex_owner(lock);
-	get_task_struct(task);
+	task = get_task_struct(rt_mutex_owner(lock));
 	raw_spin_lock(&task->pi_lock);
 
 	/* [11] requeue the pi waiters if necessary */
@@ -964,7 +960,7 @@
 	return 1;
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 /*
  * preemptible spin_lock functions:
  */
@@ -1071,10 +1067,8 @@
 
 		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-		debug_rt_mutex_print_deadlock(waiter);
-
 		if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
-			schedule();
+			preempt_schedule_lock();
 
 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
@@ -1141,11 +1135,10 @@
 
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
-	sleeping_lock_inc();
-	rcu_read_lock();
-	migrate_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	rcu_read_lock();
+	migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
@@ -1157,23 +1150,31 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
-	sleeping_lock_inc();
-	rcu_read_lock();
-	migrate_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	rcu_read_lock();
+	migrate_disable();
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
+				       struct lockdep_map *nest_lock)
+{
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	rcu_read_lock();
+	migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
 #endif
 
 void __lockfunc rt_spin_unlock(spinlock_t *lock)
 {
 	/* NOTE: we always pass in '1' for nested, for simplicity */
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+	spin_release(&lock->dep_map, _RET_IP_);
 	migrate_enable();
 	rcu_read_unlock();
-	sleeping_lock_dec();
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
@@ -1188,26 +1189,22 @@
  * (like raw spinlocks do), we lock and unlock, to force the kernel to
  * schedule if there's contention:
  */
-void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)
 {
 	spin_lock(lock);
 	spin_unlock(lock);
 }
-EXPORT_SYMBOL(rt_spin_unlock_wait);
+EXPORT_SYMBOL(rt_spin_lock_unlock);
 
 int __lockfunc rt_spin_trylock(spinlock_t *lock)
 {
 	int ret;
 
-	sleeping_lock_inc();
-	migrate_disable();
 	ret = __rt_mutex_trylock(&lock->lock);
 	if (ret) {
 		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		rcu_read_lock();
-	} else {
-		migrate_enable();
-		sleeping_lock_dec();
+		migrate_disable();
 	}
 	return ret;
 }
@@ -1220,31 +1217,15 @@
 	local_bh_disable();
 	ret = __rt_mutex_trylock(&lock->lock);
 	if (ret) {
-		sleeping_lock_inc();
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		rcu_read_lock();
 		migrate_disable();
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-	} else
+	} else {
 		local_bh_enable();
-	return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-{
-	int ret;
-
-	*flags = 0;
-	ret = __rt_mutex_trylock(&lock->lock);
-	if (ret) {
-		sleeping_lock_inc();
-		rcu_read_lock();
-		migrate_disable();
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 	}
 	return ret;
 }
-EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+EXPORT_SYMBOL(rt_spin_trylock_bh);
 
 void
 __rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
@@ -1259,9 +1240,9 @@
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 
-#endif /* PREEMPT_RT_FULL */
+#endif /* PREEMPT_RT */
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 	static inline int __sched
 __mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -1578,6 +1559,7 @@
 {
 	int ret = 0;
 
+	trace_android_vh_rtmutex_wait_start(lock);
 	for (;;) {
 		/* Try to acquire the lock: */
 		if (try_to_take_rt_mutex(lock, current, waiter))
@@ -1600,14 +1582,13 @@
 
 		raw_spin_unlock_irq(&lock->wait_lock);
 
-		debug_rt_mutex_print_deadlock(waiter);
-
 		schedule();
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
 	}
 
+	trace_android_vh_rtmutex_wait_finish(lock);
 	__set_current_state(TASK_RUNNING);
 	return ret;
 }
@@ -1622,10 +1603,6 @@
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;
 
-	/*
-	 * Yell lowdly and stop the task right here.
-	 */
-	rt_mutex_print_deadlock(w);
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
@@ -1672,7 +1649,7 @@
 	ww_ctx->acquired++;
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 static void ww_mutex_account_lock(struct rt_mutex *lock,
 				  struct ww_acquire_ctx *ww_ctx)
 {
@@ -1716,7 +1693,7 @@
 {
 	int ret;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 	if (ww_ctx) {
 		struct ww_mutex *ww;
 
@@ -1934,36 +1911,7 @@
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	/*
-	 * If rt_mutex blocks, the function sched_submit_work will not call
-	 * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
-	 * We must call blk_schedule_flush_plug here, if we don't call it,
-	 * a deadlock in I/O may happen.
-	 */
-	if (unlikely(blk_needs_flush_plug(current)))
-		blk_schedule_flush_plug(current);
-
 	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
-}
-
-static inline int
-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-			struct hrtimer_sleeper *timeout,
-			enum rtmutex_chainwalk chwalk,
-			struct ww_acquire_ctx *ww_ctx,
-			int (*slowfn)(struct rt_mutex *lock, int state,
-				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk,
-				      struct ww_acquire_ctx *ww_ctx))
-{
-	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 0;
-
-	if (unlikely(blk_needs_flush_plug(current)))
-		blk_schedule_flush_plug(current);
-
-	return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }
 
 static inline int
@@ -1977,7 +1925,7 @@
 }
 
 /*
- * Performs the wakeup of the the top-waiter and re-enables preemption.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
 void rt_mutex_postunlock(struct wake_q_head *wake_q,
 			 struct wake_q_head *wake_sleeper_q)
@@ -2025,7 +1973,9 @@
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 	ret = __rt_mutex_lock_state(lock, state);
 	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
+		mutex_release(&lock->dep_map, _RET_IP_);
+	trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
 	return ret;
 }
 
@@ -2046,9 +1996,9 @@
 	__rt_mutex_lock(lock, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif
 
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -2089,57 +2039,9 @@
 	return __rt_mutex_slowtrylock(lock);
 }
 
-/**
- * rt_mutex_lock_killable - lock a rt_mutex killable
- *
- * @lock:              the rt_mutex to be locked
- * @detect_deadlock:   deadlock detection on/off
- *
- * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- */
-int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-{
-	return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-
-/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- *			the timeout structure is provided
- *			by the caller
- *
- * @lock:		the rt_mutex to be locked
- * @timeout:		timeout structure or NULL (no timeout)
- *
- * Returns:
- *  0		on success
- * -EINTR	when interrupted by a signal
- * -ETIMEDOUT	when the timeout expired
- */
-int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
-{
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       RT_MUTEX_MIN_CHAINWALK,
-				       NULL,
-				       rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, 1, _RET_IP_);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
 int __sched __rt_mutex_trylock(struct rt_mutex *lock)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 	if (WARN_ON_ONCE(in_irq() || in_nmi()))
 #else
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
@@ -2167,10 +2069,11 @@
 	ret = __rt_mutex_trylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	else
+		trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
 void __sched __rt_mutex_unlock(struct rt_mutex *lock)
 {
@@ -2184,8 +2087,9 @@
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
-	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	mutex_release(&lock->dep_map, _RET_IP_);
 	__rt_mutex_unlock(lock);
+	trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
@@ -2250,9 +2154,6 @@
 void rt_mutex_destroy(struct rt_mutex *lock)
 {
 	WARN_ON(rt_mutex_is_locked(lock));
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-	lock->magic = NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
@@ -2375,7 +2276,7 @@
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 	/*
 	 * In PREEMPT_RT there's an added race.
 	 * If the task, that we are about to requeue, times out,
@@ -2419,8 +2320,6 @@
 
 	if (ret)
 		fixup_rt_mutex_blocked(lock);
-
-	debug_rt_mutex_print_deadlock(waiter);
 
 	return ret;
 }
@@ -2486,7 +2385,7 @@
  *			been started.
  * @waiter:		the pre-initialized rt_mutex_waiter
  *
- * Wait for the the lock acquisition started on our behalf by
+ * Wait for the lock acquisition started on our behalf by
  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
  * rt_mutex_cleanup_proxy_lock().
  *
@@ -2580,7 +2479,7 @@
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-	unsigned tmp;
+	unsigned int tmp;
 
 	if (ctx->deadlock_inject_countdown-- == 0) {
 		tmp = ctx->deadlock_inject_interval;
@@ -2602,7 +2501,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 int __sched
 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -2615,13 +2514,13 @@
 	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
 				ctx);
 	if (ret)
-		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+		mutex_release(&lock->base.dep_map, _RET_IP_);
 	else if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
 
 int __sched
 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
@@ -2635,23 +2534,21 @@
 	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
 				ctx);
 	if (ret)
-		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+		mutex_release(&lock->base.dep_map, _RET_IP_);
 	else if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ww_mutex_lock);
+EXPORT_SYMBOL(ww_mutex_lock);
 
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 {
-	int nest = !!lock->ctx;
-
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
-	if (nest) {
+	if (lock->ctx) {
 #ifdef CONFIG_DEBUG_MUTEXES
 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 #endif
@@ -2660,7 +2557,7 @@
 		lock->ctx = NULL;
 	}
 
-	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+	mutex_release(&lock->base.dep_map, _RET_IP_);
 	__rt_mutex_unlock(&lock->base.lock);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);

--
Gitblit v1.6.2