From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] locking/osq_lock: use smp_cond_load_relaxed() and annotate a data race

Replace the open-coded READ_ONCE()/cpu_relax() spin loop in osq_lock()
with smp_cond_load_relaxed(), so that architectures implementing it with
a monitor-wait primitive can wait for node->locked to change instead of
polling, and annotate the lockless read of prev->next with data_race()
to mark it as an intentional race.
---
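Reviewer notes (not part of the commit message):

For reviewers unfamiliar with smp_cond_load_relaxed(): below is a minimal
sketch of the generic fallback, modeled on include/asm-generic/barrier.h.
On architectures without a monitor-wait implementation it degenerates to
the same READ_ONCE()/cpu_relax() polling loop this patch removes, while
e.g. arm64 overrides it with a wait-for-event based version that the
need_resched() IPI can wake:

#define smp_cond_load_relaxed(ptr, cond_expr) ({                \
        typeof(ptr) __PTR = (ptr);                              \
        typeof(*ptr) VAL;                                       \
        for (;;) {                                              \
                /* re-read the variable on every iteration */   \
                VAL = READ_ONCE(*__PTR);                        \
                /* cond_expr may refer to the snapshot as VAL */\
                if (cond_expr)                                  \
                        break;                                  \
                cpu_relax();                                    \
        }                                                       \
        VAL;                                                    \
})

In this patch the condition is "VAL || need_resched() ||
vcpu_is_preempted(...)", so the waiter still falls through to the unqueue
path when it should reschedule or when the lock holder's vCPU is
preempted, matching the behaviour of the old open-coded loop.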
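The data_race() annotation in the second hunk marks the lockless read of
prev->next as an intentional, benign race so that KCSAN does not report
it; it still compiles down to a plain load. A simplified sketch, modeled
on include/linux/compiler.h (the real macro also strips type qualifiers
and handles nesting):

#define data_race(expr)                                         \
({                                                              \
        typeof(expr) __v;                                       \
        /* tell KCSAN the following access is intentional */    \
        kcsan_disable_current();                                \
        __v = (expr);                                           \
        kcsan_enable_current();                                 \
        __v;                                                    \
})

Correctness does not depend on the annotation: as the new comment in the
hunk says, the cpu_relax() further down the loop body implies a compiler
barrier that keeps the comparison from being optimized away, and the
cmpxchg() performs the authoritative update.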
kernel/kernel/locking/osq_lock.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/kernel/kernel/locking/osq_lock.c b/kernel/kernel/locking/osq_lock.c
index 6ef600a..1de006e 100644
--- a/kernel/kernel/locking/osq_lock.c
+++ b/kernel/kernel/locking/osq_lock.c
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
          * cmpxchg in an attempt to undo our queueing.
          */
 
-        while (!READ_ONCE(node->locked)) {
-                /*
-                 * If we need to reschedule bail... so we can block.
-                 * Use vcpu_is_preempted() to avoid waiting for a preempted
-                 * lock holder:
-                 */
-                if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-                        goto unqueue;
+        /*
+         * Wait to acquire the lock or cancellation. Note that need_resched()
+         * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+         * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+         * polling, be careful.
+         */
+        if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+                                  vcpu_is_preempted(node_cpu(node->prev))))
+                return true;
 
-                cpu_relax();
-        }
-        return true;
-
-unqueue:
+        /* unqueue */
         /*
          * Step - A -- stabilize @prev
          *
@@ -157,7 +154,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
          */
 
         for (;;) {
-                if (prev->next == node &&
+                /*
+                 * cpu_relax() below implies a compiler barrier which would
+                 * prevent this comparison being optimized away.
+                 */
+                if (data_race(prev->next) == node &&
                     cmpxchg(&prev->next, node, NULL) == node)
                         break;
 
--
Gitblit v1.6.2