2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/locking/osq_lock.c
@@ -134,20 +134,17 @@
 	 * cmpxchg in an attempt to undo our queueing.
 	 */
 
-	while (!READ_ONCE(node->locked)) {
-		/*
-		 * If we need to reschedule bail... so we can block.
-		 * Use vcpu_is_preempted() to avoid waiting for a preempted
-		 * lock holder:
-		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-			goto unqueue;
+	/*
+	 * Wait to acquire the lock or cancelation. Note that need_resched()
+	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+	 * polling, be careful.
+	 */
+	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+				  vcpu_is_preempted(node_cpu(node->prev))))
+		return true;
 
-		cpu_relax();
-	}
-	return true;
-
-unqueue:
+	/* unqueue */
 	/*
 	 * Step - A -- stabilize @prev
 	 *
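
For context on the new comment: on architectures without a monitor-wait implementation, smp_cond_load_relaxed() falls back to a plain polling loop, so the rewrite behaves like the old while/cpu_relax() spin. A rough sketch of the generic fallback (along the lines of include/asm-generic/barrier.h; details vary by kernel version):

#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		/* VAL is visible to cond_expr, e.g.		\
		 * "VAL || need_resched() || ..." above */	\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif

Architectures that do override it with a monitor-wait (arm64's WFE-based wait, for example) sleep until a write to the watched location or an interrupt arrives; the new comment relies on need_resched() coming with a resched IPI for wake-up, while vcpu_is_preempted() has no such side channel and is only re-evaluated when the wait is woken for some other reason.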
@@ -157,7 +154,11 @@
 	 */
 
 	for (;;) {
-		if (prev->next == node &&
+		/*
+		 * cpu_relax() below implies a compiler barrier which would
+		 * prevent this comparison being optimized away.
+		 */
+		if (data_race(prev->next) == node &&
 		    cmpxchg(&prev->next, node, NULL) == node)
 			break;
 
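
The data_race() annotation added above marks the plain read of prev->next as an intentionally racy access so KCSAN does not report it; correctness still rests on the cmpxchg() that follows, and, as the new comment notes, the compiler barrier implied by cpu_relax() later in the loop keeps the comparison from being optimized away. Roughly (after include/linux/compiler.h, modulo kernel version), the macro is:

#define data_race(expr)						\
({								\
	/* evaluate expr with KCSAN reporting suppressed */	\
	__unqual_scalar_typeof(({ expr; })) __v = ({		\
		__kcsan_disable_current();			\
		expr;						\
	});							\
	__kcsan_enable_current();				\
	__v;							\
})

It only affects KCSAN instrumentation; the access itself remains an ordinary plain load.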