```diff
@@ -134,20 +134,17 @@
 	 * cmpxchg in an attempt to undo our queueing.
 	 */
 
-	while (!READ_ONCE(node->locked)) {
-		/*
-		 * If we need to reschedule bail... so we can block.
-		 * Use vcpu_is_preempted() to avoid waiting for a preempted
-		 * lock holder:
-		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-			goto unqueue;
+	/*
+	 * Wait to acquire the lock or for cancellation. Note that
+	 * need_resched() will come with an IPI, which will wake
+	 * smp_cond_load_relaxed() if it is implemented with a monitor-wait.
+	 * vcpu_is_preempted() relies on polling; be careful.
+	 */
+	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+				  vcpu_is_preempted(node_cpu(node->prev))))
+		return true;
 
-		cpu_relax();
-	}
-	return true;
-
-unqueue:
+	/* unqueue */
 	/*
 	 * Step - A  -- stabilize @prev
 	 *
```
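The first hunk folds osq_lock()'s open-coded spin loop into smp_cond_load_relaxed(). Two details are worth spelling out. First, inside the condition expression VAL names the most recently loaded value of node->locked, and the macro returns that final loaded value rather than the truth of the condition, so `return true` is taken only when the lock was actually handed over; an exit forced by need_resched() or vcpu_is_preempted() sees VAL == 0 and falls through to the unqueue path. Second, the "be careful" note: a monitor-wait implementation (e.g. arm64's WFE-based one) wakes on writes to the watched location or on interrupts, so the need_resched() IPI wakes it, but a change in vcpu_is_preempted() writes nothing to node->locked and sends no IPI, so that part of the condition is only re-evaluated when something else wakes the waiter. For reference, a close paraphrase of the generic fallback in include/asm-generic/barrier.h (exact details vary across kernel versions):

```c
/*
 * Close paraphrase of the generic smp_cond_load_relaxed() fallback;
 * architectures may override it with a monitor-wait primitive.
 */
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);	/* VAL visible in cond_expr */ \
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;		/* returns the loaded value */ \
})
```

The second hunk annotates the unqueue path's lockless load of prev->next: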
```diff
@@ -157,7 +154,11 @@
 	 */
 
 	for (;;) {
-		if (prev->next == node &&
+		/*
+		 * cpu_relax() below implies a compiler barrier which would
+		 * prevent this comparison being optimized away.
+		 */
+		if (data_race(prev->next) == node &&
 		    cmpxchg(&prev->next, node, NULL) == node)
 			break;
 
```
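The new comment relies on cpu_relax() expanding to at least a compiler barrier, so the plain load of prev->next is re-issued on every pass of the for (;;) loop; data_race() only marks the read as an intentionally racy access for KCSAN and has no effect on code generation, and the cmpxchg() immediately after re-checks the value authoritatively in any case. A minimal, hypothetical sketch of the barrier point (cpu_relax_sketch() and wait_for_flag() are illustrative names; the asm mirrors x86's cpu_relax()):

```c
/* Sketch only: why a compiler barrier keeps a plain load inside a loop. */
static inline void cpu_relax_sketch(void)
{
	/*
	 * x86's cpu_relax() is roughly this: a PAUSE hint plus a
	 * "memory" clobber, which acts as a compiler barrier.
	 */
	asm volatile("pause" ::: "memory");
}

static void wait_for_flag(int *flag)
{
	/*
	 * Without the barrier, the compiler may hoist the load of
	 * *flag out of the loop and spin on a stale register copy.
	 * With it, every iteration performs a fresh load, which is
	 * what the patch's comment relies on for prev->next.
	 */
	while (!*flag)
		cpu_relax_sketch();
}
```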