2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/kernel/locking/qspinlock_paravirt.h
@@ -4,7 +4,7 @@
 #endif
 
 #include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/debug_locks.h>
 
 /*
@@ -49,8 +49,6 @@
 
 struct pv_node {
 	struct mcs_spinlock	mcs;
-	struct mcs_spinlock	__res[3];
-
 	int			cpu;
 	u8			state;
 };
@@ -91,7 +89,7 @@
 
 		if (!(val & _Q_LOCKED_PENDING_MASK) &&
 		   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
-			qstat_inc(qstat_pv_lock_stealing, true);
+			lockevent_inc(pv_lock_stealing);
 			return true;
 		}
 		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
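
Note: the qstat_inc() -> lockevent_inc() changes in this and the following hunks are a mechanical conversion to the generic lock-event counting API. A minimal sketch of the two macros' shape, assuming the upstream kernel/locking/lock_events.h definitions (quoted from memory, so details may differ in this tree):

/* Per-cpu array of event counters, one slot per lock event. */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/* Unconditionally bump a per-cpu event counter. */
#define lockevent_inc(ev)	this_cpu_inc(lockevents[LOCKEVENT_ ## ev])

/* Bump only when cond is true; replaces the old qstat_inc(ev, cond). */
#define lockevent_cond_inc(ev, cond)			\
	do {						\
		if (cond)				\
			lockevent_inc(ev);		\
	} while (0)

With CONFIG_LOCK_EVENT_COUNTS=n both macros compile away entirely, which is why sites that previously passed a literal true drop the second argument and use the plain lockevent_inc() form.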
@@ -221,7 +219,7 @@
 		hopcnt++;
 		if (!cmpxchg(&he->lock, NULL, lock)) {
 			WRITE_ONCE(he->node, node);
-			qstat_hop(hopcnt);
+			lockevent_pv_hop(hopcnt);
 			return &he->lock;
 		}
 	}
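
Note: unlike the plain renames, lockevent_pv_hop() accumulates hopcnt in one shot rather than incrementing by one. A sketch of the assumed definition (the real one lives in kernel/locking/qspinlock_stat.h, which is not part of this hunk):

/* Add the number of hash-table hops needed to find a free entry.
 * Assumed shape; the actual macro follows the lock_events.h indexing. */
#define lockevent_pv_hop(cnt)	\
	this_cpu_add(lockevents[LOCKEVENT_pv_hash_hops], (cnt))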
@@ -281,7 +279,7 @@
 {
 	struct pv_node *pn = (struct pv_node *)node;
 
-	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
 
 	pn->cpu = smp_processor_id();
 	pn->state = vcpu_running;
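
Note: with the __res[3] filler removed from pv_node above, the size guarantee moves to the generic struct qnode, which reserves extra space only when paravirt spinlocks are configured. For context, a sketch of the qnode definition this BUILD_BUG_ON checks against (from kernel/locking/qspinlock.c around the time of this change, quoted from memory):

struct qnode {
	struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	long reserved[2];	/* room for pv_node's cpu and state fields */
#endif
};

The compile-time check ensures that casting a queued node to struct pv_node, as pv_init_node() does here, cannot overrun the per-cpu qnode array.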
@@ -322,8 +320,8 @@
 		smp_store_mb(pn->state, vcpu_halted);
 
 		if (!READ_ONCE(node->locked)) {
-			qstat_inc(qstat_pv_wait_node, true);
-			qstat_inc(qstat_pv_wait_early, wait_early);
+			lockevent_inc(pv_wait_node);
+			lockevent_cond_inc(pv_wait_early, wait_early);
 			pv_wait(&pn->state, vcpu_halted);
 		}
 
@@ -341,7 +339,8 @@
 		 * So it is better to spin for a while in the hope that the
 		 * MCS lock will be released soon.
 		 */
-		qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
+		lockevent_cond_inc(pv_spurious_wakeup,
+				   !READ_ONCE(node->locked));
 	}
 
 	/*
@@ -418,7 +417,7 @@
 	/*
 	 * Tracking # of slowpath locking operations
 	 */
-	qstat_inc(qstat_lock_slowpath, true);
+	lockevent_inc(lock_slowpath);
 
 	for (;; waitcnt++) {
 		/*
@@ -466,8 +465,8 @@
 			}
 		}
 		WRITE_ONCE(pn->state, vcpu_hashed);
-		qstat_inc(qstat_pv_wait_head, true);
-		qstat_inc(qstat_pv_wait_again, waitcnt);
+		lockevent_inc(pv_wait_head);
+		lockevent_cond_inc(pv_wait_again, waitcnt);
 		pv_wait(&lock->locked, _Q_SLOW_VAL);
 
 		/*
@@ -530,7 +529,7 @@
 	 * vCPU is harmless other than the additional latency in completing
 	 * the unlock.
 	 */
-	qstat_inc(qstat_pv_kick_unlock, true);
+	lockevent_inc(pv_kick_unlock);
 	pv_kick(node->cpu);
 }
 
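
Note: with CONFIG_LOCK_EVENT_COUNTS enabled, the converted counters are expected to surface under the debugfs lock_event_counts directory (e.g. a pv_kick_unlock file), replacing the old qlockstat interface; the exact path may differ in this tree.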