@@ -4,7 +4,7 @@
 #endif
 
 #include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/debug_locks.h>
 
 /*
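Note on the include change: include/linux/bootmem.h was removed upstream and its contents were folded into include/linux/memblock.h, so this file now pulls in the memblock API directly. A minimal sketch of the replacement pattern for bootmem-era callers (the function below is hypothetical):

#include <linux/memblock.h>

/* memblock_alloc() returns zeroed memory or NULL, standing in for the
 * old alloc_bootmem()-style wrappers. */
static void __init example_early_alloc(void)
{
	void *buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

	if (!buf)
		panic("%s: out of early memory\n", __func__);
}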
@@ -49,8 +49,6 @@
 
 struct pv_node {
 	struct mcs_spinlock	mcs;
-	struct mcs_spinlock	__res[3];
-
 	int			cpu;
 	u8			state;
 };
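Context for the removed padding: it is not lost. The companion qspinlock change moves the reservation into the generic per-CPU queue node, roughly of this shape (a sketch of the qnode definition in qspinlock.c, consistent with the size check further down):

/* Generic queue node: with paravirt spinlocks enabled it reserves the
 * extra space that pv_node's cpu/state fields overlay. */
struct qnode {
	struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
	long reserved[2];
#endif
};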
@@ -91,7 +89,7 @@
 
 	if (!(val & _Q_LOCKED_PENDING_MASK) &&
 	    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
-		qstat_inc(qstat_pv_lock_stealing, true);
+		lockevent_inc(pv_lock_stealing);
 		return true;
 	}
 	if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
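This hunk shows the conversion pattern used throughout: qstat_inc(event, cond) becomes lockevent_inc() for unconditional counts and lockevent_cond_inc() for guarded ones. A sketch of the helpers' shape, assuming the per-CPU counter scheme of the generic lock_events code:

DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/* Bump a per-CPU event counter; the cond form makes guarded counting
 * explicit instead of passing a literal 'true' everywhere. */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		raw_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)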
@@ -221,7 +219,7 @@
 		hopcnt++;
 		if (!cmpxchg(&he->lock, NULL, lock)) {
 			WRITE_ONCE(he->node, node);
-			qstat_hop(hopcnt);
+			lockevent_pv_hop(hopcnt);
 			return &he->lock;
 		}
 	}
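lockevent_pv_hop() is the one non-boolean counter here: it accumulates how many hash buckets were probed per insertion, so the average probe depth stays derivable. A sketch, assuming an add-style helper alongside the increment helper above:

static inline void __lockevent_add(enum lock_events event, int inc)
{
	raw_cpu_add(lockevents[event], inc);
}

/* Accumulate hash-probe hops rather than counting calls. */
#define lockevent_pv_hop(cnt)	__lockevent_add(LOCKEVENT_pv_hash_hops, cnt)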
@@ -281,7 +279,7 @@
 {
 	struct pv_node *pn = (struct pv_node *)node;
 
-	BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
 
 	pn->cpu = smp_processor_id();
 	pn->state = vcpu_running;
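The tightened BUILD_BUG_ON follows from the qnode sketch above: pv_node only ever overlays generic qnode storage, so the honest bound is sizeof(struct qnode) rather than a hard-coded multiple of mcs_spinlock. A minimal illustration of the overlay the assert protects (the qnodes per-CPU array name is assumed from the companion qspinlock change):

static void example_overlay(void)
{
	/* The slowpath hands out generic qnode storage ... */
	struct mcs_spinlock *node = this_cpu_ptr(&qnodes[0].mcs);
	/* ... and the PV code reinterprets it in place. */
	struct pv_node *pn = (struct pv_node *)node;

	pn->cpu = smp_processor_id();
}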
@@ -322,8 +320,8 @@
 	smp_store_mb(pn->state, vcpu_halted);
 
 	if (!READ_ONCE(node->locked)) {
-		qstat_inc(qstat_pv_wait_node, true);
-		qstat_inc(qstat_pv_wait_early, wait_early);
+		lockevent_inc(pv_wait_node);
+		lockevent_cond_inc(pv_wait_early, wait_early);
 		pv_wait(&pn->state, vcpu_halted);
 	}
 
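For reference, pv_wait() and pv_kick() are the hypervisor hooks this slowpath is built on; the contract assumed here (implemented per hypervisor, e.g. KVM or Xen) is sketched below.

/*
 * pv_wait(u8 *ptr, u8 val): if (*ptr == val), de-schedule this vCPU until
 *	it is kicked; spurious returns are allowed, which is why callers
 *	re-check their state and why pv_spurious_wakeup is counted below.
 * pv_kick(int cpu): make a vCPU blocked in pv_wait() runnable again.
 */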
@@ -341,7 +339,8 @@
 	 * So it is better to spin for a while in the hope that the
 	 * MCS lock will be released soon.
 	 */
-	qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
+	lockevent_cond_inc(pv_spurious_wakeup,
+			   !READ_ONCE(node->locked));
 }
 
 /*
@@ -418,7 +417,7 @@
 	/*
 	 * Tracking # of slowpath locking operations
 	 */
-	qstat_inc(qstat_lock_slowpath, true);
+	lockevent_inc(lock_slowpath);
 
 	for (;; waitcnt++) {
 		/*
@@ -466,8 +465,8 @@
 			}
 		}
 		WRITE_ONCE(pn->state, vcpu_hashed);
-		qstat_inc(qstat_pv_wait_head, true);
-		qstat_inc(qstat_pv_wait_again, waitcnt);
+		lockevent_inc(pv_wait_head);
+		lockevent_cond_inc(pv_wait_again, waitcnt);
 		pv_wait(&lock->locked, _Q_SLOW_VAL);
 
 		/*
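Usage note: after this conversion the counters are read through the generic lock-event debugfs interface rather than the old qstat files, e.g. cat /sys/kernel/debug/lockevent/pv_wait_head (directory name assumed from the companion lock_events patch). The same interface also carries the non-PV events converted here, such as lock_slowpath above.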
@@ -530,7 +529,7 @@
 	 * vCPU is harmless other than the additional latency in completing
 	 * the unlock.
 	 */
-	qstat_inc(qstat_pv_kick_unlock, true);
+	lockevent_inc(pv_kick_unlock);
 	pv_kick(node->cpu);
 }
 