hc
2024-03-22 ac5f19e89dcbd5c7428fcc78a0d407c887564466
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */
 
/*
 * LOCK_EVENT() is used X-macro style: each entry in this file names one
 * lock event counter.  By default an entry expands to an enumerator
 * (LOCKEVENT_<name>,).  An includer may pre-define LOCK_EVENT before
 * including this file to expand the same list into something else —
 * presumably a matching name/string table; verify against the includers.
 */
#ifndef LOCK_EVENT
#define LOCK_EVENT(name)    LOCKEVENT_ ## name,
#endif
 
#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 *
 * NOTE(review): the textual order of LOCK_EVENT() entries fixes the
 * numeric values of the generated enumerators.  Any parallel table
 * built by re-including this file relies on that order — do not
 * reorder, insert, or remove entries without updating every includer.
 */
LOCK_EVENT(pv_hash_hops)    /* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)    /* # of vCPU kicks issued at unlock time   */
LOCK_EVENT(pv_kick_wake)    /* # of vCPU kicks for pv_latency_wake       */
LOCK_EVENT(pv_latency_kick)    /* Average latency (ns) of vCPU kick       */
LOCK_EVENT(pv_latency_wake)    /* Average latency (ns) of kick-to-wakeup  */
LOCK_EVENT(pv_lock_stealing)    /* # of lock stealing operations       */
LOCK_EVENT(pv_spurious_wakeup)    /* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)    /* # of wait's after queue head vCPU kick  */
LOCK_EVENT(pv_wait_early)    /* # of early vCPU wait's           */
LOCK_EVENT(pv_wait_head)    /* # of vCPU wait's at the queue head       */
LOCK_EVENT(pv_wait_node)    /* # of vCPU wait's at non-head queue node */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
/*
 * Locking events for qspinlock
 *
 * There is no lock_use_node1 event: a slowpath entry that uses only the
 * first per-cpu MCS node is not counted separately, so subtracting
 * lock_use_node[234] from lock_slowpath will give you lock_use_node1.
 */
LOCK_EVENT(lock_pending)    /* # of locking ops via pending code         */
LOCK_EVENT(lock_slowpath)    /* # of locking ops via MCS lock queue         */
LOCK_EVENT(lock_use_node2)    /* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)    /* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)    /* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)    /* # of locking ops w/o using percpu node    */
#endif /* CONFIG_QUEUED_SPINLOCKS */
 
/*
 * Locking events for rwsem.
 *
 * These counters are always compiled in (no config guard), covering the
 * sleep/wakeup path, the optimistic-spinning (opt) path, and the lock
 * handoff mechanism for both readers and writers.
 *
 * NOTE(review): entry order fixes the generated enumerator values; keep
 * it in sync with any includer that builds a parallel table.
 */
LOCK_EVENT(rwsem_sleep_reader)    /* # of reader sleeps            */
LOCK_EVENT(rwsem_sleep_writer)    /* # of writer sleeps            */
LOCK_EVENT(rwsem_wake_reader)    /* # of reader wakeups            */
LOCK_EVENT(rwsem_wake_writer)    /* # of writer wakeups            */
LOCK_EVENT(rwsem_opt_rlock)    /* # of opt-acquired read locks        */
LOCK_EVENT(rwsem_opt_wlock)    /* # of opt-acquired write locks    */
LOCK_EVENT(rwsem_opt_fail)    /* # of failed optspins            */
LOCK_EVENT(rwsem_opt_nospin)    /* # of disabled optspins        */
LOCK_EVENT(rwsem_opt_norspin)    /* # of disabled reader-only optspins    */
LOCK_EVENT(rwsem_opt_rlock2)    /* # of opt-acquired 2ndary read locks    */
LOCK_EVENT(rwsem_rlock)        /* # of read locks acquired        */
LOCK_EVENT(rwsem_rlock_fast)    /* # of fast read locks acquired    */
LOCK_EVENT(rwsem_rlock_fail)    /* # of failed read lock acquisitions    */
LOCK_EVENT(rwsem_rlock_handoff)    /* # of read lock handoffs        */
LOCK_EVENT(rwsem_wlock)        /* # of write locks acquired        */
LOCK_EVENT(rwsem_wlock_fail)    /* # of failed write lock acquisitions    */
LOCK_EVENT(rwsem_wlock_handoff)    /* # of write lock handoffs        */