@@ -15,6 +15,8 @@
 #define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001
 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME 0x10002
 
+#define MAX_FIXED_COUNTERS 3
+
 struct kvm_event_hw_type_mapping {
 	u8 eventsel;
 	u8 unit_mask;
@@ -22,16 +24,16 @@
 };
 
 struct kvm_pmu_ops {
-	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-				    u8 unit_mask);
+	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
 	unsigned (*find_fixed_event)(int idx);
 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
-	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
-					  u64 *mask);
-	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
+	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
+					    unsigned int idx, u64 *mask);
+	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
+	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	void (*refresh)(struct kvm_vcpu *vcpu);
 	void (*init)(struct kvm_vcpu *vcpu);
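Note on the reworked kvm_pmu_ops: RDPMC handling is now split from MSR handling, so ->rdpmc_ecx_to_pmc() decodes the guest's ECX (also returning the width mask to apply), ->msr_idx_to_pmc() takes a plain MSR number, and ->get_msr() now matches ->set_msr() by passing struct msr_data. A minimal sketch of how the common code might drive the new RDPMC hooks, using the wrapper declared later in this header; this is illustrative, not the verbatim pmu.c body:

/*
 * Sketch only: resolve ECX to a counter via the vendor hook, then apply
 * the returned width mask to the raw count. A NULL result lets the
 * caller inject #GP into the guest.
 */
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmc *counter;
	u64 mask;

	counter = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, pmc, &mask);
	if (!counter)
		return 1;

	*data = pmc_read_counter(counter) & mask;
	return 0;
}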
@@ -57,12 +59,21 @@
 	return counter & pmc_bitmask(pmc);
 }
 
+static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+		pmc->current_config = 0;
+		pmc_to_pmu(pmc)->event_count--;
+	}
+}
+
 static inline void pmc_stop_counter(struct kvm_pmc *pmc)
 {
 	if (pmc->perf_event) {
 		pmc->counter = pmc_read_counter(pmc);
-		perf_event_release_kernel(pmc->perf_event);
-		pmc->perf_event = NULL;
+		pmc_release_perf_event(pmc);
 	}
 }
 
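The release logic is factored out of pmc_stop_counter() so other paths can drop the host perf event; the helper also resets current_config and decrements the pmu-wide event_count, which the lazy-release machinery (see kvm_pmu_cleanup() declared below) relies on. A hedged sketch of such a caller; the field names come from struct kvm_pmu, but the loop is illustrative rather than the actual kvm_pmu_cleanup():

/*
 * Sketch: drop host perf events for all GP counters while preserving the
 * guest-visible counts. pmc_stop_counter() snapshots pmc->counter before
 * releasing, so each counter can later be reprogrammed from that value.
 */
static void pmu_release_all_gp_events(struct kvm_pmu *pmu)
{
	int i;

	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
		pmc_stop_counter(&pmu->gp_counters[i]);
}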
@@ -78,7 +89,13 @@
 
 static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
-	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
+	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
+}
+
+static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
+					      u64 data)
+{
+	return !(pmu->global_ctrl_mask & data);
 }
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
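kvm_valid_perf_global_ctrl() reads global_ctrl_mask as the reserved-bit mask: a value is acceptable iff it sets none of those bits. A hedged sketch of the kind of WRMSR check this centralizes (the real handler lives in the vendor PMU code; 'pmu_set_global_ctrl' is an illustrative name, not an actual callback):

/*
 * Sketch: reject guest writes to the global-enable MSR that touch
 * reserved bits; the caller injects #GP on a non-zero return.
 */
static int pmu_set_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	if (!kvm_valid_perf_global_ctrl(pmu, data))
		return 1;

	pmu->global_ctrl = data;
	return 0;
}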
@@ -113,6 +130,15 @@
 	return NULL;
 }
 
+static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+	if (!sample_period)
+		sample_period = pmc_bitmask(pmc) + 1;
+	return sample_period;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
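get_sample_period() turns a guest counter value into the number of host events left before the guest counter wraps: (-value) & pmc_bitmask(pmc) is the two's-complement distance to overflow, and a result of zero (counter at zero) is replaced by one full period, since perf treats a zero sample_period as "no sampling". A worked example, assuming a 48-bit counter:

	/* Assumed 48-bit PMC, so pmc_bitmask(pmc) == (1ULL << 48) - 1. */
	u64 mask = (1ULL << 48) - 1;
	u64 p1 = (-0xFFFFFFFFFFF0ULL) & mask;	/* 0x10: fires after 16 events */
	u64 p2 = (-0ULL) & mask;		/* 0: replaced with a full 2^48 */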
@@ -120,14 +146,16 @@
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
 void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
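kvm_vm_ioctl_set_pmu_event_filter() backs the KVM_SET_PMU_EVENT_FILTER VM ioctl, which lets userspace allow- or deny-list guest PMU events. A hedged userspace sketch, assuming the struct kvm_pmu_event_filter layout and KVM_PMU_EVENT_ALLOW constant from the matching uapi <linux/kvm.h>; verify against your headers before use:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/*
 * Sketch: allow only eventsel 0x3C, unit mask 0 (unhalted core cycles
 * on Intel); every other guest event is filtered out.
 */
static int allow_only_cycles(int vm_fd)
{
	struct kvm_pmu_event_filter *f;
	int ret;

	f = calloc(1, sizeof(*f) + sizeof(__u64));	/* one events[] slot */
	if (!f)
		return -1;

	f->action = KVM_PMU_EVENT_ALLOW;
	f->nevents = 1;
	f->events[0] = 0x3c;

	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return ret;
}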