@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  *
@@ -7,20 +8,21 @@
  * Avi Kivity <avi@redhat.com>
  * Gleb Natapov <gleb@redhat.com>
  * Wei Huang <wei@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
  */
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <asm/perf_event.h>
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
 #include "pmu.h"
+
+/* This is enough to filter the vast majority of currently defined events. */
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 
 /* NOTE:
  * - Each perf counter is defined as "struct kvm_pmc";
@@ -62,8 +64,7 @@
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-	if (!test_and_set_bit(pmc->idx,
-			      (unsigned long *)&pmu->reprogram_pmi)) {
+	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 	}
@@ -76,8 +77,7 @@
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-	if (!test_and_set_bit(pmc->idx,
-			      (unsigned long *)&pmu->reprogram_pmi)) {
+	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 
@@ -97,7 +97,7 @@
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-				  unsigned config, bool exclude_user,
+				  u64 config, bool exclude_user,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
@@ -113,7 +113,7 @@
 		.config = config,
 	};
 
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
@@ -137,42 +137,98 @@
 	}
 
 	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
+	pmc_to_pmu(pmc)->event_count++;
+	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+}
+
+static void pmc_pause_counter(struct kvm_pmc *pmc)
+{
+	u64 counter = pmc->counter;
+
+	if (!pmc->perf_event)
+		return;
+
+	/* update counter, reset event value to avoid redundant accumulation */
+	counter += perf_event_pause(pmc->perf_event, true);
+	pmc->counter = counter & pmc_bitmask(pmc);
+}
+
+static bool pmc_resume_counter(struct kvm_pmc *pmc)
+{
+	if (!pmc->perf_event)
+		return false;
+
+	/* recalibrate sample period and check if it's accepted by perf core */
+	if (perf_event_period(pmc->perf_event,
+			      get_sample_period(pmc, pmc->counter)))
+		return false;
+
+	/* reuse perf_event to serve as pmc_reprogram_counter() does */
+	perf_event_enable(pmc->perf_event);
+
+	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+	return true;
+}
+
+static int cmp_u64(const void *pa, const void *pb)
+{
+	u64 a = *(u64 *)pa;
+	u64 b = *(u64 *)pb;
+
+	return (a > b) - (a < b);
 }
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-	unsigned config, type = PERF_TYPE_RAW;
-	u8 event_select, unit_mask;
+	u64 config;
+	u32 type = PERF_TYPE_RAW;
+	struct kvm *kvm = pmc->vcpu->kvm;
+	struct kvm_pmu_event_filter *filter;
+	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
+	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
 
 	pmc->eventsel = eventsel;
 
-	pmc_stop_counter(pmc);
+	pmc_pause_counter(pmc);
 
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
-	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (filter) {
+		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
+	}
+	if (!allow_event)
+		return;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
 			  ARCH_PERFMON_EVENTSEL_INV |
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-							       event_select,
-							       unit_mask);
+		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
 	}
 
 	if (type == PERF_TYPE_RAW)
-		config = eventsel & AMD64_RAW_EVENT_MASK;
+		config = eventsel & pmu->raw_event_mask;
 
+	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+		return;
+
+	pmc_release_perf_event(pmc);
+
+	pmc->current_config = eventsel;
 	pmc_reprogram_counter(pmc, type, config,
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
@@ -186,14 +242,32 @@
 {
 	unsigned en_field = ctrl & 0x3;
 	bool pmi = ctrl & 0x8;
+	struct kvm_pmu_event_filter *filter;
+	struct kvm *kvm = pmc->vcpu->kvm;
 
-	pmc_stop_counter(pmc);
+	pmc_pause_counter(pmc);
 
 	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (filter) {
+		if (filter->action == KVM_PMU_EVENT_DENY &&
+		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			return;
+		if (filter->action == KVM_PMU_EVENT_ALLOW &&
+		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			return;
+	}
+
+	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
+		return;
+
+	pmc_release_perf_event(pmc);
+
+	pmc->current_config = (u64)ctrl;
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
+			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
 			      !(en_field & 0x2), /* exclude user */
 			      !(en_field & 0x1), /* exclude kernel */
 			      pmi, false, false);
@@ -202,7 +276,7 @@
 
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
+	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
 
 	if (!pmc)
 		return;
@@ -221,27 +295,32 @@
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	u64 bitmask;
 	int bit;
 
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
+	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
 
 		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			clear_bit(bit, pmu->reprogram_pmi);
 			continue;
 		}
 
 		reprogram_counter(pmu, bit);
 	}
+
+	/*
+	 * Unused perf_events are only released if the corresponding MSRs
+	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
+	 * triggers KVM_REQ_PMU if cleanup is needed.
+	 */
+	if (unlikely(pmu->need_cleanup))
+		kvm_pmu_cleanup(vcpu);
 }
 
 /* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
-	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
+	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -264,10 +343,10 @@
 		ctr_val = rdtsc();
 		break;
 	case VMWARE_BACKDOOR_PMC_REAL_TIME:
-		ctr_val = ktime_get_boot_ns();
+		ctr_val = ktime_get_boottime_ns();
 		break;
 	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
-		ctr_val = ktime_get_boot_ns() +
+		ctr_val = ktime_get_boottime_ns() +
 			  vcpu->kvm->arch.kvmclock_offset;
 		break;
 	default:
@@ -281,14 +360,23 @@
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	bool fast_mode = idx & (1u << 31);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 	u64 mask = fast_mode ? ~0u : ~0ull;
+
+	if (!pmu->version)
+		return 1;
 
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
+	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
 	if (!pmc)
+		return 1;
+
+	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
+	    (kvm_x86_ops.get_cpl(vcpu) != 0) &&
+	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
 		return 1;
 
 	*data = pmc_read_counter(pmc) & mask;
@@ -303,17 +391,28 @@
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
+		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
 }
 
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);
+
+	if (pmc)
+		__set_bit(pmc->idx, pmu->pmc_in_use);
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
+	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
 }
 
 /* refresh PMU settings. This function generally is called when underlying
@@ -322,7 +421,7 @@
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->pmu_ops->refresh(vcpu);
+	kvm_x86_ops.pmu_ops->refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -330,7 +429,7 @@
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	irq_work_sync(&pmu->irq_work);
-	kvm_x86_ops->pmu_ops->reset(vcpu);
+	kvm_x86_ops.pmu_ops->reset(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -338,12 +437,96 @@
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	kvm_x86_ops->pmu_ops->init(vcpu);
+	kvm_x86_ops.pmu_ops->init(vcpu);
 	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+	pmu->event_count = 0;
+	pmu->need_cleanup = false;
 	kvm_pmu_refresh(vcpu);
+}
+
+static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+	if (pmc_is_fixed(pmc))
+		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
+
+	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
+}
+
+/* Release perf_events for vPMCs that have been unused for a full time slice. */
+void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc = NULL;
+	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
+	int i;
+
+	pmu->need_cleanup = false;
+
+	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
+		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
+
+	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
+		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
+
+		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
+			pmc_stop_counter(pmc);
+	}
+
+	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
 
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_pmu_reset(vcpu);
 }
+
+int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
+{
+	struct kvm_pmu_event_filter tmp, *filter;
+	size_t size;
+	int r;
+
+	if (copy_from_user(&tmp, argp, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
+	    tmp.action != KVM_PMU_EVENT_DENY)
+		return -EINVAL;
+
+	if (tmp.flags != 0)
+		return -EINVAL;
+
+	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
+		return -E2BIG;
+
+	size = struct_size(filter, events, tmp.nevents);
+	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
+	if (!filter)
+		return -ENOMEM;
+
+	r = -EFAULT;
+	if (copy_from_user(filter, argp, size))
+		goto cleanup;
+
+	/* Ensure nevents can't be changed between the user copies. */
+	*filter = tmp;
+
+	/*
+	 * Sort the in-kernel list so that we can search it with bsearch.
+	 */
+	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+
+	mutex_lock(&kvm->lock);
+	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
+				     mutex_is_locked(&kvm->lock));
+	mutex_unlock(&kvm->lock);
+
+	synchronize_srcu_expedited(&kvm->srcu);
+	r = 0;
cleanup:
+	kfree(filter);
+	return r;
+}
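
For context, here is a minimal userspace sketch of how a VMM might program the KVM_SET_PMU_EVENT_FILTER ioctl that the diff above wires up. It is illustrative only and not part of the patch: it assumes a <linux/kvm.h> that exports struct kvm_pmu_event_filter, an already-created VM file descriptor vm_fd, and example event encodings ((unit mask << 8) | event select) chosen purely for demonstration.

/* Hedged sketch: allow-list two raw events on a VM fd. */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_pmu_allow_list(int vm_fd)
{
	/* Illustrative encodings: event select in bits 7:0, unit mask in 15:8. */
	const uint64_t events[] = {
		0x003c,	/* e.g. unhalted core cycles (event 0x3c, umask 0x00) */
		0x00c0,	/* e.g. instructions retired (event 0xc0, umask 0x00) */
	};
	size_t nevents = sizeof(events) / sizeof(events[0]);
	struct kvm_pmu_event_filter *filter;
	size_t size = sizeof(*filter) + nevents * sizeof(uint64_t);
	int ret;

	filter = calloc(1, size);
	if (!filter)
		return -1;

	filter->action = KVM_PMU_EVENT_ALLOW;	/* only listed events may count */
	filter->nevents = nevents;
	filter->fixed_counter_bitmap = 0;	/* with ALLOW, fixed counters stay off */
	filter->flags = 0;			/* must be zero per the kernel check */
	memcpy(filter->events, events, nevents * sizeof(uint64_t));

	/* VM-scoped ioctl; the kernel copies and sorts the list itself. */
	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
	if (ret < 0)
		perror("KVM_SET_PMU_EVENT_FILTER");

	free(filter);
	return ret;
}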