2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/x86/kvm/pmu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  *
@@ -7,20 +8,21 @@
  * Avi Kivity <avi@redhat.com>
  * Gleb Natapov <gleb@redhat.com>
  * Wei Huang <wei@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
  */
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <asm/perf_event.h>
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
 #include "pmu.h"
+
+/* This is enough to filter the vast majority of currently defined events. */
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 
 /* NOTE:
  * - Each perf counter is defined as "struct kvm_pmc";
@@ -62,8 +64,7 @@
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-	if (!test_and_set_bit(pmc->idx,
-			      (unsigned long *)&pmu->reprogram_pmi)) {
+	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 	}
@@ -76,8 +77,7 @@
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-	if (!test_and_set_bit(pmc->idx,
-			      (unsigned long *)&pmu->reprogram_pmi)) {
+	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 
@@ -97,7 +97,7 @@
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-				  unsigned config, bool exclude_user,
+				  u64 config, bool exclude_user,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
@@ -113,7 +113,7 @@
 		.config = config,
 	};
 
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+	attr.sample_period = get_sample_period(pmc, pmc->counter);
 
 	if (in_tx)
 		attr.config |= HSW_IN_TX;
@@ -137,42 +137,98 @@
 	}
 
 	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
+	pmc_to_pmu(pmc)->event_count++;
+	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+}
+
+static void pmc_pause_counter(struct kvm_pmc *pmc)
+{
+	u64 counter = pmc->counter;
+
+	if (!pmc->perf_event)
+		return;
+
+	/* update counter, reset event value to avoid redundant accumulation */
+	counter += perf_event_pause(pmc->perf_event, true);
+	pmc->counter = counter & pmc_bitmask(pmc);
+}
+
+static bool pmc_resume_counter(struct kvm_pmc *pmc)
+{
+	if (!pmc->perf_event)
+		return false;
+
+	/* recalibrate sample period and check if it's accepted by perf core */
+	if (perf_event_period(pmc->perf_event,
+			      get_sample_period(pmc, pmc->counter)))
+		return false;
+
+	/* reuse perf_event to serve as pmc_reprogram_counter() does*/
+	perf_event_enable(pmc->perf_event);
+
+	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+	return true;
+}
+
+static int cmp_u64(const void *pa, const void *pb)
+{
+	u64 a = *(u64 *)pa;
+	u64 b = *(u64 *)pb;
+
+	return (a > b) - (a < b);
 }
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-	unsigned config, type = PERF_TYPE_RAW;
-	u8 event_select, unit_mask;
+	u64 config;
+	u32 type = PERF_TYPE_RAW;
+	struct kvm *kvm = pmc->vcpu->kvm;
+	struct kvm_pmu_event_filter *filter;
+	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
+	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
 
 	pmc->eventsel = eventsel;
 
-	pmc_stop_counter(pmc);
+	pmc_pause_counter(pmc);
 
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
-	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (filter) {
+		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
+	}
+	if (!allow_event)
+		return;
 
 	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
 			  ARCH_PERFMON_EVENTSEL_INV |
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-						      event_select,
-						      unit_mask);
+		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
 		if (config != PERF_COUNT_HW_MAX)
 			type = PERF_TYPE_HARDWARE;
 	}
 
 	if (type == PERF_TYPE_RAW)
-		config = eventsel & AMD64_RAW_EVENT_MASK;
+		config = eventsel & pmu->raw_event_mask;
 
+	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+		return;
+
+	pmc_release_perf_event(pmc);
+
+	pmc->current_config = eventsel;
 	pmc_reprogram_counter(pmc, type, config,
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
@@ -186,14 +242,32 @@
 {
 	unsigned en_field = ctrl & 0x3;
 	bool pmi = ctrl & 0x8;
+	struct kvm_pmu_event_filter *filter;
+	struct kvm *kvm = pmc->vcpu->kvm;
 
-	pmc_stop_counter(pmc);
+	pmc_pause_counter(pmc);
 
 	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (filter) {
+		if (filter->action == KVM_PMU_EVENT_DENY &&
+		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			return;
+		if (filter->action == KVM_PMU_EVENT_ALLOW &&
+		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
+			return;
+	}
+
+	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
+		return;
+
+	pmc_release_perf_event(pmc);
+
+	pmc->current_config = (u64)ctrl;
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
+			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
 			      !(en_field & 0x2), /* exclude user */
 			      !(en_field & 0x1), /* exclude kernel */
 			      pmi, false, false);
@@ -202,7 +276,7 @@
 
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
+	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
 
 	if (!pmc)
 		return;
@@ -221,27 +295,32 @@
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	u64 bitmask;
 	int bit;
 
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
+	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);
 
 		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			clear_bit(bit, pmu->reprogram_pmi);
 			continue;
 		}
 
 		reprogram_counter(pmu, bit);
 	}
+
+	/*
+	 * Unused perf_events are only released if the corresponding MSRs
+	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
+	 * triggers KVM_REQ_PMU if cleanup is needed.
+	 */
+	if (unlikely(pmu->need_cleanup))
+		kvm_pmu_cleanup(vcpu);
 }
 
 /* check if idx is a valid index to access PMU */
-int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
+int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
-	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
+	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -264,10 +343,10 @@
 		ctr_val = rdtsc();
 		break;
 	case VMWARE_BACKDOOR_PMC_REAL_TIME:
-		ctr_val = ktime_get_boot_ns();
+		ctr_val = ktime_get_boottime_ns();
 		break;
 	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
-		ctr_val = ktime_get_boot_ns() +
+		ctr_val = ktime_get_boottime_ns() +
 			  vcpu->kvm->arch.kvmclock_offset;
 		break;
 	default:
@@ -281,14 +360,23 @@
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	bool fast_mode = idx & (1u << 31);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 	u64 mask = fast_mode ? ~0u : ~0ull;
+
+	if (!pmu->version)
+		return 1;
 
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
+	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
 	if (!pmc)
+		return 1;
+
+	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
+	    (kvm_x86_ops.get_cpl(vcpu) != 0) &&
+	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
 		return 1;
 
 	*data = pmc_read_counter(pmc) & mask;
@@ -303,17 +391,28 @@
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
+	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
+		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
 }
 
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);
+
+	if (pmc)
+		__set_bit(pmc->idx, pmu->pmc_in_use);
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
+	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
 }
 
 /* refresh PMU settings. This function generally is called when underlying
@@ -322,7 +421,7 @@
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->pmu_ops->refresh(vcpu);
+	kvm_x86_ops.pmu_ops->refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
327426
328427 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
....@@ -330,7 +429,7 @@
330429 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
331430
332431 irq_work_sync(&pmu->irq_work);
333
- kvm_x86_ops->pmu_ops->reset(vcpu);
432
+ kvm_x86_ops.pmu_ops->reset(vcpu);
334433 }
335434
336435 void kvm_pmu_init(struct kvm_vcpu *vcpu)
....@@ -338,12 +437,96 @@
338437 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
339438
340439 memset(pmu, 0, sizeof(*pmu));
341
- kvm_x86_ops->pmu_ops->init(vcpu);
440
+ kvm_x86_ops.pmu_ops->init(vcpu);
342441 init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
442
+ pmu->event_count = 0;
443
+ pmu->need_cleanup = false;
343444 kvm_pmu_refresh(vcpu);
445
+}
446
+
447
+static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
448
+{
449
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
450
+
451
+ if (pmc_is_fixed(pmc))
452
+ return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
453
+ pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
454
+
455
+ return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
456
+}
457
+
458
+/* Release perf_events for vPMCs that have been unused for a full time slice. */
459
+void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
460
+{
461
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
462
+ struct kvm_pmc *pmc = NULL;
463
+ DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
464
+ int i;
465
+
466
+ pmu->need_cleanup = false;
467
+
468
+ bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
469
+ pmu->pmc_in_use, X86_PMC_IDX_MAX);
470
+
471
+ for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
472
+ pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
473
+
474
+ if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
475
+ pmc_stop_counter(pmc);
476
+ }
477
+
478
+ bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
344479 }
345480
346481 void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
347482 {
348483 kvm_pmu_reset(vcpu);
349484 }
485
+
486
+int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
487
+{
488
+ struct kvm_pmu_event_filter tmp, *filter;
489
+ size_t size;
490
+ int r;
491
+
492
+ if (copy_from_user(&tmp, argp, sizeof(tmp)))
493
+ return -EFAULT;
494
+
495
+ if (tmp.action != KVM_PMU_EVENT_ALLOW &&
496
+ tmp.action != KVM_PMU_EVENT_DENY)
497
+ return -EINVAL;
498
+
499
+ if (tmp.flags != 0)
500
+ return -EINVAL;
501
+
502
+ if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
503
+ return -E2BIG;
504
+
505
+ size = struct_size(filter, events, tmp.nevents);
506
+ filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
507
+ if (!filter)
508
+ return -ENOMEM;
509
+
510
+ r = -EFAULT;
511
+ if (copy_from_user(filter, argp, size))
512
+ goto cleanup;
513
+
514
+ /* Ensure nevents can't be changed between the user copies. */
515
+ *filter = tmp;
516
+
517
+ /*
518
+ * Sort the in-kernel list so that we can search it with bsearch.
519
+ */
520
+ sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
521
+
522
+ mutex_lock(&kvm->lock);
523
+ filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
524
+ mutex_is_locked(&kvm->lock));
525
+ mutex_unlock(&kvm->lock);
526
+
527
+ synchronize_srcu_expedited(&kvm->srcu);
528
+ r = 0;
529
+cleanup:
530
+ kfree(filter);
531
+ return r;
532
+}
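
For context on how the filter path added above is exercised from userspace, here is a minimal sketch (not part of the patch) of installing an allow-list through the KVM_SET_PMU_EVENT_FILTER VM ioctl. It assumes the struct kvm_pmu_event_filter layout and the KVM_PMU_EVENT_ALLOW constant from the kernel uapi headers; the helper name and the idea of passing raw event selects are illustrative only, and the event encodings themselves are CPU-specific.

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: restrict the guest vPMU to the listed raw event selects. */
static int set_pmu_allow_list(int vm_fd, const __u64 *events, __u32 nevents)
{
	struct kvm_pmu_event_filter *f;
	size_t size = sizeof(*f) + nevents * sizeof(__u64);
	int ret;

	f = calloc(1, size);
	if (!f)
		return -1;

	f->action = KVM_PMU_EVENT_ALLOW;	/* only listed events may be programmed */
	f->nevents = nevents;
	f->fixed_counter_bitmap = ~0u;		/* leave all fixed counters usable */
	memcpy(f->events, events, nevents * sizeof(__u64));

	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);	/* VM-scoped ioctl */
	free(f);
	return ret;
}

The events[] entries carry the same select/umask encoding that reprogram_gp_counter() masks out and bsearch()es against in the kernel-side code of this patch; the kernel copies, sorts, and publishes the list under kvm->srcu, so the filter takes effect for subsequent counter programming.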