@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Hypervisor supplied "gpci" ("get performance counter info") performance
  * counter support
  *
  * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
  * Copyright 2014 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) "hv-gpci: " fmt
@@ -52,6 +48,8 @@
 /* u32, byte offset */
 EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
 
+static cpumask_t hv_gpci_cpumask;
+
 static struct attribute *format_attrs[] = {
         &format_attr_request.attr,
         &format_attr_starting_index.attr,
@@ -74,7 +72,7 @@
 
 static struct attribute_group event_group = {
         .name = "events",
-        .attrs = hv_gpci_event_attrs,
+        /* .attrs is set in init */
 };
 
 #define HV_CAPS_ATTR(_name, _format) \
@@ -98,7 +96,15 @@
         return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
 }
 
+static ssize_t cpumask_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+        return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
+}
+
 static DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(cpumask);
+
 HV_CAPS_ATTR(version, "0x%x\n");
 HV_CAPS_ATTR(ga, "%d\n");
 HV_CAPS_ATTR(expanded, "%d\n");
@@ -115,6 +121,15 @@
         NULL,
 };
 
+static struct attribute *cpumask_attrs[] = {
+        &dev_attr_cpumask.attr,
+        NULL,
+};
+
+static struct attribute_group cpumask_attr_group = {
+        .attrs = cpumask_attrs,
+};
+
 static struct attribute_group interface_group = {
         .name = "interface",
         .attrs = interface_attrs,
@@ -124,19 +139,11 @@
         &format_group,
         &event_group,
         &interface_group,
+        &cpumask_attr_group,
         NULL,
 };
 
-#define HGPCI_REQ_BUFFER_SIZE 4096
-#define HGPCI_MAX_DATA_BYTES \
-        (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
-
 static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
-
-struct hv_gpci_request_buffer {
-        struct hv_get_perf_counter_info_params params;
-        uint8_t bytes[HGPCI_MAX_DATA_BYTES];
-} __packed;
 
 static unsigned long single_gpci_request(u32 req, u32 starting_index,
                 u16 secondary_index, u8 version_in, u32 offset, u8 length,
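Note on how the new cpumask attribute is consumed (not part of the patch): system-wide PMUs conventionally publish a cpumask file in sysfs so that tools such as perf open each event once, on one of the listed CPUs, rather than once per CPU. The following is a minimal userspace sketch of that convention. It assumes the PMU appears under /sys/bus/event_source/devices/hv_gpci/ (i.e. that h_gpci_pmu registers under the name "hv_gpci"), and the raw event encoding passed on the command line is only a placeholder.

/* Hedged userspace sketch: read the PMU's type and cpumask from sysfs,
 * then open a counting event on the first CPU listed in the mask.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long read_sysfs_long(const char *path)
{
        FILE *f = fopen(path, "r");
        long v = -1;

        if (f) {
                /* a cpumask list such as "0" or "0-3" starts with the first CPU */
                if (fscanf(f, "%ld", &v) != 1)
                        v = -1;
                fclose(f);
        }
        return v;
}

int main(int argc, char **argv)
{
        struct perf_event_attr attr;
        uint64_t count;
        long type, cpu;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <raw event config>\n", argv[0]);
                return 1;
        }

        type = read_sysfs_long("/sys/bus/event_source/devices/hv_gpci/type");
        cpu = read_sysfs_long("/sys/bus/event_source/devices/hv_gpci/cpumask");
        if (type < 0 || cpu < 0) {
                fprintf(stderr, "hv_gpci PMU not found in sysfs\n");
                return 1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;                         /* dynamic PMU type id */
        attr.config = strtoull(argv[1], NULL, 0); /* placeholder encoding */

        /* system-wide event: pid == -1, bound to the CPU advertised in cpumask */
        fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        sleep(1);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("count: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}

The perf tool follows the same convention internally when a PMU exports a cpumask, which is why the patch migrates events off an outgoing collector CPU in the hotplug callbacks further down.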
@@ -232,15 +239,6 @@
                 return -EINVAL;
         }
 
-        /* unsupported modes and filters */
-        if (event->attr.exclude_user   ||
-            event->attr.exclude_kernel ||
-            event->attr.exclude_hv     ||
-            event->attr.exclude_idle   ||
-            event->attr.exclude_host   ||
-            event->attr.exclude_guest)
-                return -EINVAL;
-
         /* no branch sampling */
         if (has_branch_stack(event))
                 return -EOPNOTSUPP;
@@ -285,13 +283,54 @@
         .start = h_gpci_event_start,
         .stop = h_gpci_event_stop,
         .read = h_gpci_event_update,
+        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
 };
+
+static int ppc_hv_gpci_cpu_online(unsigned int cpu)
+{
+        if (cpumask_empty(&hv_gpci_cpumask))
+                cpumask_set_cpu(cpu, &hv_gpci_cpumask);
+
+        return 0;
+}
+
+static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
+{
+        int target;
+
+        /* Check if exiting cpu is used for collecting gpci events */
+        if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
+                return 0;
+
+        /* Find a new cpu to collect gpci events */
+        target = cpumask_last(cpu_active_mask);
+
+        if (target < 0 || target >= nr_cpu_ids) {
+                pr_err("hv_gpci: CPU hotplug init failed\n");
+                return -1;
+        }
+
+        /* Migrate gpci events to the new target */
+        cpumask_set_cpu(target, &hv_gpci_cpumask);
+        perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);
+
+        return 0;
+}
+
+static int hv_gpci_cpu_hotplug_init(void)
+{
+        return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+                          "perf/powerpc/hv_gcpi:online",
+                          ppc_hv_gpci_cpu_online,
+                          ppc_hv_gpci_cpu_offline);
+}
 
 static int hv_gpci_init(void)
 {
         int r;
         unsigned long hret;
         struct hv_perf_caps caps;
+        struct hv_gpci_request_buffer *arg;
 
         hv_gpci_assert_offsets_correct();
 
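The online/offline pair above keeps exactly one CPU in hv_gpci_cpumask: the first CPU to come online becomes the collector, and when it goes offline the callback clears it, picks the last CPU in cpu_active_mask as the replacement, and moves any open events there with perf_pmu_migrate_context(). As a rough, standalone illustration of the same cpuhp mechanism (a hedged sketch, not the patch's code: it uses the dynamic CPUHP_AP_ONLINE_DYN slot and a pr_info() in place of the dedicated CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE state and PMU migration, and all demo_* names are invented for the example):

// Hedged sketch: a self-contained module wiring an online/offline pair
// through cpuhp_setup_state(), analogous to hv_gpci_cpu_hotplug_init().
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/module.h>

static cpumask_t demo_cpumask;  /* the single CPU "owning" the counters */
static int demo_hp_state;       /* dynamic state returned by cpuhp_setup_state() */

static int demo_cpu_online(unsigned int cpu)
{
        /* First CPU to come online becomes the owner. */
        if (cpumask_empty(&demo_cpumask))
                cpumask_set_cpu(cpu, &demo_cpumask);
        return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
        int target;

        /* Nothing to do unless the owning CPU is the one going away. */
        if (!cpumask_test_and_clear_cpu(cpu, &demo_cpumask))
                return 0;

        /* Hand ownership to any remaining active CPU. */
        target = cpumask_last(cpu_active_mask);
        if (target < 0 || target >= nr_cpu_ids)
                return -1;

        cpumask_set_cpu(target, &demo_cpumask);
        pr_info("demo: moved ownership from CPU %u to CPU %d\n", cpu, target);
        return 0;
}

static int __init demo_init(void)
{
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/cpuhp:online",
                                    demo_cpu_online, demo_cpu_offline);
        if (ret < 0)
                return ret;
        demo_hp_state = ret;    /* remember the dynamically allocated state */
        return 0;
}

static void __exit demo_exit(void)
{
        cpuhp_remove_state(demo_hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");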
@@ -307,9 +346,44 @@
                 return -ENODEV;
         }
 
+        /* init cpuhotplug */
+        r = hv_gpci_cpu_hotplug_init();
+        if (r)
+                return r;
+
         /* sampling not supported */
         h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
+        arg = (void *)get_cpu_var(hv_gpci_reqb);
+        memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
+
+        /*
+         * hcall H_GET_PERF_COUNTER_INFO populates the output
+         * counter_info_version value based on the system hypervisor.
+         * Pass the counter request 0x10 corresponds to request type
+         * 'Dispatch_timebase_by_processor', to get the supported
+         * counter_info_version.
+         */
+        arg->params.counter_request = cpu_to_be32(0x10);
+
+        r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
+                        virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
+        if (r) {
+                pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r);
+                arg->params.counter_info_version_out = 0x8;
+        }
+
+        /*
+         * Use counter_info_version_out value to assign
+         * required hv-gpci event list.
+         */
+        if (arg->params.counter_info_version_out >= 0x8)
+                event_group.attrs = hv_gpci_event_attrs;
+        else
+                event_group.attrs = hv_gpci_event_attrs_v6;
+
+        put_cpu_var(hv_gpci_reqb);
+
         r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
         if (r)
                 return r;
---|