.. | .. |
---|
24 | 24 | static struct pmu etm_pmu; |
---|
25 | 25 | static bool etm_perf_up; |
---|
26 | 26 | |
---|
27 | | -static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); |
---|
| 27 | +/* |
---|
| 28 | + * An ETM context for a running event includes the perf aux handle |
---|
| 29 | + * and aux_data. For ETM, the aux_data (etm_event_data), consists of |
---|
| 30 | + * the trace path and the sink configuration. The event data is accessible |
---|
| 31 | + * via perf_get_aux(handle). However, a sink could "end" a perf output |
---|
| 32 | + * handle via the IRQ handler. And if the "sink" encounters a failure |
---|
| 33 | + * to "begin" another session (e.g. due to lack of space in the buffer), |
---|
| 34 | + * the handle will be cleared. Thus, the event_data may not be accessible |
---|
| 35 | + * from the handle when we get to the etm_event_stop(), which is required |
---|
| 36 | + * for stopping the trace path. The event_data is guaranteed to stay alive |
---|
| 37 | + * until "free_aux()", which cannot happen as long as the event is active on |
---|
| 38 | + * the ETM. Thus the event_data for the session must be part of the ETM context |
---|
| 39 | + * to make sure we can disable the trace path. |
---|
| 40 | + */ |
---|
| 41 | +struct etm_ctxt { |
---|
| 42 | + struct perf_output_handle handle; |
---|
| 43 | + struct etm_event_data *event_data; |
---|
| 44 | +}; |
---|
| 45 | + |
---|
| 46 | +static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt); |
---|
28 | 47 | static DEFINE_PER_CPU(struct coresight_device *, csdev_src); |
---|
29 | 48 | |
---|
30 | | -/* ETMv3.5/PTM's ETMCR is 'config' */ |
---|
| 49 | +/* |
---|
| 50 | + * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config'; |
---|
| 51 | + * now take them as general formats and apply them to all ETMs. |
---|
| 52 | + */ |
---|
31 | 53 | PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC)); |
---|
32 | | -PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID)); |
---|
| 54 | +/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */ |
---|
| 55 | +PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID)); |
---|
| 56 | +/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */ |
---|
| 57 | +PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2)); |
---|
33 | 58 | PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); |
---|
34 | 59 | PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK)); |
---|
35 | 60 | /* Sink ID - same for all ETMs */ |
---|
36 | 61 | PMU_FORMAT_ATTR(sinkid, "config2:0-31"); |
---|
37 | 62 | |
---|
| 63 | +/* |
---|
| 64 | + * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1 |
---|
| 65 | + * when the kernel is running at EL1; when the kernel is at EL2, |
---|
| 66 | + * the PID is in CONTEXTIDR_EL2. |
---|
| 67 | + */ |
---|
| 68 | +static ssize_t format_attr_contextid_show(struct device *dev, |
---|
| 69 | + struct device_attribute *attr, |
---|
| 70 | + char *page) |
---|
| 71 | +{ |
---|
| 72 | + int pid_fmt = ETM_OPT_CTXTID; |
---|
| 73 | + |
---|
| 74 | +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) |
---|
| 75 | + pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID; |
---|
| 76 | +#endif |
---|
| 77 | + return sprintf(page, "config:%d\n", pid_fmt); |
---|
| 78 | +} |
---|
| 79 | + |
---|
| 80 | +struct device_attribute format_attr_contextid = |
---|
| 81 | + __ATTR(contextid, 0444, format_attr_contextid_show, NULL); |
---|
| 82 | + |
---|
38 | 83 | static struct attribute *etm_config_formats_attr[] = { |
---|
39 | 84 | &format_attr_cycacc.attr, |
---|
40 | 85 | &format_attr_contextid.attr, |
---|
| 86 | + &format_attr_contextid1.attr, |
---|
| 87 | + &format_attr_contextid2.attr, |
---|
41 | 88 | &format_attr_timestamp.attr, |
---|
42 | 89 | &format_attr_retstack.attr, |
---|
43 | 90 | &format_attr_sinkid.attr, |
---|
.. | .. |
---|
204 | 251 | schedule_work(&event_data->work); |
---|
205 | 252 | } |
---|
206 | 253 | |
---|
| 254 | +/* |
---|
| 255 | + * Check if two given sinks are compatible with each other, |
---|
| 256 | + * so that they can use the same sink buffers, when an event |
---|
| 257 | + * moves around. |
---|
| 258 | + */ |
---|
| 259 | +static bool sinks_compatible(struct coresight_device *a, |
---|
| 260 | + struct coresight_device *b) |
---|
| 261 | +{ |
---|
| 262 | + if (!a || !b) |
---|
| 263 | + return false; |
---|
| 264 | + /* |
---|
| 265 | + * If the sinks are of the same subtype and driven |
---|
| 266 | + * by the same driver, we can use the same buffer |
---|
| 267 | + * on these sinks. |
---|
| 268 | + */ |
---|
| 269 | + return (a->subtype.sink_subtype == b->subtype.sink_subtype) && |
---|
| 270 | + (sink_ops(a) == sink_ops(b)); |
---|
| 271 | +} |
---|
| 272 | + |
---|
207 | 273 | static void *etm_setup_aux(struct perf_event *event, void **pages, |
---|
208 | 274 | int nr_pages, bool overwrite) |
---|
209 | 275 | { |
---|
210 | 276 | u32 id; |
---|
211 | 277 | int cpu = event->cpu; |
---|
212 | 278 | cpumask_t *mask; |
---|
213 | | - struct coresight_device *sink; |
---|
| 279 | + struct coresight_device *sink = NULL; |
---|
| 280 | + struct coresight_device *user_sink = NULL, *last_sink = NULL; |
---|
214 | 281 | struct etm_event_data *event_data = NULL; |
---|
215 | 282 | |
---|
216 | 283 | event_data = alloc_event_data(cpu); |
---|
.. | .. |
---|
221 | 288 | /* First get the selected sink from user space. */ |
---|
222 | 289 | if (event->attr.config2) { |
---|
223 | 290 | id = (u32)event->attr.config2; |
---|
224 | | - sink = coresight_get_sink_by_id(id); |
---|
225 | | - } else { |
---|
226 | | - sink = coresight_get_enabled_sink(true); |
---|
| 291 | + sink = user_sink = coresight_get_sink_by_id(id); |
---|
227 | 292 | } |
---|
228 | | - |
---|
229 | | - if (!sink) |
---|
230 | | - goto err; |
---|
231 | 293 | |
---|
232 | 294 | mask = &event_data->mask; |
---|
233 | 295 | |
---|
.. | .. |
---|
254 | 316 | } |
---|
255 | 317 | |
---|
256 | 318 | /* |
---|
| 319 | + * No sink provided - look for a default sink for all the ETMs, |
---|
| 320 | + * where this event can be scheduled. |
---|
| 321 | + * We allocate the sink specific buffers only once for this |
---|
| 322 | + * event. If the ETMs have different default sink devices, we |
---|
| 323 | + * can only use a single "type" of sink as the event can carry |
---|
| 324 | + * only one sink specific buffer. Thus we have to make sure |
---|
| 325 | + * that the sinks are of the same type and driven by the same |
---|
| 326 | + * driver, as the one we allocate the buffer for. As such |
---|
| 327 | + * we choose the first sink and check if the remaining ETMs |
---|
| 328 | + * have a compatible default sink. We don't trace on a CPU |
---|
| 329 | + * if the sink is not compatible. |
---|
| 330 | + */ |
---|
| 331 | + if (!user_sink) { |
---|
| 332 | + /* Find the default sink for this ETM */ |
---|
| 333 | + sink = coresight_find_default_sink(csdev); |
---|
| 334 | + if (!sink) { |
---|
| 335 | + cpumask_clear_cpu(cpu, mask); |
---|
| 336 | + continue; |
---|
| 337 | + } |
---|
| 338 | + |
---|
| 339 | + /* Check if this sink is compatible with the last sink */ |
---|
| 340 | + if (last_sink && !sinks_compatible(last_sink, sink)) { |
---|
| 341 | + cpumask_clear_cpu(cpu, mask); |
---|
| 342 | + continue; |
---|
| 343 | + } |
---|
| 344 | + last_sink = sink; |
---|
| 345 | + } |
---|
| 346 | + |
---|
| 347 | + /* |
---|
257 | 348 | * Building a path doesn't enable it, it simply builds a |
---|
258 | 349 | * list of devices from source to sink that can be |
---|
259 | 350 | * referenced later when the path is actually needed. |
---|
.. | .. |
---|
267 | 358 | *etm_event_cpu_path_ptr(event_data, cpu) = path; |
---|
268 | 359 | } |
---|
269 | 360 | |
---|
| 361 | + /* no sink found for any CPU - cannot trace */ |
---|
| 362 | + if (!sink) |
---|
| 363 | + goto err; |
---|
| 364 | + |
---|
270 | 365 | /* If we don't have any CPUs ready for tracing, abort */ |
---|
271 | 366 | cpu = cpumask_first(mask); |
---|
272 | 367 | if (cpu >= nr_cpu_ids) |
---|
.. | .. |
---|
275 | 370 | if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer) |
---|
276 | 371 | goto err; |
---|
277 | 372 | |
---|
278 | | - /* Allocate the sink buffer for this session */ |
---|
| 373 | + /* |
---|
| 374 | + * Allocate the sink buffer for this session. All the sinks |
---|
| 375 | + * where this event can be scheduled are ensured to be of the |
---|
| 376 | + * same type. Thus the same sink configuration is used by the |
---|
| 377 | + * sinks. |
---|
| 378 | + */ |
---|
279 | 379 | event_data->snk_config = |
---|
280 | 380 | sink_ops(sink)->alloc_buffer(sink, event, pages, |
---|
281 | 381 | nr_pages, overwrite); |
---|
.. | .. |
---|
295 | 395 | { |
---|
296 | 396 | int cpu = smp_processor_id(); |
---|
297 | 397 | struct etm_event_data *event_data; |
---|
298 | | - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); |
---|
| 398 | + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); |
---|
| 399 | + struct perf_output_handle *handle = &ctxt->handle; |
---|
299 | 400 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); |
---|
300 | 401 | struct list_head *path; |
---|
301 | 402 | |
---|
302 | 403 | if (!csdev) |
---|
| 404 | + goto fail; |
---|
| 405 | + |
---|
| 406 | + /* Have we messed up our tracking ? */ |
---|
| 407 | + if (WARN_ON(ctxt->event_data)) |
---|
303 | 408 | goto fail; |
---|
304 | 409 | |
---|
305 | 410 | /* |
---|
.. | .. |
---|
337 | 442 | if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) |
---|
338 | 443 | goto fail_disable_path; |
---|
339 | 444 | |
---|
| 445 | + /* Save the event_data for this ETM */ |
---|
| 446 | + ctxt->event_data = event_data; |
---|
340 | 447 | out: |
---|
341 | 448 | return; |
---|
342 | 449 | |
---|
.. | .. |
---|
355 | 462 | int cpu = smp_processor_id(); |
---|
356 | 463 | unsigned long size; |
---|
357 | 464 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); |
---|
358 | | - struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); |
---|
359 | | - struct etm_event_data *event_data = perf_get_aux(handle); |
---|
| 465 | + struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); |
---|
| 466 | + struct perf_output_handle *handle = &ctxt->handle; |
---|
| 467 | + struct etm_event_data *event_data; |
---|
360 | 468 | struct list_head *path; |
---|
361 | 469 | |
---|
| 470 | + /* |
---|
| 471 | + * If we still have access to the event_data via handle, |
---|
| 472 | + * confirm that we haven't messed up the tracking. |
---|
| 473 | + */ |
---|
| 474 | + if (handle->event && |
---|
| 475 | + WARN_ON(perf_get_aux(handle) != ctxt->event_data)) |
---|
| 476 | + return; |
---|
| 477 | + |
---|
| 478 | + event_data = ctxt->event_data; |
---|
| 479 | + /* Clear the event_data as this ETM is stopping the trace. */ |
---|
| 480 | + ctxt->event_data = NULL; |
---|
| 481 | + |
---|
362 | 482 | if (event->hw.state == PERF_HES_STOPPED) |
---|
| 483 | + return; |
---|
| 484 | + |
---|
| 485 | + /* We must have a valid event_data for a running event */ |
---|
| 486 | + if (WARN_ON(!event_data)) |
---|
363 | 487 | return; |
---|
364 | 488 | |
---|
365 | 489 | if (!csdev) |
---|
.. | .. |
---|
379 | 503 | /* tell the core */ |
---|
380 | 504 | event->hw.state = PERF_HES_STOPPED; |
---|
381 | 505 | |
---|
382 | | - if (mode & PERF_EF_UPDATE) { |
---|
| 506 | + /* |
---|
| 507 | + * If the handle is not bound to an event anymore |
---|
| 508 | + * (e.g, the sink driver was unable to restart the |
---|
| 509 | + * handle due to lack of buffer space), we don't |
---|
| 510 | + * have to do anything here. |
---|
| 511 | + */ |
---|
| 512 | + if (handle->event && (mode & PERF_EF_UPDATE)) { |
---|
383 | 513 | if (WARN_ON_ONCE(handle->event != event)) |
---|
384 | 514 | return; |
---|
385 | 515 | |
---|
.. | .. |
---|
516 | 646 | |
---|
517 | 647 | return 0; |
---|
518 | 648 | } |
---|
| 649 | +EXPORT_SYMBOL_GPL(etm_perf_symlink); |
---|
519 | 650 | |
---|
520 | 651 | static ssize_t etm_perf_sink_name_show(struct device *dev, |
---|
521 | 652 | struct device_attribute *dattr, |
---|
.. | .. |
---|
589 | 720 | csdev->ea = NULL; |
---|
590 | 721 | } |
---|
591 | 722 | |
---|
592 | | -static int __init etm_perf_init(void) |
---|
| 723 | +int __init etm_perf_init(void) |
---|
593 | 724 | { |
---|
594 | 725 | int ret; |
---|
595 | 726 | |
---|
.. | .. |
---|
609 | 740 | etm_pmu.addr_filters_sync = etm_addr_filters_sync; |
---|
610 | 741 | etm_pmu.addr_filters_validate = etm_addr_filters_validate; |
---|
611 | 742 | etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX; |
---|
| 743 | + etm_pmu.module = THIS_MODULE; |
---|
612 | 744 | |
---|
613 | 745 | ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1); |
---|
614 | 746 | if (ret == 0) |
---|
.. | .. |
---|
616 | 748 | |
---|
617 | 749 | return ret; |
---|
618 | 750 | } |
---|
619 | | -device_initcall(etm_perf_init); |
---|
| 751 | + |
---|
| 752 | +void __exit etm_perf_exit(void) |
---|
| 753 | +{ |
---|
| 754 | + perf_pmu_unregister(&etm_pmu); |
---|
| 755 | +} |
---|