From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt

Sync tools/perf/util/stat.c with the upstream libperf rename
(struct perf_evsel/perf_evlist -> struct evsel/evlist), add the
topdown-retiring/bad-spec/fe-bound/be-bound event IDs, handle the
AGGR_DIE and AGGR_NODE aggregation modes, size the per-package mask
with cpu__max_cpu() instead of MAX_NR_CPUS, add helpers that copy and
save prev_raw_counts for interval/summary mode, and move
create_perf_stat_counter() into stat.c.
---
 kernel/tools/perf/util/stat.c | 257 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 186 insertions(+), 71 deletions(-)

diff --git a/kernel/tools/perf/util/stat.c b/kernel/tools/perf/util/stat.c
index b254bee..bd0decd 100644
--- a/kernel/tools/perf/util/stat.c
+++ b/kernel/tools/perf/util/stat.c
@@ -2,10 +2,18 @@
 #include <errno.h>
 #include <inttypes.h>
 #include <math.h>
+#include <string.h>
+#include "counts.h"
+#include "cpumap.h"
+#include "debug.h"
+#include "header.h"
 #include "stat.h"
+#include "session.h"
+#include "target.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "thread_map.h"
+#include <linux/zalloc.h>
 
 void update_stats(struct stats *stats, u64 val)
 {
@@ -67,7 +75,7 @@
 	return pct;
 }
 
-bool __perf_evsel_stat__is(struct perf_evsel *evsel,
+bool __perf_evsel_stat__is(struct evsel *evsel,
 			   enum perf_stat_evsel_id id)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
@@ -87,12 +95,16 @@
 	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
 	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
 	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
+	ID(TOPDOWN_RETIRING, topdown-retiring),
+	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
+	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
+	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
 	ID(SMI_NUM, msr/smi/),
 	ID(APERF, msr/aperf/),
 };
 #undef ID
 
-static void perf_stat_evsel_id_init(struct perf_evsel *evsel)
+static void perf_stat_evsel_id_init(struct evsel *evsel)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
 	int i;
@@ -100,14 +112,14 @@
 
 	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
 	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
-		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
+		if (!strcmp(evsel__name(evsel), id_str[i])) {
 			ps->id = i;
 			break;
 		}
 	}
 }
 
-static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+static void evsel__reset_stat_priv(struct evsel *evsel)
 {
 	int i;
 	struct perf_stat_evsel *ps = evsel->stats;
@@ -118,26 +130,25 @@
 	perf_stat_evsel_id_init(evsel);
 }
 
-static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+static int evsel__alloc_stat_priv(struct evsel *evsel)
 {
 	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
 	if (evsel->stats == NULL)
 		return -ENOMEM;
-	perf_evsel__reset_stat_priv(evsel);
+	evsel__reset_stat_priv(evsel);
 	return 0;
 }
 
-static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+static void evsel__free_stat_priv(struct evsel *evsel)
 {
 	struct perf_stat_evsel *ps = evsel->stats;
 
 	if (ps)
-		free(ps->group_data);
+		zfree(&ps->group_data);
 	zfree(&evsel->stats);
 }
 
-static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
-					     int ncpus, int nthreads)
+static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
 {
 	struct perf_counts *counts;
 
@@ -148,40 +159,37 @@
 	return counts ? 0 : -ENOMEM;
 }
 
-static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+static void evsel__free_prev_raw_counts(struct evsel *evsel)
 {
 	perf_counts__delete(evsel->prev_raw_counts);
 	evsel->prev_raw_counts = NULL;
 }
 
-static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+static void evsel__reset_prev_raw_counts(struct evsel *evsel)
 {
-	if (evsel->prev_raw_counts) {
-		evsel->prev_raw_counts->aggr.val = 0;
-		evsel->prev_raw_counts->aggr.ena = 0;
-		evsel->prev_raw_counts->aggr.run = 0;
-	}
+	if (evsel->prev_raw_counts)
+		perf_counts__reset(evsel->prev_raw_counts);
 }
 
-static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
 {
-	int ncpus = perf_evsel__nr_cpus(evsel);
-	int nthreads = thread_map__nr(evsel->threads);
+	int ncpus = evsel__nr_cpus(evsel);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);
 
-	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
-	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
-	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
+	if (evsel__alloc_stat_priv(evsel) < 0 ||
+	    evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
+	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
 		return -ENOMEM;
 
 	return 0;
 }
 
-int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
+int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		if (perf_evsel__alloc_stats(evsel, alloc_raw))
+		if (evsel__alloc_stats(evsel, alloc_raw))
 			goto out_free;
 	}
 
@@ -192,46 +200,90 @@
 	return -1;
 }
 
-void perf_evlist__free_stats(struct perf_evlist *evlist)
+void perf_evlist__free_stats(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__free_stat_priv(evsel);
-		perf_evsel__free_counts(evsel);
-		perf_evsel__free_prev_raw_counts(evsel);
+		evsel__free_stat_priv(evsel);
+		evsel__free_counts(evsel);
+		evsel__free_prev_raw_counts(evsel);
 	}
 }
 
-void perf_evlist__reset_stats(struct perf_evlist *evlist)
+void perf_evlist__reset_stats(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel) {
-		perf_evsel__reset_stat_priv(evsel);
-		perf_evsel__reset_counts(evsel);
+		evsel__reset_stat_priv(evsel);
+		evsel__reset_counts(evsel);
	}
 }
 
-void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 
 	evlist__for_each_entry(evlist, evsel)
-		perf_evsel__reset_prev_raw_counts(evsel);
+		evsel__reset_prev_raw_counts(evsel);
 }
 
-static void zero_per_pkg(struct perf_evsel *counter)
+static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
+{
+	int ncpus = evsel__nr_cpus(evsel);
+	int nthreads = perf_thread_map__nr(evsel->core.threads);
+
+	for (int thread = 0; thread < nthreads; thread++) {
+		for (int cpu = 0; cpu < ncpus; cpu++) {
+			*perf_counts(evsel->counts, cpu, thread) =
+				*perf_counts(evsel->prev_raw_counts, cpu,
+					     thread);
+		}
+	}
+
+	evsel->counts->aggr = evsel->prev_raw_counts->aggr;
+}
+
+void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		perf_evsel__copy_prev_raw_counts(evsel);
+}
+
+void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	/*
+	 * To collect the overall statistics for interval mode,
+	 * we copy the counts from evsel->prev_raw_counts to
+	 * evsel->counts. The perf_stat_process_counter creates
+	 * aggr values from per cpu values, but the per cpu values
+	 * are 0 for AGGR_GLOBAL. So we use a trick that saves the
+	 * previous aggr value to the first member of perf_counts,
+	 * then aggr calculation in process_counter_values can work
+	 * correctly.
+	 */
+	evlist__for_each_entry(evlist, evsel) {
+		*perf_counts(evsel->prev_raw_counts, 0, 0) =
+			evsel->prev_raw_counts->aggr;
+	}
+}
+
+static void zero_per_pkg(struct evsel *counter)
 {
 	if (counter->per_pkg_mask)
-		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
+		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
 }
 
-static int check_per_pkg(struct perf_evsel *counter,
+static int check_per_pkg(struct evsel *counter,
 			 struct perf_counts_values *vals,
 			 int cpu, bool *skip)
 {
 	unsigned long *mask = counter->per_pkg_mask;
-	struct cpu_map *cpus = perf_evsel__cpus(counter);
+	struct perf_cpu_map *cpus = evsel__cpus(counter);
 	int s;
 
 	*skip = false;
@@ -239,11 +291,11 @@
 	if (!counter->per_pkg)
 		return 0;
 
-	if (cpu_map__empty(cpus))
+	if (perf_cpu_map__empty(cpus))
 		return 0;
 
 	if (!mask) {
-		mask = zalloc(MAX_NR_CPUS);
+		mask = zalloc(cpu__max_cpu());
 		if (!mask)
 			return -ENOMEM;
 
@@ -270,7 +322,7 @@
 }
 
 static int
-process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
+process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
 		       int cpu, int thread,
 		       struct perf_counts_values *count)
 {
@@ -289,14 +341,18 @@
 	switch (config->aggr_mode) {
 	case AGGR_THREAD:
 	case AGGR_CORE:
+	case AGGR_DIE:
 	case AGGR_SOCKET:
+	case AGGR_NODE:
 	case AGGR_NONE:
 		if (!evsel->snapshot)
-			perf_evsel__compute_deltas(evsel, cpu, thread, count);
+			evsel__compute_deltas(evsel, cpu, thread, count);
 		perf_counts_values__scale(count, config->scale, NULL);
-		if (config->aggr_mode == AGGR_NONE)
-			perf_stat__update_shadow_stats(evsel, count->val, cpu,
-						       &rt_stat);
+		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
+			perf_stat__update_shadow_stats(evsel, count->val,
+						       cpu, &rt_stat);
+		}
+
 		if (config->aggr_mode == AGGR_THREAD) {
 			if (config->stats)
 				perf_stat__update_shadow_stats(evsel,
@@ -308,10 +364,8 @@
 		break;
 	case AGGR_GLOBAL:
 		aggr->val += count->val;
-		if (config->scale) {
-			aggr->ena += count->ena;
-			aggr->run += count->run;
-		}
+		aggr->ena += count->ena;
+		aggr->run += count->run;
 	case AGGR_UNSET:
 	default:
 		break;
@@ -321,13 +375,13 @@
 }
 
 static int process_counter_maps(struct perf_stat_config *config,
-				struct perf_evsel *counter)
+				struct evsel *counter)
 {
-	int nthreads = thread_map__nr(counter->threads);
-	int ncpus = perf_evsel__nr_cpus(counter);
+	int nthreads = perf_thread_map__nr(counter->core.threads);
+	int ncpus = evsel__nr_cpus(counter);
 	int cpu, thread;
 
-	if (counter->system_wide)
+	if (counter->core.system_wide)
 		nthreads = 1;
 
 	for (thread = 0; thread < nthreads; thread++) {
@@ -342,7 +396,7 @@
 }
 
 int perf_stat_process_counter(struct perf_stat_config *config,
-			      struct perf_evsel *counter)
+			      struct evsel *counter)
 {
 	struct perf_counts_values *aggr = &counter->counts->aggr;
 	struct perf_stat_evsel *ps = counter->stats;
@@ -358,7 +412,7 @@
 	 * interval mode, otherwise overall avg running
 	 * averages will be shown for each interval.
 	 */
-	if (config->interval) {
+	if (config->interval || config->summary) {
 		for (i = 0; i < 3; i++)
 			init_stats(&ps->res_stats[i]);
 	}
@@ -374,7 +428,7 @@
 		return 0;
 
 	if (!counter->snapshot)
-		perf_evsel__compute_deltas(counter, -1, -1, aggr);
+		evsel__compute_deltas(counter, -1, -1, aggr);
 	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
 
 	for (i = 0; i < 3; i++)
@@ -382,7 +436,7 @@
 
 	if (verbose > 0) {
 		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-			perf_evsel__name(counter), count[0], count[1], count[2]);
+			evsel__name(counter), count[0], count[1], count[2]);
 	}
 
 	/*
@@ -393,13 +447,12 @@
 	return 0;
 }
 
-int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
-				   union perf_event *event,
-				   struct perf_session *session)
+int perf_event__process_stat_event(struct perf_session *session,
+				   union perf_event *event)
 {
 	struct perf_counts_values count;
-	struct stat_event *st = &event->stat;
-	struct perf_evsel *counter;
+	struct perf_record_stat *st = &event->stat;
+	struct evsel *counter;
 
 	count.val = st->val;
 	count.ena = st->ena;
@@ -418,12 +471,12 @@
 
 size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
 {
-	struct stat_event *st = (struct stat_event *) event;
+	struct perf_record_stat *st = (struct perf_record_stat *)event;
 	size_t ret;
 
-	ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
+	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
 		      st->id, st->cpu, st->thread);
-	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
+	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
 		      st->val, st->ena, st->run);
 
 	return ret;
@@ -431,10 +484,10 @@
 
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
 {
-	struct stat_round_event *rd = (struct stat_round_event *)event;
+	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
 	size_t ret;
 
-	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
+	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
 		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");
 
 	return ret;
@@ -454,3 +507,65 @@
 
 	return ret;
 }
+
+int create_perf_stat_counter(struct evsel *evsel,
+			     struct perf_stat_config *config,
+			     struct target *target,
+			     int cpu)
+{
+	struct perf_event_attr *attr = &evsel->core.attr;
+	struct evsel *leader = evsel->leader;
+
+	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			    PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+	/*
+	 * The event is part of non trivial group, let's enable
+	 * the group read (for leader) and ID retrieval for all
+	 * members.
+	 */
+	if (leader->core.nr_members > 1)
+		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+	attr->inherit = !config->no_inherit;
+
+	/*
+	 * Some events get initialized with sample_(period/type) set,
+	 * like tracepoints. Clear it up for counting.
+	 */
+	attr->sample_period = 0;
+
+	if (config->identifier)
+		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+	if (config->all_user) {
+		attr->exclude_kernel = 1;
+		attr->exclude_user = 0;
+	}
+
+	if (config->all_kernel) {
+		attr->exclude_kernel = 0;
+		attr->exclude_user = 1;
+	}
+
+	/*
+	 * Disabling all counters initially, they will be enabled
+	 * either manually by us or by kernel via enable_on_exec
	 * set later.
+	 */
+	if (evsel__is_group_leader(evsel)) {
+		attr->disabled = 1;
+
+		/*
+		 * In case of initial_delay we enable tracee
+		 * events manually.
+		 */
+		if (target__none(target) && !config->initial_delay)
+			attr->enable_on_exec = 1;
+	}
+
+	if (target__has_cpu(target) && !target__has_per_thread(target))
+		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
+
+	return evsel__open_per_thread(evsel, evsel->core.threads);
+}
-- 
Gitblit v1.6.2