Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r-- | tools/perf/builtin-stat.c | 185
1 file changed, 107 insertions(+), 78 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 2c38dd98f6ca..7006f848f87a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -610,38 +610,33 @@ static int dispatch_events(bool forks, int timeout, int interval, int *times)
 enum counter_recovery {
         COUNTER_SKIP,
         COUNTER_RETRY,
-        COUNTER_FATAL,
 };
 
-static enum counter_recovery stat_handle_error(struct evsel *counter)
+static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
 {
         char msg[BUFSIZ];
+
+        assert(!counter->supported);
+
         /*
          * PPC returns ENXIO for HW counters until 2.6.37
          * (behavior changed with commit b0a873e).
          */
-        if (errno == EINVAL || errno == ENOSYS ||
-            errno == ENOENT || errno == ENXIO) {
-                if (verbose > 0)
+        if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
+                if (verbose > 0) {
                         ui__warning("%s event is not supported by the kernel.\n",
                                     evsel__name(counter));
-                counter->supported = false;
-                /*
-                 * errored is a sticky flag that means one of the counter's
-                 * cpu event had a problem and needs to be reexamined.
-                 */
-                counter->errored = true;
-
-                if ((evsel__leader(counter) != counter) ||
-                    !(counter->core.leader->nr_members > 1))
-                        return COUNTER_SKIP;
-        } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
+                }
+                return COUNTER_SKIP;
+        }
+        if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
                 if (verbose > 0)
                         ui__warning("%s\n", msg);
+                counter->supported = true;
                 return COUNTER_RETRY;
-        } else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
-                   evsel_list->core.threads &&
-                   evsel_list->core.threads->err_thread != -1) {
+        }
+        if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
+            evsel_list->core.threads && evsel_list->core.threads->err_thread != -1) {
                 /*
                  * For global --per-thread case, skip current
                  * error thread.
@@ -649,37 +644,73 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
                 if (!thread_map__remove(evsel_list->core.threads,
                                         evsel_list->core.threads->err_thread)) {
                         evsel_list->core.threads->err_thread = -1;
+                        counter->supported = true;
                         return COUNTER_RETRY;
                 }
-        } else if (counter->skippable) {
-                if (verbose > 0)
-                        ui__warning("skipping event %s that kernel failed to open .\n",
-                                    evsel__name(counter));
-                counter->supported = false;
-                counter->errored = true;
-                return COUNTER_SKIP;
         }
+        if (verbose > 0) {
+                ui__warning(err == EOPNOTSUPP
+                            ? "%s event is not supported by the kernel.\n"
+                            : "skipping event %s that kernel failed to open.\n",
+                            evsel__name(counter));
+        }
+        return COUNTER_SKIP;
+}
 
-        if (errno == EOPNOTSUPP) {
-                if (verbose > 0) {
-                        ui__warning("%s event is not supported by the kernel.\n",
-                                    evsel__name(counter));
-                }
-                counter->supported = false;
-                counter->errored = true;
+static int create_perf_stat_counter(struct evsel *evsel,
+                                    struct perf_stat_config *config,
+                                    int cpu_map_idx)
+{
+        struct perf_event_attr *attr = &evsel->core.attr;
+        struct evsel *leader = evsel__leader(evsel);
+
+        /* Reset supported flag as creating a stat counter is retried. */
+        attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+                            PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+        /*
+         * The event is part of non trivial group, let's enable
+         * the group read (for leader) and ID retrieval for all
+         * members.
+         */
+        if (leader->core.nr_members > 1)
+                attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
 
-                if ((evsel__leader(counter) != counter) ||
-                    !(counter->core.leader->nr_members > 1))
-                        return COUNTER_SKIP;
+        attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
+
+        /*
+         * Some events get initialized with sample_(period/type) set,
+         * like tracepoints. Clear it up for counting.
+         */
+        attr->sample_period = 0;
+
+        if (config->identifier)
+                attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+        if (config->all_user) {
+                attr->exclude_kernel = 1;
+                attr->exclude_user = 0;
         }
-        evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
-        ui__error("%s\n", msg);
+        if (config->all_kernel) {
+                attr->exclude_kernel = 0;
+                attr->exclude_user = 1;
+        }
 
-        if (child_pid != -1)
-                kill(child_pid, SIGTERM);
+        /*
+         * Disabling all counters initially, they will be enabled
+         * either manually by us or by kernel via enable_on_exec
+         * set later.
+         */
+        if (evsel__is_group_leader(evsel)) {
+                attr->disabled = 1;
 
-        return COUNTER_FATAL;
+                if (target__enable_on_exec(&target))
+                        attr->enable_on_exec = 1;
+        }
+
+        return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
+                                              evsel->core.threads);
 }
 
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
@@ -696,8 +727,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
         bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
         struct evlist_cpu_iterator evlist_cpu_itr;
         struct affinity saved_affinity, *affinity = NULL;
-        int err;
-        bool second_pass = false;
+        int err, open_err = 0;
+        bool second_pass = false, has_supported_counters;
 
         if (forks) {
                 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                              workload_exec_failed_signal) < 0) {
@@ -737,14 +768,17 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
                 if (target.use_bpf)
                         break;
 
-                if (counter->reset_group || counter->errored)
+                if (counter->reset_group || !counter->supported)
                         continue;
                 if (evsel__is_bperf(counter))
                         continue;
-try_again:
-                if (create_perf_stat_counter(counter, &stat_config, &target,
-                                             evlist_cpu_itr.cpu_map_idx) < 0) {
+                while (true) {
+                        if (create_perf_stat_counter(counter, &stat_config,
+                                                     evlist_cpu_itr.cpu_map_idx) == 0)
+                                break;
+
+                        open_err = errno;
                         /*
                          * Weak group failed. We cannot just undo this here
                          * because earlier CPUs might be in group mode, and the kernel
@@ -752,29 +786,19 @@ try_again:
                          * it to later.
                          * Don't close here because we're in the wrong affinity.
                          */
-                        if ((errno == EINVAL || errno == EBADF) &&
+                        if ((open_err == EINVAL || open_err == EBADF) &&
                             evsel__leader(counter) != counter &&
                             counter->weak_group) {
                                 evlist__reset_weak_group(evsel_list, counter, false);
                                 assert(counter->reset_group);
+                                counter->supported = true;
                                 second_pass = true;
-                                continue;
-                        }
-
-                        switch (stat_handle_error(counter)) {
-                        case COUNTER_FATAL:
-                                err = -1;
-                                goto err_out;
-                        case COUNTER_RETRY:
-                                goto try_again;
-                        case COUNTER_SKIP:
-                                continue;
-                        default:
                                 break;
                         }
+                        if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
+                                break;
                 }
-                counter->supported = true;
         }
 
         if (second_pass) {
@@ -787,7 +811,7 @@ try_again:
                 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
                         counter = evlist_cpu_itr.evsel;
 
-                        if (!counter->reset_group && !counter->errored)
+                        if (!counter->reset_group && counter->supported)
                                 continue;
 
                         perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
@@ -798,34 +822,29 @@ try_again:
                         if (!counter->reset_group)
                                 continue;
-try_again_reset:
-                        pr_debug2("reopening weak %s\n", evsel__name(counter));
-                        if (create_perf_stat_counter(counter, &stat_config, &target,
-                                                     evlist_cpu_itr.cpu_map_idx) < 0) {
-
-                                switch (stat_handle_error(counter)) {
-                                case COUNTER_FATAL:
-                                        err = -1;
-                                        goto err_out;
-                                case COUNTER_RETRY:
-                                        goto try_again_reset;
-                                case COUNTER_SKIP:
-                                        continue;
-                                default:
+
+                        while (true) {
+                                pr_debug2("reopening weak %s\n", evsel__name(counter));
+                                if (create_perf_stat_counter(counter, &stat_config,
+                                                             evlist_cpu_itr.cpu_map_idx) == 0)
+                                        break;
+
+                                open_err = errno;
+                                if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
                                         break;
-                                }
                         }
-                        counter->supported = true;
                 }
         }
         affinity__cleanup(affinity);
         affinity = NULL;
 
+        has_supported_counters = false;
         evlist__for_each_entry(evsel_list, counter) {
                 if (!counter->supported) {
                         perf_evsel__free_fd(&counter->core);
                         continue;
                 }
+                has_supported_counters = true;
 
                 l = strlen(counter->unit);
                 if (l > stat_config.unit_width)
@@ -837,6 +856,16 @@ try_again_reset:
                         goto err_out;
                 }
         }
+        if (!has_supported_counters) {
+                evsel__open_strerror(evlist__first(evsel_list), &target, open_err,
+                                     msg, sizeof(msg));
+                ui__error("No supported events found.\n%s\n", msg);
+
+                if (child_pid != -1)
+                        kill(child_pid, SIGTERM);
+                err = -1;
+                goto err_out;
+        }
 
         if (evlist__apply_filters(evsel_list, &counter, &target)) {
                 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",