Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r--  tools/perf/builtin-record.c | 161
1 file changed, 102 insertions(+), 59 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d76f01956e33..2584d0d8bc82 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -730,8 +730,6 @@ static void record__sig_exit(void)
raise(signr);
}
-#ifdef HAVE_AUXTRACE_SUPPORT
-
static int record__process_auxtrace(const struct perf_tool *tool,
struct mmap *map,
union perf_event *event, void *data1,
@@ -889,40 +887,6 @@ static int record__auxtrace_init(struct record *rec)
return auxtrace_parse_filters(rec->evlist);
}
-#else
-
-static inline
-int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct mmap *map __maybe_unused)
-{
- return 0;
-}
-
-static inline
-void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
- bool on_exit __maybe_unused)
-{
-}
-
-static inline
-int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
-{
- return 0;
-}
-
-static inline
-int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-static int record__auxtrace_init(struct record *rec __maybe_unused)
-{
- return 0;
-}
-
-#endif
-
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
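
The two hunks above drop the #ifdef HAVE_AUXTRACE_SUPPORT guard and its matching !HAVE_AUXTRACE_SUPPORT stubs, so the real auxtrace helpers are now compiled unconditionally. For context, the removed code followed the usual compile-time feature-stub idiom; a minimal generic sketch of that pattern (names are illustrative, not taken from perf):

#include <stdio.h>

/* Generic sketch of the stub pattern removed above: when the optional
 * feature is compiled out, inline no-op stubs keep callers building
 * without sprinkling #ifdefs at every call site. */
#ifdef HAVE_WIDGET_SUPPORT
static int widget_init(void)
{
	printf("widget support enabled\n");
	return 0;
}
#else
static inline int widget_init(void)
{
	return 0;	/* feature compiled out: succeed silently */
}
#endif

int main(void)
{
	return widget_init();
}
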
@@ -983,7 +947,6 @@ static int record__config_tracking_events(struct record *rec)
*/
if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
perf_pmus__num_core_pmus() > 1) {
-
/*
* User space tasks can migrate between CPUs, so when tracing
* selected CPUs, sideband for all CPUs is still needed.
@@ -1388,10 +1351,27 @@ static int record__open(struct record *rec)
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
int rc = 0;
+ bool skipped = false;
+ bool removed_tracking = false;
evlist__for_each_entry(evlist, pos) {
+ if (removed_tracking) {
+ /*
+ * Normally the head of the list has tracking enabled
+ * for sideband data like mmaps. If this event is
+ * removed, make sure to add tracking to the next
+ * processed event.
+ */
+ if (!pos->tracking) {
+ pos->tracking = true;
+ evsel__config(pos, opts, &callchain_param);
+ }
+ removed_tracking = false;
+ }
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
+ bool report_error = true;
+
if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
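
The removed_tracking bookkeeping above exists because sideband data (mmap, comm, task records) is only requested on the evsel marked as tracking, normally the head of the list; if that evsel is later dropped, the next surviving event has to take over the role. A simplified, self-contained sketch of what the tracking role means at the perf_event_attr level (illustrative only; in perf the real configuration is done by evsel__config()):

#include <linux/perf_event.h>
#include <stdbool.h>
#include <string.h>

/* Simplified sketch: the "tracking" event asks the kernel for sideband
 * records on behalf of the whole event list.  If it is removed after
 * failing to open, another event must set these bits or the recording
 * loses all mmap/comm/task information. */
static void config_tracking(struct perf_event_attr *attr, bool tracking)
{
	attr->mmap = tracking;
	attr->mmap2 = tracking;
	attr->comm = tracking;
	attr->task = tracking;
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	config_tracking(&attr, true);
	return 0;
}
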
@@ -1403,13 +1383,72 @@ try_again:
pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
}
- rc = -errno;
- evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
- ui__error("%s\n", msg);
- goto out;
+#if defined(__aarch64__) || defined(__arm__)
+ if (strstr(evsel__name(pos), "cycles")) {
+ struct evsel *pos2;
+ /*
+ * Unfortunately ARM has many events named
+ * "cycles" on PMUs like the system-level (L3)
+ * cache which don't support sampling. Only
+ * display such failures to open when there is
+ * only 1 cycles event or verbose is enabled.
+ */
+ evlist__for_each_entry(evlist, pos2) {
+ if (pos2 == pos)
+ continue;
+ if (strstr(evsel__name(pos2), "cycles")) {
+ report_error = false;
+ break;
+ }
+ }
+ }
+#endif
+ if (report_error || verbose > 0) {
+ ui__error("Failure to open event '%s' on PMU '%s' which will be "
+ "removed.\n%s\n",
+ evsel__name(pos), evsel__pmu_name(pos), msg);
+ }
+ if (pos->tracking)
+ removed_tracking = true;
+ pos->skippable = true;
+ skipped = true;
}
}
+ if (skipped) {
+ struct evsel *tmp;
+ int idx = 0;
+ bool evlist_empty = true;
+
+ /* Remove evsels that failed to open and update indices. */
+ evlist__for_each_entry_safe(evlist, tmp, pos) {
+ if (pos->skippable) {
+ evlist__remove(evlist, pos);
+ continue;
+ }
+
+ /*
+ * Note, dummy events may be command line parsed or
+ * added by the tool. We care about supporting `perf
+ * record -e dummy` which may be used as a permission
+ * check. Dummy events that are added to the command
+ * line and opened along with other events that fail,
+ * will still fail as if the dummy events were tool
+ * added events for the sake of code simplicity.
+ */
+ if (!evsel__is_dummy_event(pos))
+ evlist_empty = false;
+ }
+ evlist__for_each_entry(evlist, pos) {
+ pos->core.idx = idx++;
+ }
+ /* If list is empty then fail. */
+ if (evlist_empty) {
+ ui__error("Failure to open any events for recording.\n");
+ rc = -1;
+ goto out;
+ }
+ }
if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
@@ -1815,15 +1854,14 @@ record__finish_output(struct record *rec)
}
/* Buildid scanning disabled or build ID in kernel and synthesized map events. */
- if (!rec->no_buildid) {
+ if (!rec->no_buildid || !rec->no_buildid_cache) {
process_buildids(rec);
if (rec->buildid_all)
perf_session__dsos_hit_all(rec->session);
}
perf_session__write_header(rec->session, rec->evlist, fd, true);
-
- return;
+ perf_session__cache_build_ids(rec->session);
}
static int record__synthesize_workload(struct record *rec, bool tail)
@@ -2883,11 +2921,11 @@ out_free_threads:
rec->bytes_written += off_cpu_write(rec->session);
record__read_lost_samples(rec);
- record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
if (!err) {
+ record__synthesize(rec, true);
if (!rec->timestamp_filename) {
record__finish_output(rec);
} else {
@@ -3008,7 +3046,7 @@ static int perf_record_config(const char *var, const char *value, void *cb)
else if (!strcmp(value, "no-cache"))
rec->no_buildid_cache = true;
else if (!strcmp(value, "skip"))
- rec->no_buildid = true;
+ rec->no_buildid = rec->no_buildid_cache = true;
else if (!strcmp(value, "mmap"))
rec->buildid_mmap = true;
else if (!strcmp(value, "no-mmap"))
@@ -4117,24 +4155,25 @@ int cmd_record(int argc, const char **argv)
record.opts.record_switch_events = true;
}
- if (!rec->buildid_mmap) {
- pr_debug("Disabling build id in synthesized mmap2 events.\n");
- symbol_conf.no_buildid_mmap2 = true;
- } else if (rec->buildid_mmap_set) {
- /*
- * Explicitly passing --buildid-mmap disables buildid processing
- * and cache generation.
- */
- rec->no_buildid = true;
- }
if (rec->buildid_mmap && !perf_can_record_build_id()) {
pr_warning("Missing support for build id in kernel mmap events.\n"
"Disable this warning with --no-buildid-mmap\n");
rec->buildid_mmap = false;
}
+
if (rec->buildid_mmap) {
/* Enable perf_event_attr::build_id bit. */
rec->opts.build_id = true;
+ /* Disable build-ID table in the header. */
+ rec->no_buildid = true;
+ } else {
+ pr_debug("Disabling build id in synthesized mmap2 events.\n");
+ symbol_conf.no_buildid_mmap2 = true;
+ }
+
+ if (rec->no_buildid_set && rec->no_buildid) {
+ /* -B implies -N for historic reasons. */
+ rec->no_buildid_cache = true;
}
if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
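
With --buildid-mmap the build ID is carried in the kernel's MMAP2 records themselves, which is why the hunk above enables rec->opts.build_id and disables the header build-id table in the same branch. A minimal sketch of the attribute bits involved (assumes a kernel that exposes perf_event_attr.build_id, v5.12 or later):

#include <linux/perf_event.h>
#include <string.h>

/* Sketch only: with mmap2 build-id reporting, each PERF_RECORD_MMAP2
 * carries the DSO's build ID directly, so perf record no longer needs
 * to collect a separate build-id table while writing the header. */
static void request_mmap2_build_id(struct perf_event_attr *attr)
{
	attr->mmap2 = 1;	/* extended mmap records */
	attr->build_id = 1;	/* embed build IDs in MMAP2 (kernel >= 5.12) */
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	request_mmap2_build_id(&attr);
	return 0;
}
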
@@ -4231,7 +4270,7 @@ int cmd_record(int argc, const char **argv)
err = -ENOMEM;
- if (rec->no_buildid_cache || rec->no_buildid) {
+ if (rec->no_buildid_cache) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {
/*
@@ -4266,9 +4305,13 @@ int cmd_record(int argc, const char **argv)
record.opts.tail_synthesize = true;
if (rec->evlist->core.nr_entries == 0) {
- err = parse_event(rec->evlist, "cycles:P");
- if (err)
+ struct evlist *def_evlist = evlist__new_default();
+
+ if (!def_evlist)
goto out;
+
+ evlist__splice_list_tail(rec->evlist, &def_evlist->core.entries);
+ evlist__delete(def_evlist);
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)