Diffstat (limited to 'tools/perf/util/parse-events.c')
-rw-r--r--  tools/perf/util/parse-events.c | 418
1 file changed, 286 insertions(+), 132 deletions(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index afeb8d815bbf..2380de56a207 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
+#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
@@ -17,6 +18,7 @@
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
+#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
@@ -27,6 +29,7 @@
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
+#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"
@@ -178,6 +181,26 @@ static char *get_config_name(const struct parse_events_terms *head_terms)
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
+static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
+{
+ struct parse_events_term *term;
+ struct perf_cpu_map *cpus = NULL;
+
+ if (!head_terms)
+ return NULL;
+
+ list_for_each_entry(term, &head_terms->terms, list) {
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
+ struct perf_cpu_map *cpu = perf_cpu_map__new_int(term->val.num);
+
+ perf_cpu_map__merge(&cpus, cpu);
+ perf_cpu_map__put(cpu);
+ }
+ }
+
+ return cpus;
+}
+
/**
* fix_raw - For each raw term see if there is an event (aka alias) in pmu that
* matches the raw's string value. If the string value matches an
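The new get_config_cpu() helper folds every "cpu=N" term on an event into one CPU map, merging duplicates. A minimal standalone sketch of the same merge-and-deduplicate idea, using a fixed bitmap in place of libperf's perf_cpu_map (cpu_set and cpu_set__merge_one below are hypothetical stand-ins, not perf APIs):

#include <stdio.h>

#define MAX_CPUS 64

struct cpu_set { unsigned long long bits; };

/* Merging is idempotent: repeated "cpu=2" terms collapse to one bit. */
static void cpu_set__merge_one(struct cpu_set *set, long long cpu)
{
	if (cpu >= 0 && cpu < MAX_CPUS)
		set->bits |= 1ULL << cpu;
}

int main(void)
{
	/* Values as parsed from e.g. "event/cpu=2,cpu=5,cpu=2/". */
	long long terms[] = { 2, 5, 2 };
	struct cpu_set cpus = { 0 };

	for (size_t i = 0; i < sizeof(terms) / sizeof(terms[0]); i++)
		cpu_set__merge_one(&cpus, terms[i]);

	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		if (cpus.bits & (1ULL << cpu))
			printf("cpu %d\n", cpu);	/* prints 2 then 5 */
	return 0;
}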
@@ -227,25 +250,55 @@ __add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
bool init_attr,
const char *name, const char *metric_id, struct perf_pmu *pmu,
- struct list_head *config_terms, bool auto_merge_stats,
+ struct list_head *config_terms, struct evsel *first_wildcard_match,
struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
struct evsel *evsel;
- struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
+ bool is_pmu_core;
+ struct perf_cpu_map *cpus;
- cpus = perf_cpu_map__get(cpus);
- if (pmu)
- perf_pmu__warn_invalid_formats(pmu);
+ /*
+ * Ensure the first_wildcard_match's PMU matches that of the new event
+ * being added. Otherwise try to match with another event further down
+ * the evlist.
+ */
+ if (first_wildcard_match) {
+ struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
+
+ first_wildcard_match = NULL;
+ list_for_each_entry_continue(pos, list, core.node) {
+ if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ }
+ }
- if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
- perf_pmu__warn_invalid_config(pmu, attr->config, name,
- PERF_PMU_FORMAT_VALUE_CONFIG, "config");
- perf_pmu__warn_invalid_config(pmu, attr->config1, name,
- PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
- perf_pmu__warn_invalid_config(pmu, attr->config2, name,
- PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
- perf_pmu__warn_invalid_config(pmu, attr->config3, name,
- PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ if (pmu) {
+ is_pmu_core = pmu->is_core;
+ cpus = perf_cpu_map__get(perf_cpu_map__is_empty(cpu_list) ? pmu->cpus : cpu_list);
+ perf_pmu__warn_invalid_formats(pmu);
+ if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
+ perf_pmu__warn_invalid_config(pmu, attr->config, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG, "config");
+ perf_pmu__warn_invalid_config(pmu, attr->config1, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
+ perf_pmu__warn_invalid_config(pmu, attr->config2, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
+ perf_pmu__warn_invalid_config(pmu, attr->config3, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ }
+ } else {
+ is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
+ attr->type == PERF_TYPE_HW_CACHE);
+ if (perf_cpu_map__is_empty(cpu_list))
+ cpus = is_pmu_core ? perf_cpu_map__new_online_cpus() : NULL;
+ else
+ cpus = perf_cpu_map__get(cpu_list);
}
if (init_attr)
event_attr_init(attr);
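__add_event() now rescans the list when the recorded first_wildcard_match belongs to a different PMU, looking for a later event whose PMU is compatible with the one being added. A minimal sketch of that rescan, where struct ev and name_no_suffix_match() are simplified stand-ins for struct evsel and perf_pmu__name_no_suffix_match() (the real matcher handles PMU instance suffixes like uncore_imc_0/uncore_imc_1 more carefully):

#include <stdio.h>
#include <string.h>

/* Stand-in for an evsel: just its PMU name and whether that PMU is core. */
struct ev { const char *pmu_name; int is_core; };

/* Rough stand-in for perf_pmu__name_no_suffix_match(): equal up to the
 * first digit, so "uncore_imc_0" and "uncore_imc_1" are one family. */
static int name_no_suffix_match(const char *a, const char *b)
{
	size_t n = strcspn(a, "0123456789");

	return strcspn(b, "0123456789") == n && strncmp(a, b, n) == 0;
}

/* Walk forward from the recorded candidate until an event compatible
 * with the PMU of the event being added is found. */
static int rescan_wildcard_match(const struct ev *evlist, int len, int start,
				 const char *new_pmu, int new_is_core)
{
	for (int i = start; i < len; i++) {
		if (name_no_suffix_match(evlist[i].pmu_name, new_pmu) ||
		    (evlist[i].is_core && new_is_core))
			return i;
	}
	return -1;	/* no compatible event: not a duplicate after all */
}

int main(void)
{
	struct ev evlist[] = {
		{ "cpu_core", 1 }, { "uncore_imc_0", 0 }, { "uncore_imc_1", 0 },
	};
	int m = rescan_wildcard_match(evlist, 3, /*start=*/1, "uncore_imc_1", 0);

	printf("first compatible match at index %d\n", m);	/* 1 */
	return 0;
}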
@@ -260,10 +313,10 @@ __add_event(struct list_head *list, int *idx,
evsel->core.cpus = cpus;
evsel->core.own_cpus = perf_cpu_map__get(cpus);
evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
- evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
- evsel->auto_merge_stats = auto_merge_stats;
+ evsel->core.is_pmu_core = is_pmu_core;
evsel->pmu = pmu;
evsel->alternate_hw_config = alternate_hw_config;
+ evsel->first_wildcard_match = first_wildcard_match;
if (name)
evsel->name = strdup(name);
@@ -286,7 +339,7 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
{
return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
metric_id, pmu, /*config_terms=*/NULL,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
@@ -297,7 +350,7 @@ static int add_event(struct list_head *list, int *idx,
{
return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
/*pmu=*/NULL, config_terms,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
alternate_hw_config) ? 0 : -ENOMEM;
}
@@ -422,7 +475,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config);
+ struct evsel *first_wildcard_match, u64 alternate_hw_config);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
@@ -432,11 +485,13 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
bool found_supported = false;
const char *config_name = get_config_name(parsed_terms);
const char *metric_id = get_config_metric_id(parsed_terms);
+ struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
+ int ret = 0;
+ struct evsel *first_wildcard_match = NULL;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
LIST_HEAD(config_terms);
struct perf_event_attr attr;
- int ret;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
@@ -448,10 +503,13 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
*/
ret = parse_events_add_pmu(parse_state, list, pmu,
parsed_terms,
- perf_pmu__auto_merge_stats(pmu),
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX);
if (ret)
- return ret;
+ goto out_err;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of(list->prev, struct evsel, core.node);
continue;
}
@@ -471,25 +529,32 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
if (parsed_terms) {
if (config_attr(&attr, parsed_terms, parse_state->error,
- config_term_common))
- return -EINVAL;
-
- if (get_config_terms(parsed_terms, &config_terms))
- return -ENOMEM;
+ config_term_common)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ if (get_config_terms(parsed_terms, &config_terms)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
- return -ENOMEM;
+ metric_id, pmu, &config_terms, first_wildcard_match,
+ cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
+ ret = -ENOMEM;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match = container_of(list->prev, struct evsel, core.node);
free_config_terms(&config_terms);
+ if (ret)
+ goto out_err;
}
+out_err:
+ perf_cpu_map__put(cpus);
return found_supported ? 0 : -EINVAL;
}
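parse_events_add_cache() records the first successfully added event as first_wildcard_match by taking the list tail with container_of(list->prev, ...). A compact sketch of that pattern under stand-in types (this list_head, container_of and evsel_stub are local reimplementations for illustration, not the kernel headers):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct evsel_stub { int idx; struct list_head node; };

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct list_head list = { &list, &list };
	struct evsel_stub a = { .idx = 0 }, b = { .idx = 1 };
	struct evsel_stub *first_wildcard_match = NULL;

	list_add_tail(&a.node, &list);
	if (first_wildcard_match == NULL)	/* only the first add is kept */
		first_wildcard_match =
			container_of(list.prev, struct evsel_stub, node);
	list_add_tail(&b.node, &list);

	/* Later adds leave the recorded match alone: prints idx 0. */
	printf("first wildcard match: idx %d\n", first_wildcard_match->idx);
	return 0;
}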
-#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name, int column)
{
@@ -555,8 +620,8 @@ static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
struct parse_events_terms *head_config, YYLTYPE *loc)
{
char *evt_path;
- struct dirent *evt_ent;
- DIR *evt_dir;
+ struct io_dirent64 *evt_ent;
+ struct io_dir evt_dir;
int ret = 0, found = 0;
evt_path = get_events_file(sys_name);
@@ -564,14 +629,14 @@ static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
- evt_dir = opendir(evt_path);
- if (!evt_dir) {
+ io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ if (evt_dir.dirfd < 0) {
put_events_file(evt_path);
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
- while (!ret && (evt_ent = readdir(evt_dir))) {
+ while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
|| !strcmp(evt_ent->d_name, "enable")
@@ -593,7 +658,7 @@ static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
}
put_events_file(evt_path);
- closedir(evt_dir);
+ close(evt_dir.dirfd);
return ret;
}
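The conversion from opendir()/readdir() to io_dir scans a raw O_CLOEXEC directory fd; the real helper in <api/io_dir.h> reads dirents via getdents64 and avoids DIR allocations. A portable approximation of the same scan with plain libc, assuming only POSIX fdopendir():

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int list_events(const char *path)
{
	int dirfd = open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY);
	struct dirent *ent;
	DIR *dir;

	if (dirfd < 0)
		return -1;
	dir = fdopendir(dirfd);	/* takes ownership of dirfd */
	if (!dir) {
		close(dirfd);
		return -1;
	}
	while ((ent = readdir(dir)) != NULL) {
		/* Skip the control files the tracepoint scan also skips. */
		if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..") ||
		    !strcmp(ent->d_name, "enable"))
			continue;
		puts(ent->d_name);
	}
	closedir(dir);	/* also closes dirfd */
	return 0;
}

int main(int argc, char **argv)
{
	return argc > 1 ? list_events(argv[1]) : 0;
}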
@@ -616,17 +681,23 @@ static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
{
- struct dirent *events_ent;
- DIR *events_dir;
+ struct io_dirent64 *events_ent;
+ struct io_dir events_dir;
int ret = 0;
+ char *events_dir_path = get_tracing_file("events");
- events_dir = tracing_events__opendir();
- if (!events_dir) {
+ if (!events_dir_path) {
+ tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
+ return -1;
+ }
+ io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
+ put_events_file(events_dir_path);
+ if (events_dir.dirfd < 0) {
tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
return -1;
}
- while (!ret && (events_ent = readdir(events_dir))) {
+ while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
if (!strcmp(events_ent->d_name, ".")
|| !strcmp(events_ent->d_name, "..")
|| !strcmp(events_ent->d_name, "enable")
@@ -640,11 +711,9 @@ static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
evt_name, err, head_config, loc);
}
-
- closedir(events_dir);
+ close(events_dir.dirfd);
return ret;
}
-#endif /* HAVE_LIBTRACEEVENT */
size_t default_breakpoint_len(void)
{
@@ -795,11 +864,13 @@ const char *parse_events__term_type_str(enum parse_events__term_type term_type)
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
+ [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
+ [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
};
if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
return "unknown term";
@@ -829,6 +900,7 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
return true;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
@@ -844,6 +916,7 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
@@ -963,6 +1036,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ CHECK_TYPE_VAL(STR);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM);
if (term->val.num > UINT_MAX) {
@@ -972,6 +1048,15 @@ do { \
return -EINVAL;
}
break;
+ case PARSE_EVENTS__TERM_TYPE_CPU:
+ CHECK_TYPE_VAL(NUM);
+ if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
+ parse_events_error__handle(err, term->err_val,
+ strdup("too big"),
+ NULL);
+ return -EINVAL;
+ }
+ break;
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
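The new PARSE_EVENTS__TERM_TYPE_CPU case rejects a cpu=N term whose value cannot name a present CPU. A tiny sketch of the same range check, with sysconf(_SC_NPROCESSORS_CONF) standing in for cpu__max_present_cpu() (an assumption for illustration; the real helper reads sysfs):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Reject a parsed cpu=N value that cannot name a present CPU. */
static int check_cpu_term(long long num)
{
	long max_present = sysconf(_SC_NPROCESSORS_CONF);

	if (num < 0 || num >= max_present) {
		fprintf(stderr, "cpu=%lld: too big (last present cpu is %ld)\n",
			num, max_present - 1);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	return check_cpu_term(4096) ? 1 : 0;	/* rejected on most systems */
}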
@@ -1066,7 +1151,6 @@ static int config_term_pmu(struct perf_event_attr *attr,
return config_term_common(attr, term, err);
}
-#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
@@ -1081,6 +1165,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
case PARSE_EVENTS__TERM_TYPE_USER:
@@ -1099,6 +1184,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
if (err) {
parse_events_error__handle(err, term->err_term,
@@ -1111,7 +1197,6 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
return 0;
}
-#endif
static int config_attr(struct perf_event_attr *attr,
const struct parse_events_terms *head,
@@ -1217,6 +1302,9 @@ do { \
ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
term->val.num ? 1 : 0, term->weak);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
@@ -1231,6 +1319,7 @@ do { \
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
break;
}
@@ -1279,11 +1368,13 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
break;
}
@@ -1303,7 +1394,7 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
-#ifdef HAVE_LIBTRACEEVENT
+
if (head_config) {
struct perf_event_attr attr;
@@ -1318,26 +1409,18 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
else
return add_tracepoint_event(parse_state, list, sys, event,
err, head_config, loc);
-#else
- (void)parse_state;
- (void)list;
- (void)sys;
- (void)event;
- (void)head_config;
- parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
- strdup("libtraceevent is necessary for tracepoint support"));
- return -1;
-#endif
}
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, const struct parse_events_terms *head_config)
+ u64 config, const struct parse_events_terms *head_config,
+ struct evsel *first_wildcard_match)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
const char *name, *metric_id;
+ struct perf_cpu_map *cpus;
int ret;
memset(&attr, 0, sizeof(attr));
@@ -1359,10 +1442,11 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
name = get_config_name(head_config);
metric_id = get_config_metric_id(head_config);
+ cpus = get_config_cpu(head_config);
ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
- ) == NULL ? -ENOMEM : 0;
+ metric_id, pmu, &config_terms, first_wildcard_match,
+ cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
+ perf_cpu_map__put(cpus);
free_config_terms(&config_terms);
return ret;
}
@@ -1378,6 +1462,7 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
/* Wildcards on numeric values are only supported by core PMUs. */
if (wildcard && perf_pmus__supports_extended_type()) {
+ struct evsel *first_wildcard_match = NULL;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
int ret;
@@ -1387,15 +1472,20 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
ret = __parse_events_add_numeric(parse_state, list, pmu,
type, pmu->type,
- config, head_config);
+ config, head_config,
+ first_wildcard_match);
if (ret)
return ret;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of(list->prev, struct evsel, core.node);
}
if (found_supported)
return 0;
}
return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
- type, /*extended_type=*/0, config, head_config);
+ type, /*extended_type=*/0, config, head_config,
+ /*first_wildcard_match=*/NULL);
}
static bool config_term_percore(struct list_head *config_terms)
@@ -1413,7 +1503,7 @@ static bool config_term_percore(struct list_head *config_terms)
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config)
+ struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
@@ -1422,6 +1512,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
LIST_HEAD(config_terms);
struct parse_events_terms parsed_terms;
bool alias_rewrote_terms = false;
+ struct perf_cpu_map *term_cpu = NULL;
if (verbose > 1) {
struct strbuf sb;
@@ -1449,7 +1540,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
- /*config_terms=*/NULL, auto_merge_stats,
+ /*config_terms=*/NULL, first_wildcard_match,
/*cpu_list=*/NULL, alternate_hw_config);
return evsel ? 0 : -ENOMEM;
}
@@ -1516,11 +1607,12 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
+ term_cpu = get_config_cpu(&parsed_terms);
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
get_config_name(&parsed_terms),
get_config_metric_id(&parsed_terms), pmu,
- &config_terms, auto_merge_stats, /*cpu_list=*/NULL,
- alternate_hw_config);
+ &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
+ perf_cpu_map__put(term_cpu);
if (!evsel) {
parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
@@ -1537,6 +1629,10 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->scale = info.scale;
evsel->per_pkg = info.per_pkg;
evsel->snapshot = info.snapshot;
+ evsel->retirement_latency.mean = info.retirement_latency_mean;
+ evsel->retirement_latency.min = info.retirement_latency_min;
+ evsel->retirement_latency.max = info.retirement_latency_max;
+
return 0;
}
@@ -1552,6 +1648,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
int ok = 0;
const char *config;
struct parse_events_terms parsed_terms;
+ struct evsel *first_wildcard_match = NULL;
*listp = NULL;
@@ -1584,17 +1681,14 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
INIT_LIST_HEAD(list);
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- bool auto_merge_stats;
-
if (parse_events__filter_pmu(parse_state, pmu))
continue;
if (!perf_pmu__have_event(pmu, event_name))
continue;
- auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
if (!parse_events_add_pmu(parse_state, list, pmu,
- &parsed_terms, auto_merge_stats, hw_config)) {
+ &parsed_terms, first_wildcard_match, hw_config)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1603,11 +1697,13 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
strbuf_release(&sb);
ok++;
}
+ if (first_wildcard_match == NULL)
+ first_wildcard_match = container_of(list->prev, struct evsel, core.node);
}
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
- /*auto_merge_stats=*/true, hw_config)) {
+ first_wildcard_match, hw_config)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1638,6 +1734,7 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
struct perf_pmu *pmu;
int ok = 0;
char *help;
+ struct evsel *first_wildcard_match = NULL;
*listp = malloc(sizeof(**listp));
if (!*listp)
@@ -1648,14 +1745,14 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
/* Attempt to add to list assuming event_or_pmu is a PMU name. */
pmu = perf_pmus__find(event_or_pmu);
if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
- /*auto_merge_stats=*/false,
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX))
return 0;
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
const_parsed_terms,
- /*auto_merge_stats=*/false,
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX))
return 0;
}
@@ -1664,16 +1761,17 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
if (!parse_events__filter_pmu(parse_state, pmu) &&
- perf_pmu__match(pmu, event_or_pmu)) {
- bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
-
+ perf_pmu__wildcard_match(pmu, event_or_pmu)) {
if (!parse_events_add_pmu(parse_state, *listp, pmu,
const_parsed_terms,
- auto_merge_stats,
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
ok++;
parse_state->wild_card_pmus = true;
}
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of((*listp)->prev, struct evsel, core.node);
}
}
if (ok)
@@ -1978,48 +2076,55 @@ static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct li
int *force_grouped_idx = _fg_idx;
int lhs_sort_idx, rhs_sort_idx, ret;
const char *lhs_pmu_name, *rhs_pmu_name;
- bool lhs_has_group, rhs_has_group;
/*
- * First sort by grouping/leader. Read the leader idx only if the evsel
- * is part of a group, by default ungrouped events will be sorted
- * relative to grouped events based on where the first ungrouped event
- * occurs. If both events don't have a group we want to fall-through to
- * the arch specific sorting, that can reorder and fix things like
- * Intel's topdown events.
+ * Get the indexes of the 2 events to sort. If the events are
+ * in groups then the leader's index is used otherwise the
+ * event's index is used. An index may be forced for events that
+ * must be in the same group, namely Intel topdown events.
*/
- if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
- lhs_has_group = true;
- lhs_sort_idx = lhs_core->leader->idx;
+ if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
+ lhs_sort_idx = *force_grouped_idx;
} else {
- lhs_has_group = false;
- lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
- ? *force_grouped_idx
- : lhs_core->idx;
- }
- if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
- rhs_has_group = true;
- rhs_sort_idx = rhs_core->leader->idx;
+ bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
+
+ lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
+ }
+ if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
+ rhs_sort_idx = *force_grouped_idx;
} else {
- rhs_has_group = false;
- rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
- ? *force_grouped_idx
- : rhs_core->idx;
+ bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
+
+ rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
}
+ /* If the indices differ then respect the insertion order. */
if (lhs_sort_idx != rhs_sort_idx)
return lhs_sort_idx - rhs_sort_idx;
- /* Group by PMU if there is a group. Groups can't span PMUs. */
- if (lhs_has_group && rhs_has_group) {
- lhs_pmu_name = lhs->group_pmu_name;
- rhs_pmu_name = rhs->group_pmu_name;
- ret = strcmp(lhs_pmu_name, rhs_pmu_name);
- if (ret)
- return ret;
- }
+ /*
+ * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
+ * be in the same group. Events in the same group need to be ordered by
+ * their grouping PMU name as the group will be broken to ensure only
+ * events on the same PMU are programmed together.
+ *
+ * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both
+ * events are being forced to be at force_grouped_idx. If only one event
+ * is being forced then the other event is the group leader of the group
+ * we're trying to force the event into. Ensure for the force grouped
+ * case that the PMU name ordering is also respected.
+ */
+ lhs_pmu_name = lhs->group_pmu_name;
+ rhs_pmu_name = rhs->group_pmu_name;
+ ret = strcmp(lhs_pmu_name, rhs_pmu_name);
+ if (ret)
+ return ret;
- /* Architecture specific sorting. */
+ /*
+ * Architecture specific sorting, by default sort events in the same
+ * group with the same PMU by their insertion index. On Intel topdown
+ * constraints must be adhered to - slots first, etc.
+ */
return arch_evlist__cmp(lhs, rhs);
}
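The rewritten evlist__cmp() reduces to a three-key comparison: the sort index (the leader's index, or the forced index for must-group events), then the group PMU name, then an arch-specific tie-break. A condensed model with stand-in types (struct ev, ev_cmp and arch_cmp are hypothetical simplifications, not the perf structures):

#include <stdio.h>
#include <string.h>

struct ev {
	int idx;			/* insertion order */
	int leader_idx;			/* == idx when ungrouped */
	int forced;			/* arch says "must be in a group" */
	const char *group_pmu_name;
};

/* Default arch tie-break: keep insertion order. */
static int arch_cmp(const struct ev *l, const struct ev *r)
{
	return l->idx - r->idx;
}

static int ev_cmp(const struct ev *l, const struct ev *r, int force_idx)
{
	int li = (force_idx != -1 && l->forced) ? force_idx : l->leader_idx;
	int ri = (force_idx != -1 && r->forced) ? force_idx : r->leader_idx;
	int ret;

	if (li != ri)			/* key 1: sort index */
		return li - ri;
	ret = strcmp(l->group_pmu_name, r->group_pmu_name);
	if (ret)			/* key 2: groups can't span PMUs */
		return ret;
	return arch_cmp(l, r);		/* key 3: arch constraints */
}

int main(void)
{
	struct ev slots = { 0, 0, 1, "cpu" }, retiring = { 3, 3, 1, "cpu" };

	/* Both are forced to index 0, so only the tie-breaks decide. */
	printf("%d\n", ev_cmp(&slots, &retiring, /*force_idx=*/0));	/* -3 */
	return 0;
}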
@@ -2028,9 +2133,11 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
int idx = 0, force_grouped_idx = -1;
struct evsel *pos, *cur_leader = NULL;
struct perf_evsel *cur_leaders_grp = NULL;
- bool idx_changed = false, cur_leader_force_grouped = false;
+ bool idx_changed = false;
int orig_num_leaders = 0, num_leaders = 0;
int ret;
+ struct evsel *force_grouped_leader = NULL;
+ bool last_event_was_forced_leader = false;
/*
* Compute index to insert ungrouped events at. Place them where the
@@ -2053,10 +2160,13 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
*/
pos->core.idx = idx++;
- /* Remember an index to sort all forced grouped events together to. */
- if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
- arch_evsel__must_be_in_group(pos))
- force_grouped_idx = pos->core.idx;
+ /*
+ * Remember an index to sort all forced grouped events
+ * together to. Use the group leader as some events
+ * must appear first within the group.
+ */
+ if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
+ force_grouped_idx = pos_leader->core.idx;
}
/* Sort events. */
@@ -2084,31 +2194,66 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
* Set the group leader respecting the given groupings and that
* groups can't span PMUs.
*/
- if (!cur_leader)
+ if (!cur_leader) {
cur_leader = pos;
+ cur_leaders_grp = &pos->core;
+ if (pos_force_grouped)
+ force_grouped_leader = pos;
+ }
cur_leader_pmu_name = cur_leader->group_pmu_name;
- if ((cur_leaders_grp != pos->core.leader &&
- (!pos_force_grouped || !cur_leader_force_grouped)) ||
- strcmp(cur_leader_pmu_name, pos_pmu_name)) {
- /* Event is for a different group/PMU than last. */
+ if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
+ /* PMU changed so the group/leader must change. */
cur_leader = pos;
- /*
- * Remember the leader's group before it is overwritten,
- * so that later events match as being in the same
- * group.
- */
cur_leaders_grp = pos->core.leader;
+ if (pos_force_grouped && force_grouped_leader == NULL)
+ force_grouped_leader = pos;
+ } else if (cur_leaders_grp != pos->core.leader) {
+ bool split_even_if_last_leader_was_forced = true;
+
/*
- * Avoid forcing events into groups with events that
- * don't need to be in the group.
+ * Event is for a different group. If the last event was
+ * the forced group leader then subsequent group events
+ * and forced events should be in the same group. If
+ * there are no other forced group events then the
+ * forced group leader wasn't really being forced into a
+ * group, it just set arch_evsel__must_be_in_group, and
+ * we don't want the group to split here.
*/
- cur_leader_force_grouped = pos_force_grouped;
+ if (force_grouped_idx != -1 && last_event_was_forced_leader) {
+ struct evsel *pos2 = pos;
+ /*
+ * Search the whole list as the group leaders
+ * aren't currently valid.
+ */
+ list_for_each_entry_continue(pos2, list, core.node) {
+ if (pos->core.leader == pos2->core.leader &&
+ arch_evsel__must_be_in_group(pos2)) {
+ split_even_if_last_leader_was_forced = false;
+ break;
+ }
+ }
+ }
+ if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
+ if (pos_force_grouped) {
+ if (force_grouped_leader) {
+ cur_leader = force_grouped_leader;
+ cur_leaders_grp = force_grouped_leader->core.leader;
+ } else {
+ cur_leader = force_grouped_leader = pos;
+ cur_leaders_grp = &pos->core;
+ }
+ } else {
+ cur_leader = pos;
+ cur_leaders_grp = pos->core.leader;
+ }
+ }
}
if (pos_leader != cur_leader) {
/* The leader changed so update it. */
evsel__set_leader(pos, cur_leader);
}
+ last_event_was_forced_leader = (force_grouped_leader == pos);
}
list_for_each_entry(pos, list, core.node) {
struct evsel *pos_leader = evsel__leader(pos);
@@ -2147,14 +2292,23 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
if (ret2 < 0)
return ret;
- if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
- pr_warning("WARNING: events were regrouped to match PMUs\n");
-
/*
* Add list to the evlist even with errors to allow callers to clean up.
*/
evlist__splice_list_tail(evlist, &parse_state.list);
+ if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
+ pr_warning("WARNING: events were regrouped to match PMUs\n");
+
+ if (verbose > 0) {
+ struct strbuf sb = STRBUF_INIT;
+
+ evlist__uniquify_evsel_names(evlist, &stat_config);
+ evlist__format_evsels(evlist, &sb, 2048);
+ pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
+ strbuf_release(&sb);
+ }
+ }
if (!ret) {
struct evsel *last;