Diffstat (limited to 'tools/perf/util/bpf_counter_cgroup.c')
-rw-r--r--	tools/perf/util/bpf_counter_cgroup.c	80
1 file changed, 57 insertions(+), 23 deletions(-)
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 690be3ce3e11..17d7196c6589 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -4,6 +4,7 @@
/* Copyright (c) 2021 Google */
#include <assert.h>
+#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
@@ -27,6 +28,7 @@
#include "cpumap.h"
#include "thread_map.h"
+#include "bpf_skel/bperf_cgroup.h"
#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
@@ -42,6 +44,55 @@ static struct bperf_cgroup_bpf *skel;

#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

+static void setup_rodata(struct bperf_cgroup_bpf *sk, int evlist_size)
+{
+	int map_size, total_cpus = cpu__max_cpu().cpu;
+
+	BUG_ON(evlist_size % nr_cgroups != 0);
+
+	sk->rodata->num_cpus = total_cpus;
+	sk->rodata->num_events = evlist_size / nr_cgroups;
+
+	if (cgroup_is_v2("perf_event") > 0)
+		sk->rodata->use_cgroup_v2 = 1;
+
+	/* we need one copy of events per cpu for reading */
+	map_size = total_cpus * evlist_size / nr_cgroups;
+	bpf_map__set_max_entries(sk->maps.events, map_size);
+	bpf_map__set_max_entries(sk->maps.cgrp_idx, nr_cgroups);
+	/* previous result is saved in a per-cpu array */
+	map_size = evlist_size / nr_cgroups;
+	bpf_map__set_max_entries(sk->maps.prev_readings, map_size);
+	/* cgroup result needs all events (per-cpu) */
+	map_size = evlist_size;
+	bpf_map__set_max_entries(sk->maps.cgrp_readings, map_size);
+}
+
+static void test_max_events_program_load(void)
+{
+#ifndef NDEBUG
+	/*
+	 * Test that the program verifies with the maximum number of events.
+	 * If this test fails, perf unfortunately needs recompiling with a
+	 * lower BPERF_CGROUP__MAX_EVENTS to avoid BPF verifier issues.
+	 */
+	int err, max_events = BPERF_CGROUP__MAX_EVENTS * nr_cgroups;
+	struct bperf_cgroup_bpf *test_skel = bperf_cgroup_bpf__open();
+
+	if (!test_skel) {
+		pr_err("Failed to open cgroup skeleton\n");
+		return;
+	}
+	setup_rodata(test_skel, max_events);
+	err = bperf_cgroup_bpf__load(test_skel);
+	if (err) {
+		pr_err("Failed to load cgroup skeleton with max events %d.\n",
+		       BPERF_CGROUP__MAX_EVENTS);
+	}
+	bperf_cgroup_bpf__destroy(test_skel);
+#endif
+}
+
static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
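
As an aside on the sizing above: setup_rodata() gives the events map one
copy of each distinct event per possible CPU, prev_readings one slot per
distinct event, and cgrp_readings one slot per cgroup/event combination.
A minimal standalone sketch of that arithmetic, using illustrative numbers
that are assumptions rather than values taken from this patch:

	/* Hypothetical walk-through of setup_rodata()'s map sizing. */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		int nr_cgroups = 2;   /* assumed: two monitored cgroups */
		int evlist_size = 6;  /* assumed: 3 events x 2 cgroups in the evlist */
		int total_cpus = 4;   /* assumed: cpu__max_cpu().cpu */

		assert(evlist_size % nr_cgroups == 0);

		/* one copy of the distinct events per cpu for reading */
		printf("events:        %d\n", total_cpus * evlist_size / nr_cgroups); /* 12 */
		/* one index slot per monitored cgroup */
		printf("cgrp_idx:      %d\n", nr_cgroups);                            /* 2 */
		/* previous readings: one per distinct event (per-cpu array) */
		printf("prev_readings: %d\n", evlist_size / nr_cgroups);              /* 3 */
		/* final readings: every cgroup x event combination */
		printf("cgrp_readings: %d\n", evlist_size);                           /* 6 */
		return 0;
	}
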
@@ -50,35 +101,18 @@ static int bperf_load_program(struct evlist *evlist)
	int i, j;
	struct perf_cpu cpu;
	int total_cpus = cpu__max_cpu().cpu;
-	int map_size, map_fd;
-	int prog_fd, err;
+	int map_fd, prog_fd, err;
+
+	set_max_rlimit();
+
+	test_max_events_program_load();

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}
-
-	skel->rodata->num_cpus = total_cpus;
-	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
-
-	if (cgroup_is_v2("perf_event") > 0)
-		skel->rodata->use_cgroup_v2 = 1;
-
-	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
-
-	/* we need one copy of events per cpu for reading */
-	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
-	bpf_map__set_max_entries(skel->maps.events, map_size);
-	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
-	/* previous result is saved in a per-cpu array */
-	map_size = evlist->core.nr_entries / nr_cgroups;
-	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
-	/* cgroup result needs all events (per-cpu) */
-	map_size = evlist->core.nr_entries;
-	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
-
-	set_max_rlimit();
+	setup_rodata(skel, evlist->core.nr_entries);

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
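
The NDEBUG-gated helper above loads the skeleton once with
BPERF_CGROUP__MAX_EVENTS events per cgroup, so the worst case is pushed
through the BPF verifier before the real load. A common reason such a
compile-time ceiling exists is that any per-event loop in a BPF program
needs a constant bound the verifier can prove; a sketch of that pattern is
below. Only BPERF_CGROUP__MAX_EVENTS comes from this patch (via the new
bpf_skel/bperf_cgroup.h include); the function, names, and value here are
illustrative assumptions.

	#define BPERF_CGROUP__MAX_EVENTS 32	/* assumed value */

	/* Constant loop bound with an early break at the runtime count. */
	static long sum_events(const long *counts, int num_events)
	{
		long total = 0;

		for (int i = 0; i < BPERF_CGROUP__MAX_EVENTS; i++) {
			if (i >= num_events)	/* runtime count, e.g. from rodata */
				break;
			total += counts[i];
		}
		return total;
	}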