From 6966d4c4425b6796b1da13a6f86d09825df3d323 Mon Sep 17 00:00:00 2001
From: Yuntao Wang
Date: Sun, 20 Feb 2022 15:27:50 +0800
Subject: libbpf: Remove redundant check in btf_fixup_datasec()

The check 't->size && t->size != size' is redundant: if t->size compares
unequal to 0, we skip straight to sorting variables and the check is
never reached.

Signed-off-by: Yuntao Wang
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20220220072750.209215-1-ytcoode@gmail.com
---
 tools/lib/bpf/libbpf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index ad43b6ce825e..7e978feaf822 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2795,7 +2795,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
 		goto sort_vars;
 
 	ret = find_elf_sec_sz(obj, name, &size);
-	if (ret || !size || (t->size && t->size != size)) {
+	if (ret || !size) {
 		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
 		return -ENOENT;
 	}
--
cgit

From 08894d9c647aad08ddd19398e03a0aa1a70b7dc8 Mon Sep 17 00:00:00 2001
From: Yuntao Wang
Date: Wed, 23 Feb 2022 16:52:44 +0800
Subject: libbpf: Simplify the find_elf_sec_sz() function

The check in the last return statement is unnecessary; we could just
return the ret variable. But the function can be simplified further by
returning 0 immediately when the section size is found and -ENOENT
otherwise, which also lets us remove the ret variable entirely.

Signed-off-by: Yuntao Wang
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20220223085244.3058118-1-ytcoode@gmail.com
---
 tools/lib/bpf/libbpf.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7e978feaf822..776b8e034d62 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1374,22 +1374,20 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
 
 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
 {
-	int ret = -ENOENT;
 	Elf_Data *data;
 	Elf_Scn *scn;
 
-	*size = 0;
 	if (!name)
 		return -EINVAL;
 
 	scn = elf_sec_by_name(obj, name);
 	data = elf_sec_data(obj, scn);
 	if (data) {
-		ret = 0; /* found it */
 		*size = data->d_size;
+		return 0; /* found it */
 	}
 
-	return *size ? 0 : ret;
+	return -ENOENT;
 }
 
 static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
--
cgit
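The pattern these two commits converge on is easier to see outside the
diff context. The sketch below is illustrative, not libbpf code:
find_sec_sz() and its caller are made-up stand-ins for find_elf_sec_sz()
and btf_fixup_datasec(), showing the out-parameter-plus-errno contract
and why no '*size = 0' sentinel is needed once every path returns an
explicit status.

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  /* Stand-in for find_elf_sec_sz(): on success, write the size through
   * the out-parameter and return 0; otherwise return a negative errno.
   * Because every path returns an explicit status, the caller never
   * depends on *size being pre-initialized.
   */
  static int find_sec_sz(const char *name, unsigned int *size)
  {
          if (!name)
                  return -EINVAL;
          if (strcmp(name, ".data") == 0) { /* pretend only .data exists */
                  *size = 4096;
                  return 0; /* found it */
          }
          return -ENOENT;
  }

  int main(void)
  {
          unsigned int size;
          int ret = find_sec_sz(".data", &size);

          /* The caller-side check mirrors the patched btf_fixup_datasec():
           * a non-zero return or a zero size means no usable section.
           */
          if (ret || !size)
                  fprintf(stderr, "invalid section size\n");
          else
                  printf("section size: %u bytes\n", size);
          return 0;
  }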
From a4fbfdd7a160eccaafc093eb5b34f838b1ca0bf0 Mon Sep 17 00:00:00 2001
From: Stijn Tintel
Date: Fri, 25 Feb 2022 17:23:55 +0200
Subject: libbpf: Fix BPF_MAP_TYPE_PERF_EVENT_ARRAY auto-pinning

When a BPF map of type BPF_MAP_TYPE_PERF_EVENT_ARRAY doesn't have the
max_entries parameter set, the map will be created with max_entries set
to the number of available CPUs. When we try to reuse such a pinned map,
map_is_reuse_compat() will return false, as max_entries in the map
definition differs from max_entries of the existing map, causing the
following error:

  libbpf: couldn't reuse pinned map at '/sys/fs/bpf/m_logging': parameter mismatch

Fix this by overwriting max_entries in the map definition. For this to
work, the assignment has to happen in bpf_object__create_maps(), before
bpf_object__reuse_map() is called.

Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
Signed-off-by: Stijn Tintel
Signed-off-by: Daniel Borkmann
Acked-by: Song Liu
Link: https://lore.kernel.org/bpf/20220225152355.315204-1-stijn@linux-ipv6.be
---
 tools/lib/bpf/libbpf.c | 44 +++++++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 19 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 776b8e034d62..be6480e260c4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -4859,7 +4859,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	LIBBPF_OPTS(bpf_map_create_opts, create_attr);
 	struct bpf_map_def *def = &map->def;
 	const char *map_name = NULL;
-	__u32 max_entries;
 	int err = 0;
 
 	if (kernel_supports(obj, FEAT_PROG_NAME))
@@ -4869,21 +4868,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	create_attr.numa_node = map->numa_node;
 	create_attr.map_extra = map->map_extra;
 
-	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
-		int nr_cpus;
-
-		nr_cpus = libbpf_num_possible_cpus();
-		if (nr_cpus < 0) {
-			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
-				map->name, nr_cpus);
-			return nr_cpus;
-		}
-		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
-		max_entries = nr_cpus;
-	} else {
-		max_entries = def->max_entries;
-	}
-
 	if (bpf_map__is_struct_ops(map))
 		create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
 
@@ -4933,7 +4917,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	if (obj->gen_loader) {
 		bpf_gen__map_create(obj->gen_loader, def->type, map_name,
-				    def->key_size, def->value_size, max_entries,
+				    def->key_size, def->value_size, def->max_entries,
 				    &create_attr, is_inner ? -1 : map - obj->maps);
 		/* Pretend to have valid FD to pass various fd >= 0 checks.
 		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
 		 */
@@ -4942,7 +4926,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 	} else {
 		map->fd = bpf_map_create(def->type, map_name,
 					 def->key_size, def->value_size,
-					 max_entries, &create_attr);
+					 def->max_entries, &create_attr);
 	}
 
 	if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
@@ -4959,7 +4943,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
 		map->btf_value_type_id = 0;
 		map->fd = bpf_map_create(def->type, map_name,
 					 def->key_size, def->value_size,
-					 max_entries, &create_attr);
+					 def->max_entries, &create_attr);
 	}
 
 	err = map->fd < 0 ? -errno : 0;
@@ -5063,6 +5047,24 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
 	return 0;
 }
 
+static int map_set_def_max_entries(struct bpf_map *map)
+{
+	if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
+		int nr_cpus;
+
+		nr_cpus = libbpf_num_possible_cpus();
+		if (nr_cpus < 0) {
+			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
+				map->name, nr_cpus);
+			return nr_cpus;
+		}
+		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
+		map->def.max_entries = nr_cpus;
+	}
+
+	return 0;
+}
+
 static int
 bpf_object__create_maps(struct bpf_object *obj)
 {
@@ -5095,6 +5097,10 @@ bpf_object__create_maps(struct bpf_object *obj)
 			continue;
 		}
 
+		err = map_set_def_max_entries(map);
+		if (err)
+			goto err_out;
+
 		retried = false;
 retry:
 		if (map->pin_path) {
--
cgit
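From the caller's side, the scenario this commit fixes looks roughly
like the sketch below. It is a hedged example: the object file
"prog.bpf.o" is hypothetical, and it assumes the object declares a
BPF_MAP_TYPE_PERF_EVENT_ARRAY with max_entries left at 0 and
LIBBPF_PIN_BY_NAME pinning, so a second run reuses the map pinned under
/sys/fs/bpf.

  #include <bpf/libbpf.h>
  #include <stdio.h>

  int main(void)
  {
          struct bpf_object *obj;
          long err;

          /* Hypothetical object: declares a BPF_MAP_TYPE_PERF_EVENT_ARRAY
           * with max_entries = 0 and pinning = LIBBPF_PIN_BY_NAME.
           */
          obj = bpf_object__open_file("prog.bpf.o", NULL);
          err = libbpf_get_error(obj);
          if (err) {
                  fprintf(stderr, "open failed: %ld\n", err);
                  return 1;
          }

          /* With the fix, max_entries is patched to the possible-CPU count
           * before the pinned map is compared for reuse, so running this
           * program a second time no longer fails with "parameter mismatch".
           */
          if (bpf_object__load(obj)) {
                  fprintf(stderr, "load failed\n");
                  bpf_object__close(obj);
                  return 1;
          }

          bpf_object__close(obj);
          return 0;
  }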
From 41332d6e3a430adc91e0af115b4261b0d2f116ec Mon Sep 17 00:00:00 2001
From: Yuntao Wang
Date: Thu, 3 Mar 2022 08:59:21 +0800
Subject: libbpf: Add a check to ensure that page_cnt is non-zero

The page_cnt parameter specifies the number of memory pages allocated
for each per-CPU buffer; it must be non-zero and a power of 2.

Currently, the __perf_buffer__new() function attempts to validate that
page_cnt is a power of 2, but forgets to check for the case where
page_cnt is zero. Fix this by replacing 'page_cnt & (page_cnt - 1)'
with 'page_cnt == 0 || (page_cnt & (page_cnt - 1))'.

With this change in place, there is no need to add a separate non-zero
check to perf_buffer__new_v0_6_0(), and the existing check for zero in
perf_buffer__new_raw_v0_6_0() can be removed as well. The code becomes
cleaner and more readable.

Signed-off-by: Yuntao Wang
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20220303005921.53436-1-ytcoode@gmail.com
---
 tools/lib/bpf/libbpf.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index be6480e260c4..81bf01d67671 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10951,7 +10951,7 @@ struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
 {
 	struct perf_buffer_params p = {};
 
-	if (page_cnt == 0 || !attr)
+	if (!attr)
 		return libbpf_err_ptr(-EINVAL);
 
 	if (!OPTS_VALID(opts, perf_buffer_raw_opts))
@@ -10992,7 +10992,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
 	__u32 map_info_len;
 	int err, i, j, n;
 
-	if (page_cnt & (page_cnt - 1)) {
+	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
 		pr_warn("page count should be power of two, but is %zu\n",
 			page_cnt);
 		return ERR_PTR(-EINVAL);
--
cgit
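The zero case needs its own clause because of how the power-of-two test
behaves at zero: for page_cnt == 0, 'page_cnt & (page_cnt - 1)' is
0 & SIZE_MAX, which is 0, so the old test alone accepted it. Below is a
small standalone sketch of the corrected validation (not libbpf code;
the helper name is made up):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Mirrors the corrected check: reject zero as well as non-powers of
   * two. The old test 'page_cnt & (page_cnt - 1)' evaluates to 0 for
   * page_cnt == 0 and would let it through.
   */
  static bool page_cnt_is_valid(size_t page_cnt)
  {
          return !(page_cnt == 0 || (page_cnt & (page_cnt - 1)));
  }

  int main(void)
  {
          size_t tests[] = { 0, 1, 2, 3, 8, 64, 100 };
          size_t i;

          for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                  printf("page_cnt=%zu -> %s\n", tests[i],
                         page_cnt_is_valid(tests[i]) ? "valid" : "rejected");
          return 0; /* expect 1, 2, 8 and 64 to be valid */
  }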