author     Alexei Starovoitov <ast@kernel.org>  2021-12-09 15:40:34 -0800
committer  Andrii Nakryiko <andrii@kernel.org>  2021-12-10 15:35:14 -0800
commit     bd6b3b355af59af1f8c417c269a46be26a1dc87b
tree       0e92aea9d74a32c822d7232e3293fb51a1cc4e87 /tools/lib/bpf/bpf.c
parent     ac55b3f00c323cf09d59a191e14bcf39b691078c
parent     b59e4ce8bcaab6445f4a0d37a96ca8953caaf5cf
Merge branch 'Enhance and rework logging controls in libbpf'
Andrii Nakryiko says:
====================
Add new open options and per-program setters to control BTF and program
loading log verboseness, and allow providing custom log buffers to capture
logs of interest. Note that custom log_buf and log_level are orthogonal,
which matches the previous (alas, less customizable) behavior of libbpf,
even though it sort of worked by accident: if someone specified
log_level = 1 in bpf_object__load_xattr(), the first attempt to load any
BPF program resulted in a wasted bpf() syscall returning -EINVAL because
!!log_buf != !!log_level. Libbpf would then allocate a log buffer and
retry, after which program loading would succeed and libbpf would print
the verbose program loading log through its print callback.
This behavior is now documented and made more efficient by avoiding the
unnecessary syscall. Additionally, log_level can now be controlled globally
at the bpf_object level through bpf_object_open_opts, as well as on
a per-program basis with the bpf_program__set_log_buf() and
bpf_program__set_log_level() APIs, as sketched below.
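To illustrate, a minimal sketch of the per-program usage on an
already-opened bpf_object; the buffer size, helper name, and error
handling are illustrative, not part of this series:

  #include <errno.h>
  #include <stdio.h>
  #include <bpf/libbpf.h>

  /* Hypothetical buffer; the size is illustrative. */
  static char prog_log[64 * 1024];

  static int load_with_per_prog_log(struct bpf_object *obj, const char *prog_name)
  {
  	struct bpf_program *prog;
  	int err;

  	prog = bpf_object__find_program_by_name(obj, prog_name);
  	if (!prog)
  		return -ENOENT;

  	/* New per-program APIs from this series; must be set before load */
  	bpf_program__set_log_buf(prog, prog_log, sizeof(prog_log));
  	bpf_program__set_log_level(prog, 1);

  	err = bpf_object__load(obj);
  	if (err)
  		fprintf(stderr, "verifier log for %s:\n%s\n", prog_name, prog_log);
  	return err;
  }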
Now that we have a more future-proof way to set log_level, deprecate
bpf_object__load_xattr().
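For reference, a sketch of the replacement pattern, assuming the
kernel_log_buf/kernel_log_size/kernel_log_level open options introduced
elsewhere in this series; the buffer size and helper name are illustrative:

  #include <bpf/libbpf.h>

  /* Hypothetical global buffer; the size is illustrative. */
  static char kernel_log[128 * 1024];

  /* Instead of passing log_level to bpf_object__load_xattr(), set log
   * options at open time, then use plain bpf_object__load().
   */
  static struct bpf_object *open_with_verbose_log(const char *path)
  {
  	LIBBPF_OPTS(bpf_object_open_opts, opts,
  		.kernel_log_buf = kernel_log,
  		.kernel_log_size = sizeof(kernel_log),
  		.kernel_log_level = 1,
  	);

  	return bpf_object__open_file(path, &opts);
  }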
v2->v3:
- added log_buf selftests for bpf_prog_load() and bpf_btf_load();
- fix !log_buf in bpf_prog_load (John);
- fix log_level==0 in bpf_btf_load (thanks selftest!);
v1->v2:
- fix log_level == 0 handling of bpf_prog_load, add as patch #1 (Alexei);
- add comments explaining log_buf_size overflow prevention (Alexei).
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Diffstat (limited to 'tools/lib/bpf/bpf.c')
-rw-r--r--  tools/lib/bpf/bpf.c  88
1 file changed, 66 insertions(+), 22 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4e7836e1a7b5..6b2407e12060 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -303,10 +303,6 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
 	if (log_level && !log_buf)
 		return libbpf_err(-EINVAL);
 
-	attr.log_level = log_level;
-	attr.log_buf = ptr_to_u64(log_buf);
-	attr.log_size = log_size;
-
 	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
 	func_info = OPTS_GET(opts, func_info, NULL);
 	attr.func_info_rec_size = func_info_rec_size;
@@ -321,6 +317,12 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
 
 	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
 
+	if (log_level) {
+		attr.log_buf = ptr_to_u64(log_buf);
+		attr.log_size = log_size;
+		attr.log_level = log_level;
+	}
+
 	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
 	if (fd >= 0)
 		return fd;
@@ -366,16 +368,17 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
 		goto done;
 	}
 
-	if (log_level || !log_buf)
-		goto done;
+	if (log_level == 0 && log_buf) {
+		/* log_level == 0 with non-NULL log_buf requires retrying on error
+		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
+		 * failure
+		 */
+		attr.log_buf = ptr_to_u64(log_buf);
+		attr.log_size = log_size;
+		attr.log_level = 1;
 
-	/* Try again with log */
-	log_buf[0] = 0;
-	attr.log_buf = ptr_to_u64(log_buf);
-	attr.log_size = log_size;
-	attr.log_level = 1;
-
-	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
+		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
+	}
 done:
 	/* free() doesn't affect errno, so we don't need to restore it */
 	free(finfo);
@@ -1044,24 +1047,65 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
 	return libbpf_err_errno(fd);
 }
 
-int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
-		 bool do_log)
+int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
 {
-	union bpf_attr attr = {};
+	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
+	union bpf_attr attr;
+	char *log_buf;
+	size_t log_size;
+	__u32 log_level;
 	int fd;
 
-	attr.btf = ptr_to_u64(btf);
+	memset(&attr, 0, attr_sz);
+
+	if (!OPTS_VALID(opts, bpf_btf_load_opts))
+		return libbpf_err(-EINVAL);
+
+	log_buf = OPTS_GET(opts, log_buf, NULL);
+	log_size = OPTS_GET(opts, log_size, 0);
+	log_level = OPTS_GET(opts, log_level, 0);
+
+	if (log_size > UINT_MAX)
+		return libbpf_err(-EINVAL);
+	if (log_size && !log_buf)
+		return libbpf_err(-EINVAL);
+
+	attr.btf = ptr_to_u64(btf_data);
 	attr.btf_size = btf_size;
+	/* log_level == 0 and log_buf != NULL means "try loading without
+	 * log_buf, but retry with log_buf and log_level=1 on error", which is
+	 * consistent across low-level and high-level BTF and program loading
+	 * APIs within libbpf and provides a sensible behavior in practice
+	 */
+	if (log_level) {
+		attr.btf_log_buf = ptr_to_u64(log_buf);
+		attr.btf_log_size = (__u32)log_size;
+		attr.btf_log_level = log_level;
+	}
 
-retry:
-	if (do_log && log_buf && log_buf_size) {
-		attr.btf_log_level = 1;
-		attr.btf_log_size = log_buf_size;
+	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
+	if (fd < 0 && log_buf && log_level == 0) {
 		attr.btf_log_buf = ptr_to_u64(log_buf);
+		attr.btf_log_size = (__u32)log_size;
+		attr.btf_log_level = 1;
+		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
 	}
+	return libbpf_err_errno(fd);
+}
+
+int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
+{
+	LIBBPF_OPTS(bpf_btf_load_opts, opts);
+	int fd;
 
-	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));
+retry:
+	if (do_log && log_buf && log_buf_size) {
+		opts.log_buf = log_buf;
+		opts.log_size = log_buf_size;
+		opts.log_level = 1;
+	}
+	fd = bpf_btf_load(btf, btf_size, &opts);
 	if (fd < 0 && !do_log && log_buf && log_buf_size) {
 		do_log = true;
 		goto retry;
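For context, a sketch of how the new bpf_btf_load() might be called,
based on the opts handling shown in the diff above; the buffer size and
helper name are illustrative:

  #include <stdio.h>
  #include <bpf/bpf.h>

  /* Illustrative buffer size. */
  static char btf_log[16 * 1024];

  static int load_btf_with_log(const void *btf_data, size_t btf_size)
  {
  	/* log_level == 0 with a non-NULL log_buf: the first attempt is made
  	 * without a log; on failure, libbpf retries with log_level = 1 so the
  	 * buffer holds the failure details (see the retry logic above).
  	 */
  	LIBBPF_OPTS(bpf_btf_load_opts, opts,
  		.log_buf = btf_log,
  		.log_size = sizeof(btf_log),
  	);
  	int fd;

  	fd = bpf_btf_load(btf_data, btf_size, &opts);
  	if (fd < 0)
  		fprintf(stderr, "BTF load failed: %s\n", btf_log);
  	return fd;
  }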