| author | Andrii Nakryiko <andrii@kernel.org> | 2024-12-13 14:45:59 -0800 |
|---|---|---|
| committer | Andrii Nakryiko <andrii@kernel.org> | 2024-12-13 14:48:40 -0800 |
| commit | 3d1af4bd84a8aa854034e1ccea245bb28a114a11 | |
| tree | 0acb5f6324ae0ab1cee6d3d41a0b6840a4255dda /kernel | |
| parent | 6a10d2d3613fed501ae9c164cb1c4335e11cb714 | |
| parent | d677a10f80abf1ef65ae9bcf51b5a83ecf10e99a | |
Merge branch 'add-fd_array_cnt-attribute-for-bpf_prog_load'
Anton Protopopov says:
====================
Add fd_array_cnt attribute for BPF_PROG_LOAD
Add a new attribute to the bpf(BPF_PROG_LOAD) system call. If this
new attribute is non-zero, then fd_array is treated as a contiguous
array of length fd_array_cnt, containing only valid map or BTF file
descriptors.
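
For illustration, a minimal userspace sketch of the new interface via
the raw syscall; the sys_bpf() wrapper, the prog_load_with_fd_array()
helper name, and the surrounding program setup are not from this series,
only the fd_array/fd_array_cnt attr fields are:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* thin wrapper around the bpf(2) syscall (illustrative) */
static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int prog_load_with_fd_array(const struct bpf_insn *insns, __u32 insn_cnt,
				   const int *fds, __u32 fd_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	/*
	 * With fd_array_cnt != 0 the kernel treats fd_array as a contiguous
	 * array of fd_cnt map/BTF fds and binds all of them to the program,
	 * making them visible during verification as well.
	 */
	attr.fd_array = (__u64)(unsigned long)fds;
	attr.fd_array_cnt = fd_cnt;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}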
This change allows maps (and BTFs) that aren't referenced directly
by a BPF program to be bound to the program _and_ also to be present
during program verification. BPF_PROG_BIND_MAP is not enough for this
use case, as it binds a map only after the program is loaded, which is
too late for the verifier to see it; a sketch of that older path
follows below.
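
For contrast, the pre-existing post-load binding path via libbpf's
wrapper for BPF_PROG_BIND_MAP (the bind_after_load() helper name is
illustrative):

#include <bpf/bpf.h>

/* Post-load binding: keeps the map alive as long as the program lives,
 * but the verifier never saw the map, so it cannot serve as a
 * verification-time dependency.
 */
static int bind_after_load(int prog_fd, int map_fd)
{
	return bpf_prog_bind_map(prog_fd, map_fd, NULL);
}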
The primary reason for this change is that it is a prerequisite for
adding "instruction set" maps, which are not referenced by the program
directly, yet must be present during program verification.
The first five commits add the new functionality, the sixth adds
corresponding self-tests, and the last one is a small additional fix.
v1 -> v2:
* rewrite the add_fd_from_fd_array() function (Eduard)
* a few cleanups in selftests (Eduard)
v2 -> v3:
* various renamings (Alexei)
* "0 is not special" (Alexei, Andrii)
* do not alloc memory on fd_array init (Alexei)
* fix leaking maps for error path (Hou Tao)
* use libbpf helpers vs. raw syscalls (Andrii)
* add comments on __btf_get_by_fd/__bpf_map_get (Alexei)
* remove extra code (Alexei)
v3 -> v4:
* simplify error path when parsing fd_array
* libbpf: pass fd_array_cnt only in prog_load (Alexei)
* selftests patch contained extra code (Alexei)
* renames, fix comments (Alexei)
v4 -> v5:
* Add btfs to env->used_btfs (Andrii)
* Fix an integer overflow (Andrii)
* A set of cleanups for selftests (Andrii)
====================
Link: https://patch.msgid.link/20241213130934.1087929-1-aspsk@isovalent.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/bpf/btf.c | 11 |
| -rw-r--r-- | kernel/bpf/syscall.c | 2 |
| -rw-r--r-- | kernel/bpf/verifier.c | 333 |
3 files changed, 214 insertions, 132 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e7a59e6462a9..2ef4fef71910 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -7746,14 +7746,9 @@ struct btf *btf_get_by_fd(int fd)
 	struct btf *btf;
 
 	CLASS(fd, f)(fd);
-	if (fd_empty(f))
-		return ERR_PTR(-EBADF);
-
-	if (fd_file(f)->f_op != &btf_fops)
-		return ERR_PTR(-EINVAL);
-
-	btf = fd_file(f)->private_data;
-	refcount_inc(&btf->refcnt);
+	btf = __btf_get_by_fd(f);
+	if (!IS_ERR(btf))
+		refcount_inc(&btf->refcnt);
 	return btf;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5684e8ce132d..4e88797fdbeb 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2730,7 +2730,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
 }
 
 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
+#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
 
 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
 {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c855e7905c35..c0e772996c52 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19218,50 +19218,68 @@ static int find_btf_percpu_datasec(struct btf *btf)
 	return -ENOENT;
 }
 
+/*
+ * Add btf to the used_btfs array and return the index. (If the btf was
+ * already added, then just return the index.) Upon successful insertion
+ * increase btf refcnt, and, if present, also refcount the corresponding
+ * kernel module.
+ */
+static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
+{
+	struct btf_mod_pair *btf_mod;
+	int i;
+
+	/* check whether we recorded this BTF (and maybe module) already */
+	for (i = 0; i < env->used_btf_cnt; i++)
+		if (env->used_btfs[i].btf == btf)
+			return i;
+
+	if (env->used_btf_cnt >= MAX_USED_BTFS)
+		return -E2BIG;
+
+	btf_get(btf);
+
+	btf_mod = &env->used_btfs[env->used_btf_cnt];
+	btf_mod->btf = btf;
+	btf_mod->module = NULL;
+
+	/* if we reference variables from kernel module, bump its refcount */
+	if (btf_is_module(btf)) {
+		btf_mod->module = btf_try_get_module(btf);
+		if (!btf_mod->module) {
+			btf_put(btf);
+			return -ENXIO;
+		}
+	}
+
+	return env->used_btf_cnt++;
+}
+
 /* replace pseudo btf_id with kernel symbol address */
-static int check_pseudo_btf_id(struct bpf_verifier_env *env,
-			       struct bpf_insn *insn,
-			       struct bpf_insn_aux_data *aux)
+static int __check_pseudo_btf_id(struct bpf_verifier_env *env,
+				 struct bpf_insn *insn,
+				 struct bpf_insn_aux_data *aux,
+				 struct btf *btf)
 {
 	const struct btf_var_secinfo *vsi;
 	const struct btf_type *datasec;
-	struct btf_mod_pair *btf_mod;
 	const struct btf_type *t;
 	const char *sym_name;
 	bool percpu = false;
 	u32 type, id = insn->imm;
-	struct btf *btf;
 	s32 datasec_id;
 	u64 addr;
-	int i, btf_fd, err;
-
-	btf_fd = insn[1].imm;
-	if (btf_fd) {
-		btf = btf_get_by_fd(btf_fd);
-		if (IS_ERR(btf)) {
-			verbose(env, "invalid module BTF object FD specified.\n");
-			return -EINVAL;
-		}
-	} else {
-		if (!btf_vmlinux) {
-			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
-			return -EINVAL;
-		}
-		btf = btf_vmlinux;
-		btf_get(btf);
-	}
+	int i;
 
 	t = btf_type_by_id(btf, id);
 	if (!t) {
 		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
-		err = -ENOENT;
-		goto err_put;
+		return -ENOENT;
 	}
 
 	if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
 		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
-		err = -EINVAL;
-		goto err_put;
+		return -EINVAL;
 	}
 
 	sym_name = btf_name_by_offset(btf, t->name_off);
@@ -19269,8 +19287,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
 	if (!addr) {
 		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
 			sym_name);
-		err = -ENOENT;
-		goto err_put;
+		return -ENOENT;
 	}
 	insn[0].imm = (u32)addr;
 	insn[1].imm = addr >> 32;
@@ -19278,7 +19295,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
 	if (btf_type_is_func(t)) {
 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
 		aux->btf_var.mem_size = 0;
-		goto check_btf;
+		return 0;
 	}
 
 	datasec_id = find_btf_percpu_datasec(btf);
@@ -19309,8 +19326,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
 			tname = btf_name_by_offset(btf, t->name_off);
 			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
 				tname, PTR_ERR(ret));
-			err = -EINVAL;
-			goto err_put;
+			return -EINVAL;
 		}
 		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
 		aux->btf_var.mem_size = tsize;
@@ -19319,39 +19335,43 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
 		aux->btf_var.btf = btf;
 		aux->btf_var.btf_id = type;
 	}
-check_btf:
-	/* check whether we recorded this BTF (and maybe module) already */
-	for (i = 0; i < env->used_btf_cnt; i++) {
-		if (env->used_btfs[i].btf == btf) {
-			btf_put(btf);
-			return 0;
-		}
-	}
-
-	if (env->used_btf_cnt >= MAX_USED_BTFS) {
-		err = -E2BIG;
-		goto err_put;
-	}
+	return 0;
+}
 
-	btf_mod = &env->used_btfs[env->used_btf_cnt];
-	btf_mod->btf = btf;
-	btf_mod->module = NULL;
+static int check_pseudo_btf_id(struct bpf_verifier_env *env,
+			       struct bpf_insn *insn,
+			       struct bpf_insn_aux_data *aux)
+{
+	struct btf *btf;
+	int btf_fd;
+	int err;
 
-	/* if we reference variables from kernel module, bump its refcount */
-	if (btf_is_module(btf)) {
-		btf_mod->module = btf_try_get_module(btf);
-		if (!btf_mod->module) {
-			err = -ENXIO;
-			goto err_put;
+	btf_fd = insn[1].imm;
+	if (btf_fd) {
+		CLASS(fd, f)(btf_fd);
+
+		btf = __btf_get_by_fd(f);
+		if (IS_ERR(btf)) {
+			verbose(env, "invalid module BTF object FD specified.\n");
+			return -EINVAL;
+		}
+	} else {
+		if (!btf_vmlinux) {
+			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
+			return -EINVAL;
 		}
+		btf = btf_vmlinux;
 	}
 
-	env->used_btf_cnt++;
+	err = __check_pseudo_btf_id(env, insn, aux, btf);
+	if (err)
+		return err;
 
+	err = __add_used_btf(env, btf);
+	if (err < 0)
+		return err;
 	return 0;
-
-err_put:
-	btf_put(btf);
-	return err;
 }
 
 static bool is_tracing_prog_type(enum bpf_prog_type type)
@@ -19368,6 +19388,12 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
 	}
 }
 
+static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
+{
+	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
+		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+}
+
 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 					struct bpf_map *map,
 					struct bpf_prog *prog)
@@ -19446,39 +19472,47 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	return 0;
-}
+	if (bpf_map_is_cgroup_storage(map) &&
+	    bpf_cgroup_storage_assign(env->prog->aux, map)) {
+		verbose(env, "only one cgroup storage of each type is allowed\n");
+		return -EBUSY;
+	}
 
-static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
-{
-	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
-		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+	if (map->map_type == BPF_MAP_TYPE_ARENA) {
+		if (env->prog->aux->arena) {
+			verbose(env, "Only one arena per program\n");
+			return -EBUSY;
+		}
+		if (!env->allow_ptr_leaks || !env->bpf_capable) {
+			verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
+			return -EPERM;
+		}
+		if (!env->prog->jit_requested) {
+			verbose(env, "JIT is required to use arena\n");
+			return -EOPNOTSUPP;
+		}
+		if (!bpf_jit_supports_arena()) {
+			verbose(env, "JIT doesn't support arena\n");
+			return -EOPNOTSUPP;
+		}
+		env->prog->aux->arena = (void *)map;
+		if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
+			verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
 }
 
-/* Add map behind fd to used maps list, if it's not already there, and return
- * its index. Also set *reused to true if this map was already in the list of
- * used maps.
- * Returns <0 on error, or >= 0 index, on success.
- */
-static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
+static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map)
 {
-	CLASS(fd, f)(fd);
-	struct bpf_map *map;
-	int i;
-
-	map = __bpf_map_get(f);
-	if (IS_ERR(map)) {
-		verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
-		return PTR_ERR(map);
-	}
+	int i, err;
 
 	/* check whether we recorded this map already */
-	for (i = 0; i < env->used_map_cnt; i++) {
-		if (env->used_maps[i] == map) {
-			*reused = true;
+	for (i = 0; i < env->used_map_cnt; i++)
+		if (env->used_maps[i] == map)
 			return i;
-		}
-	}
 
 	if (env->used_map_cnt >= MAX_USED_MAPS) {
 		verbose(env, "The total number of maps per program has reached the limit of %u\n",
@@ -19486,6 +19520,10 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus
 		return -E2BIG;
 	}
 
+	err = check_map_prog_compatibility(env, map, env->prog);
+	if (err)
+		return err;
+
 	if (env->prog->sleepable)
 		atomic64_inc(&map->sleepable_refcnt);
 
@@ -19496,12 +19534,29 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus
 	 */
 	bpf_map_inc(map);
 
-	*reused = false;
 	env->used_maps[env->used_map_cnt++] = map;
 
 	return env->used_map_cnt - 1;
 }
 
+/* Add map behind fd to used maps list, if it's not already there, and return
+ * its index.
+ * Returns <0 on error, or >= 0 index, on success.
+ */
+static int add_used_map(struct bpf_verifier_env *env, int fd)
+{
+	struct bpf_map *map;
+	CLASS(fd, f)(fd);
+
+	map = __bpf_map_get(f);
+	if (IS_ERR(map)) {
+		verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
+		return PTR_ERR(map);
+	}
+
+	return __add_used_map(env, map);
+}
+
 /* find and rewrite pseudo imm in ld_imm64 instructions:
  *
  * 1. if it accesses map FD, replace it with actual map pointer.
@@ -19533,7 +19588,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 			int map_idx;
 			u64 addr;
 			u32 fd;
-			bool reused;
 
 			if (i == insn_cnt - 1 || insn[1].code != 0 ||
 			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -19594,7 +19648,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 				break;
 			}
 
-			map_idx = add_used_map_from_fd(env, fd, &reused);
+			map_idx = add_used_map(env, fd);
 			if (map_idx < 0)
 				return map_idx;
 			map = env->used_maps[map_idx];
@@ -19602,10 +19656,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 			aux = &env->insn_aux_data[i];
 			aux->map_index = map_idx;
 
-			err = check_map_prog_compatibility(env, map, env->prog);
-			if (err)
-				return err;
-
 			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
 			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
 				addr = (unsigned long)map;
@@ -19636,39 +19686,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 			insn[0].imm = (u32)addr;
 			insn[1].imm = addr >> 32;
 
-			/* proceed with extra checks only if its newly added used map */
-			if (reused)
-				goto next_insn;
-
-			if (bpf_map_is_cgroup_storage(map) &&
-			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
-				verbose(env, "only one cgroup storage of each type is allowed\n");
-				return -EBUSY;
-			}
-
-			if (map->map_type == BPF_MAP_TYPE_ARENA) {
-				if (env->prog->aux->arena) {
-					verbose(env, "Only one arena per program\n");
-					return -EBUSY;
-				}
-				if (!env->allow_ptr_leaks || !env->bpf_capable) {
-					verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
-					return -EPERM;
-				}
-				if (!env->prog->jit_requested) {
-					verbose(env, "JIT is required to use arena\n");
-					return -EOPNOTSUPP;
-				}
-				if (!bpf_jit_supports_arena()) {
-					verbose(env, "JIT doesn't support arena\n");
-					return -EOPNOTSUPP;
-				}
-				env->prog->aux->arena = (void *)map;
-				if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
-					verbose(env, "arena's user address must be set via map_extra or mmap()\n");
-					return -EINVAL;
-				}
-			}
-
 next_insn:
 			insn++;
 			i++;
@@ -22839,6 +22856,73 @@ struct btf *bpf_get_btf_vmlinux(void)
 	return btf_vmlinux;
 }
 
+/*
+ * The add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In
+ * this case expect that every file descriptor in the array is either a map or
+ * a BTF. Everything else is considered to be trash.
+ */
+static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
+{
+	struct bpf_map *map;
+	struct btf *btf;
+	CLASS(fd, f)(fd);
+	int err;
+
+	map = __bpf_map_get(f);
+	if (!IS_ERR(map)) {
+		err = __add_used_map(env, map);
+		if (err < 0)
+			return err;
+		return 0;
+	}
+
+	btf = __btf_get_by_fd(f);
+	if (!IS_ERR(btf)) {
+		err = __add_used_btf(env, btf);
+		if (err < 0)
+			return err;
+		return 0;
+	}
+
+	verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
+	return PTR_ERR(map);
+}
+
+static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr)
+{
+	size_t size = sizeof(int);
+	int ret;
+	int fd;
+	u32 i;
+
+	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
+
+	/*
+	 * The only difference between old (no fd_array_cnt is given) and new
+	 * APIs is that in the latter case the fd_array is expected to be
+	 * continuous and is scanned for map fds right away
+	 */
+	if (!attr->fd_array_cnt)
+		return 0;
+
+	/* Check for integer overflow */
+	if (attr->fd_array_cnt >= (U32_MAX / size)) {
+		verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < attr->fd_array_cnt; i++) {
+		if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size))
+			return -EFAULT;
+
+		ret = add_fd_from_fd_array(env, fd);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 {
 	u64 start_time = ktime_get_ns();
@@ -22870,7 +22954,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 		env->insn_aux_data[i].orig_idx = i;
 	env->prog = *prog;
 	env->ops = bpf_verifier_ops[env->prog->type];
-	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
 
 	env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
 	env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
@@ -22893,6 +22976,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	if (ret)
 		goto err_unlock;
 
+	ret = process_fd_array(env, attr, uattr);
+	if (ret)
+		goto skip_full_check;
+
 	mark_verifier_state_clean(env);
 
 	if (IS_ERR(btf_vmlinux)) {
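
Putting the pieces together, a sketch of the new flow from userspace,
assuming the companion libbpf change from this series exposes the new
attribute as bpf_prog_load_opts.fd_array_cnt (per the v3 -> v4 note
above); the load_with_bound_map() helper and the trivial "r0 = 0; exit"
program are illustrative:

#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int load_with_bound_map(void)
{
	/* trivial "r0 = 0; exit" program */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int fds[1], map_fd, prog_fd;

	/* a map that no instruction above references */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL,
				sizeof(int), sizeof(long), 1, NULL);
	if (map_fd < 0)
		return map_fd;
	fds[0] = map_fd;

	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.fd_array = fds,
		.fd_array_cnt = 1,	/* new attribute from this series */
	);

	/*
	 * process_fd_array() puts the map into the program's used_maps during
	 * verification, so it stays alive even after its own fd is closed.
	 */
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				insns, 2, &opts);
	close(map_fd);
	return prog_fd;
}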
