author    Eduard Zingerman <eddyz87@gmail.com>  2025-09-18 19:18:43 -0700
committer Alexei Starovoitov <ast@kernel.org>  2025-09-19 09:27:23 -0700
commit    79f047c7d968b21ff4b72bd70c4533140553c56c (patch)
tree      4f9b0a0eb47e015fcd8097b6abfd2e20fe5344de /kernel
parent    107e169799057bc6a379ddb625cbe1e51cfc7d72 (diff)
bpf: table based bpf_insn_successors()
Converting bpf_insn_successors() to use a lookup table makes it ~1.5
times faster. Also remove unnecessary conditionals:
- `idx + 1 < prog->len` is unnecessary, because after check_cfg() all
  jump targets are guaranteed to be within the program;
- `i == 0 || succ[0] != dst` is unnecessary, because every client of
  bpf_insn_successors() can handle duplicate edges:
  - compute_live_registers()
  - compute_scc()

Moving bpf_insn_successors() to liveness.c allows its inlining in
liveness.c:__update_stack_liveness(). Such inlining speeds up
__update_stack_liveness() by ~40%.

bpf_insn_successors() is used in both verifier.c and liveness.c.
perf shows that this move does not negatively impact its users in
verifier.c, as those run only once before the main verification pass,
unlike __update_stack_liveness(), which can be triggered multiple
times.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-10-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
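[Usage sketch, not part of the patch: how a client might consume
bpf_insn_successors(). Only the function's signature comes from the
diff below; visit_insn_successors() and mark_reachable() are made-up
placeholders.]

	/* Sketch of a typical caller: at most two successor indices come
	 * back, and a duplicate edge (e.g. a conditional jump with offset
	 * 0, where both targets are idx + 1) just means the same index is
	 * visited twice, which clients such as compute_scc() tolerate. */
	static void visit_insn_successors(struct bpf_prog *prog, u32 idx)
	{
		u32 succ[2];
		int n, i;

		n = bpf_insn_successors(prog, idx, succ); /* 0, 1 or 2 */
		for (i = 0; i < n; i++)
			mark_reachable(succ[i]); /* hypothetical helper */
	}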
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/liveness.c | 56
-rw-r--r--  kernel/bpf/verifier.c | 72
2 files changed, 57 insertions, 71 deletions
diff --git a/kernel/bpf/liveness.c b/kernel/bpf/liveness.c
index 6f9dfaaf6e64..3c611aba7f52 100644
--- a/kernel/bpf/liveness.c
+++ b/kernel/bpf/liveness.c
@@ -433,6 +433,62 @@ static void log_mask_change(struct bpf_verifier_env *env, struct callchain *call
bpf_log(&env->log, "\n");
}
+int bpf_jmp_offset(struct bpf_insn *insn)
+{
+ u8 code = insn->code;
+
+ if (code == (BPF_JMP32 | BPF_JA))
+ return insn->imm;
+ return insn->off;
+}
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for opcode_info_tbl");
+
+inline int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
+{
+ static const struct opcode_info {
+ bool can_jump;
+ bool can_fallthrough;
+ } opcode_info_tbl[256] = {
+ [0 ... 255] = {.can_jump = false, .can_fallthrough = true},
+ #define _J(code, ...) \
+ [BPF_JMP | code] = __VA_ARGS__, \
+ [BPF_JMP32 | code] = __VA_ARGS__
+
+ _J(BPF_EXIT, {.can_jump = false, .can_fallthrough = false}),
+ _J(BPF_JA, {.can_jump = true, .can_fallthrough = false}),
+ _J(BPF_JEQ, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JNE, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JLT, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JLE, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JGT, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JGE, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JSGT, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JSGE, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JSLT, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JSLE, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JCOND, {.can_jump = true, .can_fallthrough = true}),
+ _J(BPF_JSET, {.can_jump = true, .can_fallthrough = true}),
+ #undef _J
+ };
+ struct bpf_insn *insn = &prog->insnsi[idx];
+ const struct opcode_info *opcode_info;
+ int i = 0, insn_sz;
+
+ opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
+ insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+ if (opcode_info->can_fallthrough)
+ succ[i++] = idx + insn_sz;
+
+ if (opcode_info->can_jump)
+ succ[i++] = idx + bpf_jmp_offset(insn) + 1;
+
+ return i;
+}
+
+__diag_pop();
+
static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
struct func_instance *instance)
{
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1da2471442b..1d4183bc3cd1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3485,15 +3485,6 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
return 0;
}
-static int jmp_offset(struct bpf_insn *insn)
-{
- u8 code = insn->code;
-
- if (code == (BPF_JMP32 | BPF_JA))
- return insn->imm;
- return insn->off;
-}
-
static int check_subprogs(struct bpf_verifier_env *env)
{
int i, subprog_start, subprog_end, off, cur_subprog = 0;
@@ -3520,7 +3511,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
goto next;
- off = i + jmp_offset(&insn[i]) + 1;
+ off = i + bpf_jmp_offset(&insn[i]) + 1;
if (off < subprog_start || off >= subprog_end) {
verbose(env, "jump out of range from insn %d to %d\n", i, off);
return -EINVAL;
@@ -23944,67 +23935,6 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
return 0;
}
-static bool can_fallthrough(struct bpf_insn *insn)
-{
- u8 class = BPF_CLASS(insn->code);
- u8 opcode = BPF_OP(insn->code);
-
- if (class != BPF_JMP && class != BPF_JMP32)
- return true;
-
- if (opcode == BPF_EXIT || opcode == BPF_JA)
- return false;
-
- return true;
-}
-
-static bool can_jump(struct bpf_insn *insn)
-{
- u8 class = BPF_CLASS(insn->code);
- u8 opcode = BPF_OP(insn->code);
-
- if (class != BPF_JMP && class != BPF_JMP32)
- return false;
-
- switch (opcode) {
- case BPF_JA:
- case BPF_JEQ:
- case BPF_JNE:
- case BPF_JLT:
- case BPF_JLE:
- case BPF_JGT:
- case BPF_JGE:
- case BPF_JSGT:
- case BPF_JSGE:
- case BPF_JSLT:
- case BPF_JSLE:
- case BPF_JCOND:
- case BPF_JSET:
- return true;
- }
-
- return false;
-}
-
-int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
-{
- struct bpf_insn *insn = &prog->insnsi[idx];
- int i = 0, insn_sz;
- u32 dst;
-
- insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
- if (can_fallthrough(insn) && idx + 1 < prog->len)
- succ[i++] = idx + insn_sz;
-
- if (can_jump(insn)) {
- dst = idx + jmp_offset(insn) + 1;
- if (i == 0 || succ[0] != dst)
- succ[i++] = dst;
- }
-
- return i;
-}
-
/* Each field is a register bitmask */
struct insn_live_regs {
u16 use; /* registers read by instruction */