author    Eduard Zingerman <eddyz87@gmail.com>    2025-09-18 19:18:41 -0700
committer Alexei Starovoitov <ast@kernel.org>    2025-09-19 09:27:23 -0700
commit    ccf25a67c7e29cfa6815d193054789b45ef825ad (patch)
tree      9df851b84a8d166f3962fa619bc8d163a5f5a33b
parent    e41c237953b36cdd025b82996a74bfe39c509d20 (diff)
bpf: signal error if old liveness is more conservative than new
Unlike the new algorithm, register chain based liveness tracking is fully
path sensitive, and thus should be strictly more accurate. Validate the
new algorithm by signaling an error whenever it considers a stack slot
dead while the old algorithm considers it alive.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-8-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
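The check is deliberately one-directional: the old register-chain tracking is path sensitive and therefore more precise, so any slot it still reads must also be alive under the new, coarser tracking; a slot dead per the new algorithm but live per the old one can only mean the new algorithm under-approximates. Below is a minimal self-contained sketch of that invariant, using an illustrative bitmask representation and invented names (old_live/new_live, liveness_consistent) rather than the verifier's actual data structures:

/*
 * Sketch only: one bit per stack slot index (spi); a set bit means
 * "considered alive". The real verifier consults REG_LIVE_READ marks
 * and bpf_stack_slot_alive() instead of bitmasks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool liveness_consistent(uint64_t old_live, uint64_t new_live)
{
	/* slots alive per the old algorithm but dead per the new one */
	uint64_t under_approx = old_live & ~new_live;

	if (under_approx) {
		fprintf(stderr, "incorrect live marks: spi mask %#llx\n",
			(unsigned long long)under_approx);
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", liveness_consistent(0x5, 0x7)); /* 1: new over-approximates old */
	printf("%d\n", liveness_consistent(0x9, 0x7)); /* 0: slot 3 live in old, dead in new */
	return 0;
}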
-rw-r--r--  include/linux/bpf_verifier.h |  1 +
-rw-r--r--  kernel/bpf/verifier.c        |  9 +++++++++
2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 2e3bdd50e2ba..dec5da3a2e59 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -852,6 +852,7 @@ struct bpf_verifier_env {
 	/* array of pointers to bpf_scc_info indexed by SCC id */
 	struct bpf_scc_info **scc_info;
 	u32 scc_cnt;
+	bool internal_error;
 };
 
 static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb931a144b95..f70e34a38c13 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18576,6 +18576,11 @@ static void clean_func_state(struct bpf_verifier_env *env,
 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
 		if (!bpf_stack_slot_alive(env, st->frameno, i)) {
+			if (st->stack[i].spilled_ptr.live & REG_LIVE_READ) {
+				verifier_bug(env, "incorrect live marks #1 for insn %d frameno %d spi %d\n",
+					     env->insn_idx, st->frameno, i);
+				env->internal_error = true;
+			}
 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
 				st->stack[i].slot_type[j] = STACK_INVALID;
@@ -19546,6 +19551,8 @@ skip_inf_loop_check:
 			loop = incomplete_read_marks(env, &sl->state);
 			if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
 hit:
+				if (env->internal_error)
+					return -EFAULT;
 				sl->hit_cnt++;
 				/* reached equivalent register/stack state,
 				 * prune the search.
@@ -19660,6 +19667,8 @@ hit:
 			return 1;
 		}
 miss:
+		if (env->internal_error)
+			return -EFAULT;
 		/* when new state is not going to be added do not increase miss count.
 		 * Otherwise several loop iterations will remove the state
 		 * recorded earlier. The goal of these heuristics is to have
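The two -EFAULT returns exist because the inconsistency is detected inside clean_func_state(), which returns void and cannot propagate a failure itself: it latches env->internal_error, and the state-pruning loop converts the latched flag into a hard error on both of its outcomes (state hit and state miss), before the possibly bogus liveness is used to prune or requeue states. A compilable sketch of that latched-error pattern, with invented names (struct env here is not the verifier's, and deep_check/checkpoint stand in for clean_func_state() and the pruning-loop labels):

#include <errno.h>
#include <stdbool.h>

struct env {
	bool internal_error; /* latched by checks that cannot fail directly */
};

/* void context: record the problem instead of returning it */
static void deep_check(struct env *env, bool consistent)
{
	if (!consistent)
		env->internal_error = true;
}

/* next decision point: turn the latched flag into a hard error */
static int checkpoint(struct env *env)
{
	if (env->internal_error)
		return -EFAULT;
	return 0;
}

int main(void)
{
	struct env env = { .internal_error = false };

	deep_check(&env, true);  /* nothing recorded */
	deep_check(&env, false); /* inconsistency latched */
	return checkpoint(&env) ? 1 : 0; /* exits 1: -EFAULT observed */
}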