Diffstat (limited to 'include/linux/bpf_verifier.h')
-rw-r--r--  include/linux/bpf_verifier.h  95
1 file changed, 72 insertions(+), 23 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 48b7b2eeb7e2..256274acb1d8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,6 +115,14 @@ struct bpf_reg_state {
int depth:30;
} iter;
+ /* For irq stack slots */
+ struct {
+ enum {
+ IRQ_NATIVE_KFUNC,
+ IRQ_LOCK_KFUNC,
+ } kfunc_class;
+ } irq;
+
/* Max size from any of the above. */
struct {
unsigned long raw1;
@@ -233,6 +241,7 @@ enum bpf_stack_slot_type {
*/
STACK_DYNPTR,
STACK_ITER,
+ STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -254,8 +263,12 @@ struct bpf_reference_state {
* default to pointer reference on zero initialization of a state.
*/
enum ref_state_type {
- REF_TYPE_PTR = 0,
- REF_TYPE_LOCK,
+ REF_TYPE_PTR = (1 << 1),
+ REF_TYPE_IRQ = (1 << 2),
+ REF_TYPE_LOCK = (1 << 3),
+ REF_TYPE_RES_LOCK = (1 << 4),
+ REF_TYPE_RES_LOCK_IRQ = (1 << 5),
+ REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
} type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
@@ -315,9 +328,6 @@ struct bpf_func_state {
u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
- int acquired_refs;
- int active_locks;
- struct bpf_reference_state *refs;
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
* (i.e. 8) bytes worth of stack memory.
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -346,7 +356,11 @@ enum {
INSN_F_SPI_MASK = 0x3f, /* 6 bits */
INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
- INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+ INSN_F_STACK_ACCESS = BIT(9),
+
+ INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+ /* total 12 bits are used now. */
};
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
@@ -355,9 +369,9 @@ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
struct bpf_insn_hist_entry {
u32 idx;
/* insn idx can't be bigger than 1 million */
- u32 prev_idx : 22;
- /* special flags, e.g., whether insn is doing register stack spill/load */
- u32 flags : 10;
+ u32 prev_idx : 20;
+ /* special INSN_F_xxx flags */
+ u32 flags : 12;
/* additional registers that need precision tracking when this
* jump is backtracked, vector of six 10-bit records
*/
@@ -370,6 +384,8 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
+ /* Acquired reference states */
+ struct bpf_reference_state *refs;
/*
* 'branches' field is the number of branches left to explore:
* 0 - all possible paths from this state reached bpf_exit or
@@ -419,14 +435,15 @@ struct bpf_verifier_state {
u32 insn_idx;
u32 curframe;
- bool speculative;
+ u32 acquired_refs;
+ u32 active_locks;
+ u32 active_preempt_locks;
+ u32 active_irq_id;
+ u32 active_lock_id;
+ void *active_lock_ptr;
bool active_rcu_lock;
- u32 active_preempt_lock;
- /* If this state was ever pointed-to by other state's loop_entry field
- * this flag would be set to true. Used to avoid freeing such states
- * while they are still in use.
- */
- bool used_as_loop_entry;
+
+ bool speculative;
bool in_sleepable;
/* first and last insn idx of this verifier state */
@@ -453,6 +470,11 @@ struct bpf_verifier_state {
u32 dfs_depth;
u32 callback_unroll_depth;
u32 may_goto_depth;
+ /* If this state was ever pointed-to by other state's loop_entry field
+ * this flag would be set to true. Used to avoid freeing such states
+ * while they are still in use.
+ */
+ u32 used_as_loop_entry;
};
#define bpf_get_spilled_reg(slot, frame, mask) \
@@ -493,8 +515,10 @@ struct bpf_verifier_state {
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
- struct bpf_verifier_state_list *next;
- int miss_cnt, hit_cnt;
+ struct list_head node;
+ u32 miss_cnt;
+ u32 hit_cnt:31;
+ u32 in_free_list:1;
};
struct bpf_loop_inline_state {
@@ -571,6 +595,7 @@ struct bpf_insn_aux_data {
* bpf_fastcall pattern.
*/
u8 fastcall_spills_num:3;
+ u8 arg_prog:4;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -584,6 +609,8 @@ struct bpf_insn_aux_data {
* accepts callback function as a parameter.
*/
bool calls_callback;
+ /* registers alive before this instruction. */
+ u16 live_regs_before;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -660,6 +687,7 @@ struct bpf_subprog_info {
/* true if bpf_fastcall stack region is used by functions that can't be inlined */
bool keep_fastcall_stack: 1;
bool changes_pkt_data: 1;
+ bool might_sleep: 1;
enum priv_stack_mode priv_stack_mode;
u8 arg_cnt;
@@ -705,8 +733,11 @@ struct bpf_verifier_env {
bool test_state_freq; /* test verifier with different pruning frequency */
bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
- struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_verifier_state_list *free_list;
+ /* Search pruning optimization, array of list_heads for
+ * lists of struct bpf_verifier_state_list.
+ */
+ struct list_head *explored_states;
+ struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
@@ -737,7 +768,11 @@ struct bpf_verifier_env {
struct {
int *insn_state;
int *insn_stack;
+ /* vector of instruction indexes sorted in post-order */
+ int *insn_postorder;
int cur_stack;
+ /* current position in the insn_postorder vector */
+ int cur_postorder;
} cfg;
struct backtrack_state bt;
struct bpf_insn_hist_entry *insn_hist;
@@ -762,6 +797,8 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ u32 free_list_size;
+ u32 explored_states_size;
bpfptr_t fd_array;
/* bit mask to keep track of whether a register has been accessed
@@ -806,6 +843,17 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...);
+#define verifier_bug_if(cond, env, fmt, args...) \
+ ({ \
+ bool __cond = (cond); \
+ if (unlikely(__cond)) { \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
+ } \
+ (__cond); \
+ })
+#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)
+
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@@ -980,8 +1028,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
-void print_verifier_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state, bool print_all);
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno);
#endif /* _LINUX_BPF_VERIFIER_H */
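
The ref_state_type values in this diff become power-of-two flags, so the three lock-style reference kinds can be matched with a single mask test against REF_TYPE_LOCK_MASK. A minimal sketch of that pattern follows; the helper name ref_is_lock() is an assumption for illustration and does not appear in the diff:

	/* Illustrative only, not part of the diff above: with the flag
	 * encoding, REF_TYPE_LOCK_MASK covers REF_TYPE_LOCK,
	 * REF_TYPE_RES_LOCK and REF_TYPE_RES_LOCK_IRQ, so one bitwise
	 * test identifies any lock reference.
	 */
	static bool ref_is_lock(const struct bpf_reference_state *ref)
	{
		return ref->type & REF_TYPE_LOCK_MASK;
	}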
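
The new verifier_bug_if()/verifier_bug() macros warn once via BPF_WARN_ONCE(), mirror the message into the verifier log, and evaluate to the condition, so a caller can check an internal invariant and bail out in one expression. A hedged usage sketch; the helper, its arguments, and the -EFAULT return are assumptions, not taken from this diff (only active_irq_id and cur_state are fields introduced or referenced above):

	/* Hypothetical caller (not part of this diff): the macro returns
	 * the evaluated condition, so the invariant check and the error
	 * return fit in a single if ().
	 */
	static int check_active_irq(struct bpf_verifier_env *env, u32 id)
	{
		if (verifier_bug_if(id != env->cur_state->active_irq_id, env,
				    "unexpected irq id %u", id))
			return -EFAULT;	/* internal verifier bug, abort verification */
		return 0;
	}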