bpf: Move compute_insn_live_regs() into liveness.c

verifier.c is huge. Move compute_insn_live_regs() into liveness.c.

Mechanical move. No functional changes.

Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/r/20260412152936.54262-3-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Alexei Starovoitov
2026-04-12 08:29:31 -07:00
parent 449f08fa59
commit fc150cddee
3 changed files with 250 additions and 249 deletions

View File

@@ -1204,6 +1204,7 @@ int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
int bpf_compute_live_registers(struct bpf_verifier_env *env);
#define BPF_MAP_KEY_POISON (1ULL << 63)
#define BPF_MAP_KEY_SEEN (1ULL << 62)
@@ -1234,6 +1235,7 @@ static inline u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
}
#define MAX_PACKET_OFF 0xffff
#define CALLER_SAVED_REGS 6
enum bpf_reg_arg_type {
SRC_OP, /* register is used as source operand */

View File

@@ -1953,3 +1953,250 @@ int bpf_compute_subprog_arg_access(struct bpf_verifier_env *env)
kvfree(info);
return err;
}
/* Each field is a register bitmask */
struct insn_live_regs {
	u16 use; /* registers read by instruction */
	u16 def; /* registers written by instruction */
	u16 in;  /* registers that may be alive before instruction */
	u16 out; /* registers that may be alive after instruction */
};

/* Bitmask with 1s for all caller saved registers */
/* 16 bits per mask are sufficient: BPF has registers R0-R10 only */
#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
/* Compute info->{use,def} fields for the instruction */
static void compute_insn_live_regs(struct bpf_verifier_env *env,
				   struct bpf_insn *insn,
				   struct insn_live_regs *info)
{
	struct bpf_call_summary cs;
	u8 class = BPF_CLASS(insn->code);
	u8 code = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u16 src = BIT(insn->src_reg);
	u16 dst = BIT(insn->dst_reg);
	u16 r0  = BIT(0);
	/* Conservative defaults for any opcode not handled below:
	 * define nothing, use every register.
	 */
	u16 def = 0;
	u16 use = 0xffff;

	switch (class) {
	case BPF_LD:
		switch (mode) {
		case BPF_IMM:
			if (BPF_SIZE(insn->code) == BPF_DW) {
				/* ldimm64: writes dst, reads no registers */
				def = dst;
				use = 0;
			}
			break;
		case BPF_LD | BPF_ABS:
		case BPF_LD | BPF_IND:
			/* stick with defaults */
			break;
		}
		break;
	case BPF_LDX:
		switch (mode) {
		case BPF_MEM:
		case BPF_MEMSX:
			/* load from memory: dst = *(src + off) */
			def = dst;
			use = src;
			break;
		}
		break;
	case BPF_ST:
		switch (mode) {
		case BPF_MEM:
			/* store immediate: only the address register is read */
			def = 0;
			use = dst;
			break;
		}
		break;
	case BPF_STX:
		switch (mode) {
		case BPF_MEM:
			/* store register: *(dst + off) = src */
			def = 0;
			use = dst | src;
			break;
		case BPF_ATOMIC:
			switch (insn->imm) {
			case BPF_CMPXCHG:
				/* compares against and writes back R0 */
				use = r0 | dst | src;
				def = r0;
				break;
			case BPF_LOAD_ACQ:
				def = dst;
				use = src;
				break;
			case BPF_STORE_REL:
				def = 0;
				use = dst | src;
				break;
			default:
				/* other atomics read both operands; the
				 * FETCH variants also write the old memory
				 * value back into src
				 */
				use = dst | src;
				if (insn->imm & BPF_FETCH)
					def = src;
				else
					def = 0;
			}
			break;
		}
		break;
	case BPF_ALU:
	case BPF_ALU64:
		switch (code) {
		case BPF_END:
			/* byte swap operates on dst in place */
			use = dst;
			def = dst;
			break;
		case BPF_MOV:
			/* mov overwrites dst without reading it */
			def = dst;
			if (BPF_SRC(insn->code) == BPF_K)
				use = 0;
			else
				use = src;
			break;
		default:
			/* binary ALU ops read dst (and src for BPF_X) */
			def = dst;
			if (BPF_SRC(insn->code) == BPF_K)
				use = dst;
			else
				use = dst | src;
		}
		break;
	case BPF_JMP:
	case BPF_JMP32:
		switch (code) {
		case BPF_JA:
			/* plain goto reads nothing; the BPF_X form is an
			 * indirect jump through dst
			 */
			def = 0;
			if (BPF_SRC(insn->code) == BPF_X)
				use = dst;
			else
				use = 0;
			break;
		case BPF_JCOND:
			def = 0;
			use = 0;
			break;
		case BPF_EXIT:
			/* return value is passed in R0 */
			def = 0;
			use = r0;
			break;
		case BPF_CALL:
			/* calls clobber all caller-saved registers (R0-R5)
			 * and by default are assumed to read all argument
			 * registers R1-R5; narrow the read set to
			 * R1..num_params when a call summary is available.
			 * NOTE(review): assumes num_params >= 1 whenever the
			 * summary lookup succeeds -- GENMASK(0, 1) would be
			 * ill-formed; confirm against bpf_get_call_summary().
			 */
			def = ALL_CALLER_SAVED_REGS;
			use = def & ~BIT(BPF_REG_0);
			if (bpf_get_call_summary(env, insn, &cs))
				use = GENMASK(cs.num_params, 1);
			break;
		default:
			/* conditional jumps read their operands, write none */
			def = 0;
			if (BPF_SRC(insn->code) == BPF_K)
				use = dst;
			else
				use = dst | src;
		}
		break;
	}

	info->def = def;
	info->use = use;
}
/* Compute may-live registers after each instruction in the program.
 * The register is live after the instruction I if it is read by some
 * instruction S following I during program execution and is not
 * overwritten between I and S.
 *
 * Store result in env->insn_aux_data[i].live_regs_before.
 */
int bpf_compute_live_registers(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
	struct bpf_insn *insns = env->prog->insnsi;
	struct insn_live_regs *state;
	int insn_cnt = env->prog->len;
	int err = 0, i, j;
	bool changed;

	/* Use the following algorithm:
	 * - define the following:
	 *   - I.use : a set of all registers read by instruction I;
	 *   - I.def : a set of all registers written by instruction I;
	 *   - I.in  : a set of all registers that may be alive before I execution;
	 *   - I.out : a set of all registers that may be alive after I execution;
	 *   - insn_successors(I): a set of instructions S that might immediately
	 *                         follow I for some program execution;
	 * - associate separate empty sets 'I.in' and 'I.out' with each instruction;
	 * - visit each instruction in a postorder and update
	 *   state[i].in, state[i].out as follows:
	 *
	 *       state[i].out = U [state[s].in for S in insn_successors(i)]
	 *       state[i].in  = (state[i].out / state[i].def) U state[i].use
	 *
	 *   (where U stands for set union, / stands for set difference)
	 * - repeat the computation while {in,out} fields changes for
	 *   any instruction.
	 */
	state = kvzalloc_objs(*state, insn_cnt, GFP_KERNEL_ACCOUNT);
	if (!state) {
		err = -ENOMEM;
		goto out;
	}

	/* Seed per-instruction local use/def sets */
	for (i = 0; i < insn_cnt; ++i)
		compute_insn_live_regs(env, &insns[i], &state[i]);

	/* Forward pass: resolve stack access through FP-derived pointers */
	err = bpf_compute_subprog_arg_access(env);
	if (err)
		goto out;

	/* Iterate the backward dataflow equations to a fixed point,
	 * visiting instructions in CFG postorder.
	 */
	changed = true;
	while (changed) {
		changed = false;
		for (i = 0; i < env->cfg.cur_postorder; ++i) {
			int insn_idx = env->cfg.insn_postorder[i];
			struct insn_live_regs *live = &state[insn_idx];
			struct bpf_iarray *succ;
			u16 new_out = 0;
			u16 new_in = 0;

			succ = bpf_insn_successors(env, insn_idx);
			for (int s = 0; s < succ->cnt; ++s)
				new_out |= state[succ->items[s]].in;
			new_in = (new_out & ~live->def) | live->use;
			if (new_out != live->out || new_in != live->in) {
				live->in = new_in;
				live->out = new_out;
				changed = true;
			}
		}
	}

	/* Publish the "live before insn" sets for the rest of the verifier */
	for (i = 0; i < insn_cnt; ++i)
		insn_aux[i].live_regs_before = state[i].in;

	if (env->log.level & BPF_LOG_LEVEL2) {
		/* Dump, per instruction: its SCC number (if any), the set
		 * of registers live before it, and the instruction itself.
		 */
		verbose(env, "Live regs before insn:\n");
		for (i = 0; i < insn_cnt; ++i) {
			if (env->insn_aux_data[i].scc)
				verbose(env, "%3d ", env->insn_aux_data[i].scc);
			else
				verbose(env, "    ");
			verbose(env, "%3d: ", i);
			for (j = BPF_REG_0; j < BPF_REG_10; ++j)
				if (insn_aux[i].live_regs_before & BIT(j))
					verbose(env, "%d", j);
				else
					verbose(env, ".");
			verbose(env, " ");
			bpf_verbose_insn(env, &insns[i]);
			/* ldimm64 occupies two insn slots; skip second half */
			if (bpf_is_ldimm64(&insns[i]))
				i++;
		}
	}

out:
	kvfree(state);
	return err;
}

View File

@@ -2144,7 +2144,6 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
return &elem->st;
}
#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
@@ -23461,253 +23460,6 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
return 0;
}
/* NOTE(review): this is the verifier.c copy removed by this commit;
 * it is byte-identical to the definition added in liveness.c.
 */
/* Each field is a register bitmask */
struct insn_live_regs {
	u16 use; /* registers read by instruction */
	u16 def; /* registers written by instruction */
	u16 in;  /* registers that may be alive before instruction */
	u16 out; /* registers that may be alive after instruction */
};

/* Bitmask with 1s for all caller saved registers */
#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
/* NOTE(review): this is the verifier.c copy removed by this commit;
 * it is byte-identical to the definition added in liveness.c.
 */
/* Compute info->{use,def} fields for the instruction */
static void compute_insn_live_regs(struct bpf_verifier_env *env,
				   struct bpf_insn *insn,
				   struct insn_live_regs *info)
{
	struct bpf_call_summary cs;
	u8 class = BPF_CLASS(insn->code);
	u8 code = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u16 src = BIT(insn->src_reg);
	u16 dst = BIT(insn->dst_reg);
	u16 r0  = BIT(0);
	u16 def = 0;
	u16 use = 0xffff;

	switch (class) {
	case BPF_LD:
		switch (mode) {
		case BPF_IMM:
			if (BPF_SIZE(insn->code) == BPF_DW) {
				def = dst;
				use = 0;
			}
			break;
		case BPF_LD | BPF_ABS:
		case BPF_LD | BPF_IND:
			/* stick with defaults */
			break;
		}
		break;
	case BPF_LDX:
		switch (mode) {
		case BPF_MEM:
		case BPF_MEMSX:
			def = dst;
			use = src;
			break;
		}
		break;
	case BPF_ST:
		switch (mode) {
		case BPF_MEM:
			def = 0;
			use = dst;
			break;
		}
		break;
	case BPF_STX:
		switch (mode) {
		case BPF_MEM:
			def = 0;
			use = dst | src;
			break;
		case BPF_ATOMIC:
			switch (insn->imm) {
			case BPF_CMPXCHG:
				use = r0 | dst | src;
				def = r0;
				break;
			case BPF_LOAD_ACQ:
				def = dst;
				use = src;
				break;
			case BPF_STORE_REL:
				def = 0;
				use = dst | src;
				break;
			default:
				use = dst | src;
				if (insn->imm & BPF_FETCH)
					def = src;
				else
					def = 0;
			}
			break;
		}
		break;
	case BPF_ALU:
	case BPF_ALU64:
		switch (code) {
		case BPF_END:
			use = dst;
			def = dst;
			break;
		case BPF_MOV:
			def = dst;
			if (BPF_SRC(insn->code) == BPF_K)
				use = 0;
			else
				use = src;
			break;
		default:
			def = dst;
			if (BPF_SRC(insn->code) == BPF_K)
				use = dst;
			else
				use = dst | src;
		}
		break;
	case BPF_JMP:
	case BPF_JMP32:
		switch (code) {
		case BPF_JA:
			def = 0;
			if (BPF_SRC(insn->code) == BPF_X)
				use = dst;
			else
				use = 0;
			break;
		case BPF_JCOND:
			def = 0;
			use = 0;
			break;
		case BPF_EXIT:
			def = 0;
			use = r0;
			break;
		case BPF_CALL:
			def = ALL_CALLER_SAVED_REGS;
			use = def & ~BIT(BPF_REG_0);
			if (bpf_get_call_summary(env, insn, &cs))
				use = GENMASK(cs.num_params, 1);
			break;
		default:
			def = 0;
			if (BPF_SRC(insn->code) == BPF_K)
				use = dst;
			else
				use = dst | src;
		}
		break;
	}

	info->def = def;
	info->use = use;
}
/* NOTE(review): this is the verifier.c copy removed by this commit;
 * it is byte-identical (modulo the bpf_ name prefix and linkage) to the
 * definition added in liveness.c.
 */
/* Compute may-live registers after each instruction in the program.
 * The register is live after the instruction I if it is read by some
 * instruction S following I during program execution and is not
 * overwritten between I and S.
 *
 * Store result in env->insn_aux_data[i].live_regs_before.
 */
static int compute_live_registers(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
	struct bpf_insn *insns = env->prog->insnsi;
	struct insn_live_regs *state;
	int insn_cnt = env->prog->len;
	int err = 0, i, j;
	bool changed;

	/* Use the following algorithm:
	 * - define the following:
	 *   - I.use : a set of all registers read by instruction I;
	 *   - I.def : a set of all registers written by instruction I;
	 *   - I.in  : a set of all registers that may be alive before I execution;
	 *   - I.out : a set of all registers that may be alive after I execution;
	 *   - insn_successors(I): a set of instructions S that might immediately
	 *                         follow I for some program execution;
	 * - associate separate empty sets 'I.in' and 'I.out' with each instruction;
	 * - visit each instruction in a postorder and update
	 *   state[i].in, state[i].out as follows:
	 *
	 *       state[i].out = U [state[s].in for S in insn_successors(i)]
	 *       state[i].in  = (state[i].out / state[i].def) U state[i].use
	 *
	 *   (where U stands for set union, / stands for set difference)
	 * - repeat the computation while {in,out} fields changes for
	 *   any instruction.
	 */
	state = kvzalloc_objs(*state, insn_cnt, GFP_KERNEL_ACCOUNT);
	if (!state) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < insn_cnt; ++i)
		compute_insn_live_regs(env, &insns[i], &state[i]);

	/* Forward pass: resolve stack access through FP-derived pointers */
	err = bpf_compute_subprog_arg_access(env);
	if (err)
		goto out;

	changed = true;
	while (changed) {
		changed = false;
		for (i = 0; i < env->cfg.cur_postorder; ++i) {
			int insn_idx = env->cfg.insn_postorder[i];
			struct insn_live_regs *live = &state[insn_idx];
			struct bpf_iarray *succ;
			u16 new_out = 0;
			u16 new_in = 0;

			succ = bpf_insn_successors(env, insn_idx);
			for (int s = 0; s < succ->cnt; ++s)
				new_out |= state[succ->items[s]].in;
			new_in = (new_out & ~live->def) | live->use;
			if (new_out != live->out || new_in != live->in) {
				live->in = new_in;
				live->out = new_out;
				changed = true;
			}
		}
	}

	for (i = 0; i < insn_cnt; ++i)
		insn_aux[i].live_regs_before = state[i].in;

	if (env->log.level & BPF_LOG_LEVEL2) {
		verbose(env, "Live regs before insn:\n");
		for (i = 0; i < insn_cnt; ++i) {
			if (env->insn_aux_data[i].scc)
				verbose(env, "%3d ", env->insn_aux_data[i].scc);
			else
				verbose(env, "    ");
			verbose(env, "%3d: ", i);
			for (j = BPF_REG_0; j < BPF_REG_10; ++j)
				if (insn_aux[i].live_regs_before & BIT(j))
					verbose(env, "%d", j);
				else
					verbose(env, ".");
			verbose(env, " ");
			bpf_verbose_insn(env, &insns[i]);
			if (bpf_is_ldimm64(&insns[i]))
				i++;
		}
	}

out:
	kvfree(state);
	return err;
}
/*
* Compute strongly connected components (SCCs) on the CFG.
* Assign an SCC number to each instruction, recorded in env->insn_aux[*].scc.
@@ -24247,7 +23999,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret < 0)
goto skip_full_check;
ret = compute_live_registers(env);
ret = bpf_compute_live_registers(env);
if (ret < 0)
goto skip_full_check;