Merge branch 'bpf-prep-patches-for-static-stack-liveness'

Alexei Starovoitov says:

====================
bpf: Prep patches for static stack liveness.

v4->v5:
- minor test fixup

v3->v4:
- fixed invalid recursion detection when a callback is called multiple times

v3: https://lore.kernel.org/bpf/20260402212856.86606-1-alexei.starovoitov@gmail.com/

v2->v3:
- added recursive call detection
- fixed ubsan warning
- removed double declaration in the header
- added Acks

v2: https://lore.kernel.org/bpf/20260402061744.10885-1-alexei.starovoitov@gmail.com/

v1->v2:
. fixed bugs spotted by Eduard, Mykyta, claude and gemini
. fixed selftests that were failing in unpriv
. gemini (sashiko) found several precision improvements in patch 6,
  but they made no difference in real programs.

v1: https://lore.kernel.org/bpf/20260401021635.34636-1-alexei.starovoitov@gmail.com/
First 6 prep patches for static stack liveness.

. do src/dst_reg validation early and remove defensive checks

. sort subprogs in topological order. We wanted to do this long ago to
  process global subprogs in that order, and it helps in other cases.

. Add a constant folding pass that computes map_ptr, subprog_idx,
  loads from readonly maps, and other constants that fit into 32 bits.

. Use these constants to eliminate dead code. Replace predicted
  conditional branches with "jmp always". That reduces JIT prog size.

. Add two helpers that return the stack access size of helper and kfunc arguments.
====================

Link: https://patch.msgid.link/20260403024422.87231-1-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
11 changed files with 1057 additions and 98 deletions
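
As an illustration of the constant folding and dead-branch rewrite described
in the cover letter, here is a minimal selftest-style sketch (not part of the
series; the function name is made up and SEC()/__naked/__clobber_all are the
usual selftest macros): bpf_compute_const_regs() records R1 as the constant 5
before the conditional jump, and bpf_prune_dead_branches() then rewrites the
never-taken branch into a plain fall-through.

SEC("socket")
__naked void fold_branch_sketch(void)
{
	asm volatile (
	"r1 = 5;"			/* R1 holds a known constant from here on */
	"if r1 > 10 goto l0_%=;"	/* never taken: rewritten to 'goto +0' */
	"r0 = 0;"
	"exit;"
"l0_%=:"
	"r0 = 1;"
	"exit;"
	::: __clobber_all);
}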


@@ -595,6 +595,18 @@ struct bpf_insn_aux_data {
u32 scc;
/* registers alive before this instruction. */
u16 live_regs_before;
/*
* Bitmask of R0-R9 that hold known values at this instruction.
* const_reg_mask: scalar constants that fit in 32 bits.
* const_reg_map_mask: map pointers, val is map_index into used_maps[].
* const_reg_subprog_mask: subprog pointers, val is subprog number.
* const_reg_vals[i] holds the 32-bit value for register i.
* Populated by the bpf_compute_const_regs() pre-pass.
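* Example: const_reg_mask & BIT(2) set with const_reg_vals[2] == 4 means
* R2 is known to hold the value 4 here.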
*/
u16 const_reg_mask;
u16 const_reg_map_mask;
u16 const_reg_subprog_mask;
u32 const_reg_vals[10];
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -787,6 +799,8 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
/* subprog indices sorted in topological order: leaves first, callers last */
int subprog_topo_order[BPF_MAX_SUBPROGS + 2];
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
@@ -865,6 +879,30 @@ static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env
return &env->subprog_info[subprog];
}
struct bpf_call_summary {
u8 num_params;
bool is_void;
bool fastcall;
};
static inline bool bpf_helper_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == 0;
}
static inline bool bpf_pseudo_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == BPF_PSEUDO_CALL;
}
static inline bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -943,6 +981,10 @@ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
int mark_chain_precision(struct bpf_verifier_env *env, int regno);
bool bpf_map_is_rdonly(const struct bpf_map *map);
int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
bool is_ldsx);
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */
@@ -1086,6 +1128,23 @@ struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
int bpf_find_subprog(struct bpf_verifier_env *env, int off);
int bpf_compute_const_regs(struct bpf_verifier_env *env);
int bpf_prune_dead_branches(struct bpf_verifier_env *env);
int bpf_compute_postorder(struct bpf_verifier_env *env);
bool bpf_insn_is_cond_jump(u8 code);
bool bpf_is_may_goto_insn(struct bpf_insn *insn);
void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn);
bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
struct bpf_call_summary *cs);
s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env,
struct bpf_insn *insn, int arg,
int insn_idx);
s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env,
struct bpf_insn *insn, int arg,
int insn_idx);
int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);


@@ -6,7 +6,7 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += -Wno-override-init $(cflags-nogcse-yy)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o liveness.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o liveness.o const_fold.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o bpf_insn_array.o

kernel/bpf/const_fold.c (new file)

@@ -0,0 +1,396 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf_verifier.h>
/*
* Forward dataflow analysis to determine constant register values at every
* instruction. Tracks 64-bit constant values in R0-R9 through the program,
* using a fixed-point iteration in reverse postorder. Records which registers
* hold known constants and their values in
* env->insn_aux_data[].{const_reg_mask, const_reg_vals}.
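* For example, after 'r2 = 4; r3 = r2;' both R2 and R3 are recorded as the
* constant 4 at the next instruction, unless another path merges in a
* different value.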
*/
enum const_arg_state {
CONST_ARG_UNVISITED, /* instruction not yet reached */
CONST_ARG_UNKNOWN, /* register value not a known constant */
CONST_ARG_CONST, /* register holds a known 64-bit constant */
CONST_ARG_MAP_PTR, /* register holds a map pointer, map_index is set */
CONST_ARG_MAP_VALUE, /* register points to map value data, val is offset */
CONST_ARG_SUBPROG, /* register holds a subprog pointer, val is subprog number */
};
struct const_arg_info {
enum const_arg_state state;
u32 map_index;
u64 val;
};
static bool ci_is_unvisited(const struct const_arg_info *ci)
{
return ci->state == CONST_ARG_UNVISITED;
}
static bool ci_is_unknown(const struct const_arg_info *ci)
{
return ci->state == CONST_ARG_UNKNOWN;
}
static bool ci_is_const(const struct const_arg_info *ci)
{
return ci->state == CONST_ARG_CONST;
}
static bool ci_is_map_value(const struct const_arg_info *ci)
{
return ci->state == CONST_ARG_MAP_VALUE;
}
/* Transfer function: compute output register state from instruction. */
static void const_reg_xfer(struct bpf_verifier_env *env, struct const_arg_info *ci_out,
struct bpf_insn *insn, struct bpf_insn *insns, int idx)
{
struct const_arg_info unknown = { .state = CONST_ARG_UNKNOWN, .val = 0 };
struct const_arg_info *dst = &ci_out[insn->dst_reg];
struct const_arg_info *src = &ci_out[insn->src_reg];
u8 class = BPF_CLASS(insn->code);
u8 mode = BPF_MODE(insn->code);
u8 opcode = BPF_OP(insn->code) | BPF_SRC(insn->code);
int r;
switch (class) {
case BPF_ALU:
case BPF_ALU64:
switch (opcode) {
case BPF_MOV | BPF_K:
dst->state = CONST_ARG_CONST;
dst->val = (s64)insn->imm;
break;
case BPF_MOV | BPF_X:
*dst = *src;
if (!insn->off)
break;
if (!ci_is_const(dst)) {
*dst = unknown;
break;
}
switch (insn->off) {
case 8: dst->val = (s8)dst->val; break;
case 16: dst->val = (s16)dst->val; break;
case 32: dst->val = (s32)dst->val; break;
default: *dst = unknown; break;
}
break;
case BPF_ADD | BPF_K:
if (!ci_is_const(dst) && !ci_is_map_value(dst)) {
*dst = unknown;
break;
}
dst->val += insn->imm;
break;
case BPF_SUB | BPF_K:
if (!ci_is_const(dst) && !ci_is_map_value(dst)) {
*dst = unknown;
break;
}
dst->val -= insn->imm;
break;
case BPF_AND | BPF_K:
if (!ci_is_const(dst)) {
if (!insn->imm) {
dst->state = CONST_ARG_CONST;
dst->val = 0;
} else {
*dst = unknown;
}
break;
}
dst->val &= (s64)insn->imm;
break;
case BPF_AND | BPF_X:
if (ci_is_const(dst) && dst->val == 0)
break; /* 0 & x == 0 */
if (ci_is_const(src) && src->val == 0) {
dst->state = CONST_ARG_CONST;
dst->val = 0;
break;
}
if (!ci_is_const(dst) || !ci_is_const(src)) {
*dst = unknown;
break;
}
dst->val &= src->val;
break;
default:
*dst = unknown;
break;
}
if (class == BPF_ALU) {
if (ci_is_const(dst))
dst->val = (u32)dst->val;
else if (!ci_is_unknown(dst))
*dst = unknown;
}
break;
case BPF_LD:
if (mode == BPF_ABS || mode == BPF_IND)
goto process_call;
if (mode != BPF_IMM || BPF_SIZE(insn->code) != BPF_DW)
break;
if (insn->src_reg == BPF_PSEUDO_FUNC) {
int subprog = bpf_find_subprog(env, idx + insn->imm + 1);
if (subprog >= 0) {
dst->state = CONST_ARG_SUBPROG;
dst->val = subprog;
} else {
*dst = unknown;
}
} else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
dst->state = CONST_ARG_MAP_VALUE;
dst->map_index = env->insn_aux_data[idx].map_index;
dst->val = env->insn_aux_data[idx].map_off;
} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
insn->src_reg == BPF_PSEUDO_MAP_IDX) {
dst->state = CONST_ARG_MAP_PTR;
dst->map_index = env->insn_aux_data[idx].map_index;
} else if (insn->src_reg == 0) {
dst->state = CONST_ARG_CONST;
dst->val = (u64)(u32)insn->imm | ((u64)(u32)insns[idx + 1].imm << 32);
} else {
*dst = unknown;
}
break;
case BPF_LDX:
if (!ci_is_map_value(src)) {
*dst = unknown;
break;
}
struct bpf_map *map = env->used_maps[src->map_index];
int size = bpf_size_to_bytes(BPF_SIZE(insn->code));
bool is_ldsx = mode == BPF_MEMSX;
int off = src->val + insn->off;
u64 val = 0;
if (!bpf_map_is_rdonly(map) || !map->ops->map_direct_value_addr ||
map->map_type == BPF_MAP_TYPE_INSN_ARRAY ||
off < 0 || off + size > map->value_size ||
bpf_map_direct_read(map, off, size, &val, is_ldsx)) {
*dst = unknown;
break;
}
dst->state = CONST_ARG_CONST;
dst->val = val;
break;
case BPF_JMP:
if (opcode != BPF_CALL)
break;
process_call:
for (r = BPF_REG_0; r <= BPF_REG_5; r++)
ci_out[r] = unknown;
break;
case BPF_STX:
if (mode != BPF_ATOMIC)
break;
if (insn->imm == BPF_CMPXCHG)
ci_out[BPF_REG_0] = unknown;
else if (insn->imm == BPF_LOAD_ACQ)
*dst = unknown;
else if (insn->imm & BPF_FETCH)
*src = unknown;
break;
}
}
/* Join function: merge output state into a successor's input state. */
static bool const_reg_join(struct const_arg_info *ci_target,
struct const_arg_info *ci_out)
{
bool changed = false;
int r;
for (r = 0; r < MAX_BPF_REG; r++) {
struct const_arg_info *old = &ci_target[r];
struct const_arg_info *new = &ci_out[r];
if (ci_is_unvisited(old) && !ci_is_unvisited(new)) {
ci_target[r] = *new;
changed = true;
} else if (!ci_is_unknown(old) && !ci_is_unvisited(old) &&
(new->state != old->state || new->val != old->val ||
new->map_index != old->map_index)) {
old->state = CONST_ARG_UNKNOWN;
changed = true;
}
}
return changed;
}
int bpf_compute_const_regs(struct bpf_verifier_env *env)
{
struct const_arg_info unknown = { .state = CONST_ARG_UNKNOWN, .val = 0 };
struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
struct const_arg_info (*ci_in)[MAX_BPF_REG];
struct const_arg_info ci_out[MAX_BPF_REG];
struct bpf_iarray *succ;
bool changed;
int i, r;
/* kvzalloc zeroes memory, so all entries start as CONST_ARG_UNVISITED (0) */
ci_in = kvzalloc_objs(*ci_in, insn_cnt, GFP_KERNEL_ACCOUNT);
if (!ci_in)
return -ENOMEM;
/* Subprogram entries (including main at subprog 0): all registers unknown */
for (i = 0; i < env->subprog_cnt; i++) {
int start = env->subprog_info[i].start;
for (r = 0; r < MAX_BPF_REG; r++)
ci_in[start][r] = unknown;
}
redo:
changed = false;
for (i = env->cfg.cur_postorder - 1; i >= 0; i--) {
int idx = env->cfg.insn_postorder[i];
struct bpf_insn *insn = &insns[idx];
struct const_arg_info *ci = ci_in[idx];
memcpy(ci_out, ci, sizeof(ci_out));
const_reg_xfer(env, ci_out, insn, insns, idx);
succ = bpf_insn_successors(env, idx);
for (int s = 0; s < succ->cnt; s++)
changed |= const_reg_join(ci_in[succ->items[s]], ci_out);
}
if (changed)
goto redo;
/* Save computed constants into insn_aux[] if they fit into 32-bit */
for (i = 0; i < insn_cnt; i++) {
u16 mask = 0, map_mask = 0, subprog_mask = 0;
struct bpf_insn_aux_data *aux = &insn_aux[i];
struct const_arg_info *ci = ci_in[i];
for (r = BPF_REG_0; r < ARRAY_SIZE(aux->const_reg_vals); r++) {
struct const_arg_info *c = &ci[r];
switch (c->state) {
case CONST_ARG_CONST: {
u64 val = c->val;
if (val != (u32)val)
break;
mask |= BIT(r);
aux->const_reg_vals[r] = val;
break;
}
case CONST_ARG_MAP_PTR:
map_mask |= BIT(r);
aux->const_reg_vals[r] = c->map_index;
break;
case CONST_ARG_SUBPROG:
subprog_mask |= BIT(r);
aux->const_reg_vals[r] = c->val;
break;
default:
break;
}
}
aux->const_reg_mask = mask;
aux->const_reg_map_mask = map_mask;
aux->const_reg_subprog_mask = subprog_mask;
}
kvfree(ci_in);
return 0;
}
static int eval_const_branch(u8 opcode, u64 dst_val, u64 src_val)
{
switch (BPF_OP(opcode)) {
case BPF_JEQ: return dst_val == src_val;
case BPF_JNE: return dst_val != src_val;
case BPF_JGT: return dst_val > src_val;
case BPF_JGE: return dst_val >= src_val;
case BPF_JLT: return dst_val < src_val;
case BPF_JLE: return dst_val <= src_val;
case BPF_JSGT: return (s64)dst_val > (s64)src_val;
case BPF_JSGE: return (s64)dst_val >= (s64)src_val;
case BPF_JSLT: return (s64)dst_val < (s64)src_val;
case BPF_JSLE: return (s64)dst_val <= (s64)src_val;
case BPF_JSET: return (bool)(dst_val & src_val);
default: return -1;
}
}
/*
* Rewrite conditional branches with constant outcomes into unconditional
* jumps using register values resolved by bpf_compute_const_regs() pass.
* This eliminates dead edges from the CFG so that compute_live_registers()
* doesn't propagate liveness through dead code.
*/
int bpf_prune_dead_branches(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
bool changed = false;
int i;
for (i = 0; i < insn_cnt; i++) {
struct bpf_insn_aux_data *aux = &insn_aux[i];
struct bpf_insn *insn = &insns[i];
u8 class = BPF_CLASS(insn->code);
u64 dst_val, src_val;
int taken;
if (!bpf_insn_is_cond_jump(insn->code))
continue;
if (bpf_is_may_goto_insn(insn))
continue;
if (!(aux->const_reg_mask & BIT(insn->dst_reg)))
continue;
dst_val = aux->const_reg_vals[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_K) {
src_val = insn->imm;
} else {
if (!(aux->const_reg_mask & BIT(insn->src_reg)))
continue;
src_val = aux->const_reg_vals[insn->src_reg];
}
if (class == BPF_JMP32) {
/*
* The (s32) cast maps the 32-bit range into two u64 sub-ranges:
* [0x00000000, 0x7FFFFFFF] -> [0x0000000000000000, 0x000000007FFFFFFF]
* [0x80000000, 0xFFFFFFFF] -> [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
* The ordering is preserved within each sub-range, and
* the second sub-range is above the first as u64.
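* Example: dst_val = 0xFFFFFFFF, src_val = 1. After the casts dst_val
* becomes 0xFFFFFFFFFFFFFFFF, so JSLT is true (-1 < 1) and JGT is also
* true, matching the 32-bit comparison results.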
*/
dst_val = (s32)dst_val;
src_val = (s32)src_val;
}
taken = eval_const_branch(insn->code, dst_val, src_val);
if (taken < 0) {
bpf_log(&env->log, "Unknown conditional jump %x\n", insn->code);
return -EFAULT;
}
*insn = BPF_JMP_A(taken ? insn->off : 0);
changed = true;
}
if (!changed)
return 0;
/* recompute postorder, since CFG has changed */
kvfree(env->cfg.insn_postorder);
env->cfg.insn_postorder = NULL;
return bpf_compute_postorder(env);
}


@@ -256,24 +256,6 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
static bool bpf_helper_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == 0;
}
static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == BPF_PSEUDO_CALL;
}
static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
struct bpf_map_desc {
struct bpf_map *ptr;
int uid;
@@ -595,14 +577,14 @@ static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn
return false;
}
static bool is_may_goto_insn(struct bpf_insn *insn)
bool bpf_is_may_goto_insn(struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO;
}
static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{
return is_may_goto_insn(&env->prog->insnsi[insn_idx]);
return bpf_is_may_goto_insn(&env->prog->insnsi[insn_idx]);
}
static bool is_storage_get_function(enum bpf_func_id func_id)
@@ -2256,13 +2238,6 @@ static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf
static void mark_reg_known_zero(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs */
for (regno = 0; regno < MAX_BPF_REG; regno++)
__mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_known_zero(regs + regno);
}
@@ -2936,13 +2911,6 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_unknown(env, regs + regno);
}
@@ -2975,13 +2943,6 @@ static void __mark_reg_not_init(const struct bpf_verifier_env *env,
static void mark_reg_not_init(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
if (WARN_ON(regno >= MAX_BPF_REG)) {
verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
/* Something bad happened, let's kill all regs except FP */
for (regno = 0; regno < BPF_REG_FP; regno++)
__mark_reg_not_init(env, regs + regno);
return;
}
__mark_reg_not_init(env, regs + regno);
}
@@ -3131,7 +3092,7 @@ struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *en
}
/* Find subprogram that starts exactly at 'off' */
static int find_subprog(struct bpf_verifier_env *env, int off)
int bpf_find_subprog(struct bpf_verifier_env *env, int off)
{
struct bpf_subprog_info *p;
@@ -3150,7 +3111,7 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
verbose(env, "call to invalid destination\n");
return -EINVAL;
}
ret = find_subprog(env, off);
ret = bpf_find_subprog(env, off);
if (ret >= 0)
return ret;
if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
@@ -3791,6 +3752,94 @@ static int check_subprogs(struct bpf_verifier_env *env)
return 0;
}
/*
* Sort subprogs in topological order so that leaf subprogs come first and
* their callers come later. This is a DFS post-order traversal of the call
* graph. Scan only reachable instructions (those in the computed postorder) of
* the current subprog to discover callees (direct subprogs and sync
* callbacks).
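* For example, if main() calls a() and a() calls b(), the resulting
* topo_order is b, a, main (see the verifier_subprog_topo selftest).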
*/
static int sort_subprogs_topo(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *si = env->subprog_info;
int *insn_postorder = env->cfg.insn_postorder;
struct bpf_insn *insn = env->prog->insnsi;
int cnt = env->subprog_cnt;
int *dfs_stack = NULL;
int top = 0, order = 0;
int i, ret = 0;
u8 *color = NULL;
color = kvzalloc_objs(*color, cnt, GFP_KERNEL_ACCOUNT);
dfs_stack = kvmalloc_objs(*dfs_stack, cnt, GFP_KERNEL_ACCOUNT);
if (!color || !dfs_stack) {
ret = -ENOMEM;
goto out;
}
/*
* DFS post-order traversal.
* Color values: 0 = unvisited, 1 = on stack, 2 = done.
*/
for (i = 0; i < cnt; i++) {
if (color[i])
continue;
color[i] = 1;
dfs_stack[top++] = i;
while (top > 0) {
int cur = dfs_stack[top - 1];
int po_start = si[cur].postorder_start;
int po_end = si[cur + 1].postorder_start;
bool pushed = false;
int j;
for (j = po_start; j < po_end; j++) {
int idx = insn_postorder[j];
int callee;
if (!bpf_pseudo_call(&insn[idx]) && !bpf_pseudo_func(&insn[idx]))
continue;
callee = bpf_find_subprog(env, idx + insn[idx].imm + 1);
if (callee < 0) {
ret = -EFAULT;
goto out;
}
if (color[callee] == 2)
continue;
if (color[callee] == 1) {
if (bpf_pseudo_func(&insn[idx]))
continue;
verbose(env, "recursive call from %s() to %s()\n",
subprog_name(env, cur),
subprog_name(env, callee));
ret = -EINVAL;
goto out;
}
color[callee] = 1;
dfs_stack[top++] = callee;
pushed = true;
break;
}
if (!pushed) {
color[cur] = 2;
env->subprog_topo_order[order++] = cur;
top--;
}
}
}
if (env->log.level & BPF_LOG_LEVEL2)
for (i = 0; i < cnt; i++)
verbose(env, "topo_order[%d] = %s\n",
i, subprog_name(env, env->subprog_topo_order[i]));
out:
kvfree(dfs_stack);
kvfree(color);
return ret;
}
static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
int spi, int nr_slots)
{
@@ -3986,11 +4035,6 @@ static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *r
struct bpf_reg_state *reg;
bool rw64;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
return -EINVAL;
}
mark_reg_scratched(env, regno);
reg = &regs[regno];
@@ -4235,7 +4279,7 @@ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
return btf_name_by_offset(desc_btf, func->name_off);
}
static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
void bpf_verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
const struct bpf_insn_cbs cbs = {
.cb_call = disasm_kfunc_name,
@@ -4459,7 +4503,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
verbose(env, "stack=%s before ", env->tmp_str_buf);
verbose(env, "%d: ", idx);
verbose_insn(env, insn);
bpf_verbose_insn(env, insn);
}
/* If there is a history record that some registers gained range at this insn,
@@ -4562,7 +4606,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
int subprog_insn_idx, subprog;
subprog_insn_idx = idx + insn->imm + 1;
subprog = find_subprog(env, subprog_insn_idx);
subprog = bpf_find_subprog(env, subprog_insn_idx);
if (subprog < 0)
return -EFAULT;
@@ -6894,7 +6938,7 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx,
/* find the callee */
next_insn = i + insn[i].imm + 1;
sidx = find_subprog(env, next_insn);
sidx = bpf_find_subprog(env, next_insn);
if (verifier_bug_if(sidx < 0, env, "callee not found at insn %d", next_insn))
return -EFAULT;
if (subprog[sidx].is_async_cb) {
@@ -7029,7 +7073,7 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
{
int start = idx + insn->imm + 1, subprog;
subprog = find_subprog(env, start);
subprog = bpf_find_subprog(env, start);
if (verifier_bug_if(subprog < 0, env, "get stack depth: no program at insn %d", start))
return -EFAULT;
return env->subprog_info[subprog].stack_depth;
@@ -7276,7 +7320,7 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
set_sext32_default_val(reg, size);
}
static bool bpf_map_is_rdonly(const struct bpf_map *map)
bool bpf_map_is_rdonly(const struct bpf_map *map)
{
/* A map is considered read-only if the following condition are true:
*
@@ -7296,8 +7340,8 @@ static bool bpf_map_is_rdonly(const struct bpf_map *map)
!bpf_map_write_active(map);
}
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
bool is_ldsx)
int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
bool is_ldsx)
{
void *ptr;
u64 addr;
@@ -10987,7 +11031,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int err, subprog, target_insn;
target_insn = *insn_idx + insn->imm + 1;
subprog = find_subprog(env, target_insn);
subprog = bpf_find_subprog(env, target_insn);
if (verifier_bug_if(subprog < 0, env, "target of func call at insn %d is not a program",
target_insn))
return -EFAULT;
@@ -14088,6 +14132,194 @@ static int fetch_kfunc_arg_meta(struct bpf_verifier_env *env,
return 0;
}
/*
* Determine how many bytes a helper accesses through a stack pointer at
* argument position @arg (0-based, corresponding to R1-R5).
*
* Returns:
* > 0 known read access size in bytes
* 0 doesn't read anything directly
* S64_MIN unknown
* < 0 known write access of (-return) bytes
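*
* For example, bpf_probe_read_kernel(dst, 8, src) with R2 known to be the
* constant 8 yields -8 for arg 0: an 8-byte write into the buffer that the
* helper does not read.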
*/
s64 bpf_helper_stack_access_bytes(struct bpf_verifier_env *env, struct bpf_insn *insn,
int arg, int insn_idx)
{
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
const struct bpf_func_proto *fn;
enum bpf_arg_type at;
s64 size;
if (get_helper_proto(env, insn->imm, &fn) < 0)
return S64_MIN;
at = fn->arg_type[arg];
switch (base_type(at)) {
case ARG_PTR_TO_MAP_KEY:
case ARG_PTR_TO_MAP_VALUE: {
bool is_key = base_type(at) == ARG_PTR_TO_MAP_KEY;
u64 val;
int i, map_reg;
for (i = 0; i < arg; i++) {
if (base_type(fn->arg_type[i]) == ARG_CONST_MAP_PTR)
break;
}
if (i >= arg)
goto scan_all_maps;
map_reg = BPF_REG_1 + i;
if (!(aux->const_reg_map_mask & BIT(map_reg)))
goto scan_all_maps;
i = aux->const_reg_vals[map_reg];
if (i < env->used_map_cnt) {
size = is_key ? env->used_maps[i]->key_size
: env->used_maps[i]->value_size;
goto out;
}
scan_all_maps:
/*
* Map pointer is not known at this call site (e.g. different
* maps on merged paths). Conservatively return the largest
* key_size or value_size across all maps used by the program.
*/
val = 0;
for (i = 0; i < env->used_map_cnt; i++) {
struct bpf_map *map = env->used_maps[i];
u32 sz = is_key ? map->key_size : map->value_size;
if (sz > val)
val = sz;
if (map->inner_map_meta) {
sz = is_key ? map->inner_map_meta->key_size
: map->inner_map_meta->value_size;
if (sz > val)
val = sz;
}
}
if (!val)
return S64_MIN;
size = val;
goto out;
}
case ARG_PTR_TO_MEM:
if (at & MEM_FIXED_SIZE) {
size = fn->arg_size[arg];
goto out;
}
if (arg + 1 < ARRAY_SIZE(fn->arg_type) &&
arg_type_is_mem_size(fn->arg_type[arg + 1])) {
int size_reg = BPF_REG_1 + arg + 1;
if (aux->const_reg_mask & BIT(size_reg)) {
size = (s64)aux->const_reg_vals[size_reg];
goto out;
}
/*
* Size arg is const on each path but differs across merged
* paths. MAX_BPF_STACK is a safe upper bound for reads.
*/
if (at & MEM_UNINIT)
return 0;
return MAX_BPF_STACK;
}
return S64_MIN;
case ARG_PTR_TO_DYNPTR:
size = BPF_DYNPTR_SIZE;
break;
case ARG_PTR_TO_STACK:
/*
* Only used by bpf_calls_callback() helpers. The helper itself
* doesn't access stack. The callback subprog does and it's
* analyzed separately.
*/
return 0;
default:
return S64_MIN;
}
out:
/*
* MEM_UNINIT args are write-only: the helper initializes the
* buffer without reading it.
*/
if (at & MEM_UNINIT)
return -size;
return size;
}
/*
* Determine how many bytes a kfunc accesses through a stack pointer at
* argument position @arg (0-based, corresponding to R1-R5).
*
* Returns:
* > 0 known read access size in bytes
* 0 doesn't access memory through that argument (ex: not a pointer)
* S64_MIN unknown
* < 0 known write access of (-return) bytes
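*
* For example, bpf_iter_num_new(&it, 0, 10) with 'it' on the stack yields
* minus the BTF size of struct bpf_iter_num for arg 0, since KF_ITER_NEW
* kfuncs write the iterator state.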
*/
s64 bpf_kfunc_stack_access_bytes(struct bpf_verifier_env *env, struct bpf_insn *insn,
int arg, int insn_idx)
{
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
struct bpf_kfunc_call_arg_meta meta;
const struct btf_param *args;
const struct btf_type *t, *ref_t;
const struct btf *btf;
u32 nargs, type_size;
s64 size;
if (fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta) < 0)
return S64_MIN;
btf = meta.btf;
args = btf_params(meta.func_proto);
nargs = btf_type_vlen(meta.func_proto);
if (arg >= nargs)
return 0;
t = btf_type_skip_modifiers(btf, args[arg].type, NULL);
if (!btf_type_is_ptr(t))
return 0;
/* dynptr: fixed 16-byte on-stack representation */
if (is_kfunc_arg_dynptr(btf, &args[arg])) {
size = BPF_DYNPTR_SIZE;
goto out;
}
/* ptr + __sz/__szk pair: size is in the next register */
if (arg + 1 < nargs &&
(btf_param_match_suffix(btf, &args[arg + 1], "__sz") ||
btf_param_match_suffix(btf, &args[arg + 1], "__szk"))) {
int size_reg = BPF_REG_1 + arg + 1;
if (aux->const_reg_mask & BIT(size_reg)) {
size = (s64)aux->const_reg_vals[size_reg];
goto out;
}
return MAX_BPF_STACK;
}
/* fixed-size pointed-to type: resolve via BTF */
ref_t = btf_type_skip_modifiers(btf, t->type, NULL);
if (!IS_ERR(btf_resolve_size(btf, ref_t, &type_size))) {
size = type_size;
goto out;
}
return S64_MIN;
out:
/* KF_ITER_NEW kfuncs initialize the iterator state at arg 0 */
if (arg == 0 && meta.kfunc_flags & KF_ITER_NEW)
return -size;
if (is_kfunc_arg_uninit(btf, &args[arg]))
return -size;
return size;
}
/* check special kfuncs and return:
* 1 - not fall-through to 'else' branch, continue verification
* 0 - fall-through to 'else' branch
@@ -17918,8 +18150,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (insn->src_reg == BPF_PSEUDO_FUNC) {
struct bpf_prog_aux *aux = env->prog->aux;
u32 subprogno = find_subprog(env,
env->insn_idx + insn->imm + 1);
u32 subprogno = bpf_find_subprog(env,
env->insn_idx + insn->imm + 1);
if (!aux->func_info) {
verbose(env, "missing btf func_info\n");
@@ -18520,17 +18752,11 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
}
}
struct call_summary {
u8 num_params;
bool is_void;
bool fastcall;
};
/* If @call is a kfunc or helper call, fills @cs and returns true,
* otherwise returns false.
*/
static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
struct call_summary *cs)
bool bpf_get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
struct bpf_call_summary *cs)
{
struct bpf_kfunc_call_arg_meta meta;
const struct bpf_func_proto *fn;
@@ -18651,12 +18877,12 @@ static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
struct bpf_insn *call = &env->prog->insnsi[insn_idx];
u32 clobbered_regs_mask;
struct call_summary cs;
struct bpf_call_summary cs;
u32 expected_regs_mask;
s16 off;
int i;
if (!get_call_summary(env, call, &cs))
if (!bpf_get_call_summary(env, call, &cs))
return;
/* A bitmask specifying which caller saved registers are clobbered
@@ -19115,7 +19341,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
default:
/* conditional jump with two edges */
mark_prune_point(env, t);
if (is_may_goto_insn(insn))
if (bpf_is_may_goto_insn(insn))
mark_force_checkpoint(env, t);
ret = push_insn(t, t + 1, FALLTHROUGH, env);
@@ -19222,7 +19448,7 @@ static int check_cfg(struct bpf_verifier_env *env)
* [env->subprog_info[i].postorder_start, env->subprog_info[i+1].postorder_start)
* with indices of 'i' instructions in postorder.
*/
static int compute_postorder(struct bpf_verifier_env *env)
int bpf_compute_postorder(struct bpf_verifier_env *env)
{
u32 cur_postorder, i, top, stack_sz, s;
int *stack = NULL, *postorder = NULL, *state = NULL;
@@ -21516,7 +21742,7 @@ static int do_check(struct bpf_verifier_env *env)
verbose_linfo(env, env->insn_idx, "; ");
env->prev_log_pos = env->log.end_pos;
verbose(env, "%d: ", env->insn_idx);
verbose_insn(env, insn);
bpf_verbose_insn(env, insn);
env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
env->prev_log_pos = env->log.end_pos;
}
@@ -21531,6 +21757,27 @@ static int do_check(struct bpf_verifier_env *env)
sanitize_mark_insn_seen(env);
prev_insn_idx = env->insn_idx;
/* Sanity check: precomputed constants must match verifier state */
if (!state->speculative && insn_aux->const_reg_mask) {
struct bpf_reg_state *regs = cur_regs(env);
u16 mask = insn_aux->const_reg_mask;
for (int r = 0; r < ARRAY_SIZE(insn_aux->const_reg_vals); r++) {
u32 cval = insn_aux->const_reg_vals[r];
if (!(mask & BIT(r)))
continue;
if (regs[r].type != SCALAR_VALUE)
continue;
if (!tnum_is_const(regs[r].var_off))
continue;
if (verifier_bug_if((u32)regs[r].var_off.value != cval,
env, "const R%d: %u != %llu",
r, cval, regs[r].var_off.value))
return -EFAULT;
}
}
/* Reduce verification complexity by stopping speculative path
* verification when a nospec is encountered.
*/
@@ -21999,6 +22246,14 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
return err;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->dst_reg >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", insn->dst_reg);
return -EINVAL;
}
if (insn->src_reg >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", insn->src_reg);
return -EINVAL;
}
if (BPF_CLASS(insn->code) == BPF_LDX &&
((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
insn->imm != 0)) {
@@ -22512,7 +22767,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
}
}
static bool insn_is_cond_jump(u8 code)
bool bpf_insn_is_cond_jump(u8 code)
{
u8 op;
@@ -22535,7 +22790,7 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
int i;
for (i = 0; i < insn_cnt; i++, insn++) {
if (!insn_is_cond_jump(insn->code))
if (!bpf_insn_is_cond_jump(insn->code))
continue;
if (!aux_data[i + 1].seen)
@@ -23031,7 +23286,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
* need a hard reject of the program. Thus -EFAULT is
* propagated in any case.
*/
subprog = find_subprog(env, i + insn->imm + 1);
subprog = bpf_find_subprog(env, i + insn->imm + 1);
if (verifier_bug_if(subprog < 0, env, "No program to jit at insn %d",
i + insn->imm + 1))
return -EFAULT;
@@ -23246,7 +23501,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (!bpf_pseudo_call(insn))
continue;
insn->off = env->insn_aux_data[i].call_imm;
subprog = find_subprog(env, i + insn->off + 1);
subprog = bpf_find_subprog(env, i + insn->off + 1);
insn->imm = subprog;
}
@@ -23857,7 +24112,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
goto next_insn;
}
if (is_may_goto_insn(insn) && bpf_jit_supports_timed_may_goto()) {
if (bpf_is_may_goto_insn(insn) && bpf_jit_supports_timed_may_goto()) {
int stack_off_cnt = -stack_depth - 16;
/*
@@ -23900,7 +24155,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
goto next_insn;
} else if (is_may_goto_insn(insn)) {
} else if (bpf_is_may_goto_insn(insn)) {
int stack_off = -stack_depth - 8;
stack_depth_extra = 8;
@@ -25794,7 +26049,7 @@ static void compute_insn_live_regs(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct insn_live_regs *info)
{
struct call_summary cs;
struct bpf_call_summary cs;
u8 class = BPF_CLASS(insn->code);
u8 code = BPF_OP(insn->code);
u8 mode = BPF_MODE(insn->code);
@@ -25909,7 +26164,7 @@ static void compute_insn_live_regs(struct bpf_verifier_env *env,
case BPF_CALL:
def = ALL_CALLER_SAVED_REGS;
use = def & ~BIT(BPF_REG_0);
if (get_call_summary(env, insn, &cs))
if (bpf_get_call_summary(env, insn, &cs))
use = GENMASK(cs.num_params, 1);
break;
default:
@@ -26009,7 +26264,7 @@ static int compute_live_registers(struct bpf_verifier_env *env)
else
verbose(env, ".");
verbose(env, " ");
verbose_insn(env, &insns[i]);
bpf_verbose_insn(env, &insns[i]);
if (bpf_is_ldimm64(&insns[i]))
i++;
}
@@ -26326,7 +26581,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret < 0)
goto skip_full_check;
ret = compute_postorder(env);
ret = bpf_compute_postorder(env);
if (ret < 0)
goto skip_full_check;
@@ -26338,6 +26593,18 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret)
goto skip_full_check;
ret = bpf_compute_const_regs(env);
if (ret < 0)
goto skip_full_check;
ret = bpf_prune_dead_branches(env);
if (ret < 0)
goto skip_full_check;
ret = sort_subprogs_topo(env);
if (ret < 0)
goto skip_full_check;
ret = compute_scc(env);
if (ret < 0)
goto skip_full_check;


@@ -93,6 +93,7 @@
#include "verifier_stack_ptr.skel.h"
#include "verifier_store_release.skel.h"
#include "verifier_subprog_precision.skel.h"
#include "verifier_subprog_topo.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_tailcall.skel.h"
#include "verifier_tailcall_jit.skel.h"
@@ -238,6 +239,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_store_release(void) { RUN(verifier_store_release); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subprog_topo(void) { RUN(verifier_subprog_topo); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_tailcall(void) { RUN(verifier_tailcall); }
void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }


@@ -138,8 +138,7 @@ l0_%=: exit; \
SEC("tracepoint")
__description("bounded recursion")
__failure
/* verifier limitation in detecting max stack depth */
__msg("the call stack of 8 frames is too deep !")
__msg("recursive call from")
__naked void bounded_recursion(void)
{
asm volatile (" \


@@ -592,10 +592,10 @@ __naked void check_ids_in_regsafe_2(void)
*/
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("14: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__msg("14: safe")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
@@ -605,6 +605,7 @@ __naked void no_scalar_id_for_const(void)
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"r1 = 0;"
"r1 ^= r1;" /* prevent bpf_prune_dead_branches from folding the branch */
"r1 = r1;"
"r3 = r1;"
"r4 = r1;"
@@ -612,7 +613,9 @@ __naked void no_scalar_id_for_const(void)
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"r1 = 0;"
"r1 ^= r1;"
"r2 = 0;"
"r2 ^= r2;"
"r3 = r1;"
"r4 = r2;"
"l1_%=:"
@@ -628,10 +631,10 @@ __naked void no_scalar_id_for_const(void)
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("14: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__msg("14: safe")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
@@ -641,6 +644,7 @@ __naked void no_scalar_id_for_const32(void)
"if r0 > 7 goto l0_%=;"
/* possibly generate same scalar ids for r3 and r4 */
"w1 = 0;"
"w1 ^= w1;" /* prevent bpf_prune_dead_branches from folding the branch */
"w1 = w1;"
"w3 = w1;"
"w4 = w1;"
@@ -648,11 +652,13 @@ __naked void no_scalar_id_for_const32(void)
"l0_%=:"
/* possibly generate different scalar ids for r3 and r4 */
"w1 = 0;"
"w1 ^= w1;"
"w2 = 0;"
"w2 ^= w2;"
"w3 = w1;"
"w4 = w2;"
"l1_%=:"
/* predictable jump, marks r1 and r2 precise */
/* predictable jump, marks r3 and r4 precise */
"if w3 == w4 goto +0;"
"r0 = 0;"
"exit;"


@@ -0,0 +1,226 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* linear chain main -> A -> B */
__naked __noinline __used
static unsigned long linear_b(void)
{
asm volatile (
"r0 = 42;"
"exit;"
);
}
__naked __noinline __used
static unsigned long linear_a(void)
{
asm volatile (
"call linear_b;"
"exit;"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = linear_b")
__msg("topo_order[1] = linear_a")
__msg("topo_order[2] = topo_linear")
__naked int topo_linear(void)
{
asm volatile (
"call linear_a;"
"exit;"
);
}
/* diamond main -> A, main -> B, A -> C, B -> C */
__naked __noinline __used
static unsigned long diamond_c(void)
{
asm volatile (
"r0 = 1;"
"exit;"
);
}
__naked __noinline __used
static unsigned long diamond_b(void)
{
asm volatile (
"call diamond_c;"
"exit;"
);
}
__naked __noinline __used
static unsigned long diamond_a(void)
{
asm volatile (
"call diamond_c;"
"exit;"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = diamond_c")
__msg("topo_order[3] = topo_diamond")
__naked int topo_diamond(void)
{
asm volatile (
"call diamond_a;"
"call diamond_b;"
"exit;"
);
}
/* main -> global_a (global) -> static_leaf (static, leaf) */
__naked __noinline __used
static unsigned long static_leaf(void)
{
asm volatile (
"r0 = 7;"
"exit;"
);
}
__noinline __used
int global_a(int x)
{
return static_leaf();
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = static_leaf")
__msg("topo_order[1] = global_a")
__msg("topo_order[2] = topo_mixed")
__naked int topo_mixed(void)
{
asm volatile (
"r1 = 0;"
"call global_a;"
"exit;"
);
}
/*
* shared static callee from global and main:
* main -> shared_leaf (static)
* main -> global_b (global) -> shared_leaf (static)
*/
__naked __noinline __used
static unsigned long shared_leaf(void)
{
asm volatile (
"r0 = 99;"
"exit;"
);
}
__noinline __used
int global_b(int x)
{
return shared_leaf();
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = shared_leaf")
__msg("topo_order[1] = global_b")
__msg("topo_order[2] = topo_shared")
__naked int topo_shared(void)
{
asm volatile (
"call shared_leaf;"
"r1 = 0;"
"call global_b;"
"exit;"
);
}
/* duplicate calls to the same subprog */
__naked __noinline __used
static unsigned long dup_leaf(void)
{
asm volatile (
"r0 = 0;"
"exit;"
);
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = dup_leaf")
__msg("topo_order[1] = topo_dup_calls")
__naked int topo_dup_calls(void)
{
asm volatile (
"call dup_leaf;"
"call dup_leaf;"
"exit;"
);
}
/* main calls bpf_loop() with loop_cb as the callback */
static int loop_cb(int idx, void *ctx)
{
return 0;
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = loop_cb")
__msg("topo_order[1] = topo_loop_cb")
int topo_loop_cb(void)
{
bpf_loop(1, loop_cb, NULL, 0);
return 0;
}
/*
* bpf_loop callback calling another subprog
* main -> bpf_loop(callback=loop_cb2) -> loop_cb2 -> loop_cb2_leaf
*/
__naked __noinline __used
static unsigned long loop_cb2_leaf(void)
{
asm volatile (
"r0 = 0;"
"exit;"
);
}
static int loop_cb2(int idx, void *ctx)
{
return loop_cb2_leaf();
}
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = loop_cb2_leaf")
__msg("topo_order[1] = loop_cb2")
__msg("topo_order[2] = topo_loop_cb_chain")
int topo_loop_cb_chain(void)
{
bpf_loop(1, loop_cb2, NULL, 0);
return 0;
}
/* no calls (single subprog) */
SEC("?raw_tp")
__success __log_level(2)
__msg("topo_order[0] = topo_no_calls")
__naked int topo_no_calls(void)
{
asm volatile (
"r0 = 0;"
"exit;"
);
}
char _license[] SEC("license") = "GPL";


@@ -584,7 +584,7 @@ __naked void alu32_mov_u32_const(void)
{
asm volatile (" \
w7 = 0; \
w7 &= 1; \
w7 ^= w7; \
w0 = w7; \
if r0 == 0 goto l0_%=; \
r0 = *(u64*)(r7 + 0); \
@@ -894,7 +894,9 @@ __naked void unpriv_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
r8 ^= r8; \
r9 = 0; \
r9 ^= r9; \
r0 = r10; \
r1 = 0; \
r2 = r10; \
@@ -932,7 +934,9 @@ __naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
{
asm volatile (" \
r8 = 0; \
r8 ^= r8; \
r9 = 0; \
r9 ^= r9; \
r0 = r10; \
r1 = 0; \
r2 = r10; \


@@ -455,7 +455,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "the call stack of 9 frames is too deep",
.errstr = "recursive call",
.result = REJECT,
},
{
@@ -812,7 +812,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "the call stack of 9 frames is too deep",
.errstr = "recursive call",
.result = REJECT,
},
{
@@ -824,7 +824,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "the call stack of 9 frames is too deep",
.errstr = "recursive call",
.result = REJECT,
},
{


@@ -28,7 +28,7 @@
{
"junk insn4",
.insns = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_RAW_INSN(-1, 0, 0, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode ff",
@@ -37,7 +37,7 @@
{
"junk insn5",
.insns = {
BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
BPF_RAW_INSN(0x7f, 0, 0, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",