Merge branch 'no_caller_saved_registers-attribute-for-helper-calls'
Eduard Zingerman says:
====================
no_caller_saved_registers attribute for helper calls
This patch-set seeks to allow using the no_caller_saved_registers gcc/clang
attribute with some BPF helper functions (and kfuncs in the future).
As documented in [1], this attribute means that the function scratches
only some of the caller-saved registers defined by the ABI.
For BPF the set of such registers can be defined as follows:
- R0 is scratched only if the function is non-void;
- R1-R5 are scratched only if the corresponding parameter type is defined
  in the function prototype (see the sketch below).
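The series encodes this rule as a register bitmask; the snippet below is
condensed from helper_nocsr_clobber_mask() in the verifier diff further
down:

    /* Registers clobbered under the nocsr contract: R0 when the helper
     * returns a value, plus R1..RN for the N declared arguments.
     */
    static u32 helper_nocsr_clobber_mask(const struct bpf_func_proto *fn)
    {
        u8 mask = 0;
        int i;

        if (fn->ret_type != RET_VOID)
            mask |= BIT(BPF_REG_0);
        for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i)
            if (fn->arg_type[i] != ARG_DONTCARE)
                mask |= BIT(BPF_REG_1 + i);
        return mask;
    }

E.g. bpf_get_smp_processor_id() takes no arguments and returns an
integer, so its mask is just BIT(BPF_REG_0) and r1-r5 survive the call.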
The goal of the patch-set is to implement no_caller_saved_registers
(nocsr for short) in a backwards-compatible manner:
- kernels that support the feature gain some performance boost
  from better register allocation;
- kernels that don't support the feature still execute the programs,
  with a minor performance loss.
To achieve this, the patch-set uses a scheme suggested by Alexei Starovoitov:
- for nocsr calls clang allocates registers as if the relevant r0-r5
  registers were not scratched by the call;
- as a post-processing step, clang visits each nocsr call and adds
  a spill/fill for every live r0-r5;
- stack offsets used for spills/fills are allocated as the lowest
  stack offsets in the whole function and are not used for any other
  purpose;
- when the kernel loads a program, it looks for such patterns
  (a nocsr function surrounded by spills/fills) and checks whether the
  spill/fill stack offsets are used exclusively in nocsr patterns;
- if so, and if the current JIT inlines the call to the nocsr function
  (e.g. a helper call), the kernel removes the unnecessary spill/fill pairs;
- when an old kernel loads the program, the presence of the spill/fill
  pairs keeps the BPF program valid, albeit slightly less efficient.
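The rewrite itself, reproduced from the mark_nocsr_pattern_for_call()
comment added by this series (left: code generated by clang, right:
after the kernel removes the spill/fill pairs):

    r1 = 1;
    r2 = 2;
    *(u64 *)(r10 - 8)  = r1;            r1 = 1;
    *(u64 *)(r10 - 16) = r2;            r2 = 2;
    call %[to_be_inlined]         -->   call %[to_be_inlined]
    r2 = *(u64 *)(r10 - 16);            r0 = r1;
    r1 = *(u64 *)(r10 - 8);             r0 += r2;
    r0 = r1;                            exit;
    r0 += r2;
    exit;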
Corresponding clang/llvm changes are available in [2].
The patch-set uses the bpf_get_smp_processor_id() function as a canary,
making it the first helper with the nocsr attribute.
For example, consider the following program:
#define __no_csr __attribute__((no_caller_saved_registers))
#define SEC(name) __attribute__((section(name), used))
#define bpf_printk(fmt, ...) bpf_trace_printk((fmt), sizeof(fmt), __VA_ARGS__)
typedef unsigned int __u32;
static long (* const bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6;
static __u32 (*const bpf_get_smp_processor_id)(void) __no_csr = (void *)8;
SEC("raw_tp")
int test(void *ctx)
{
	__u32 task = bpf_get_smp_processor_id();
	bpf_printk("ctx=%p, smp=%d", ctx, task);
	return 0;
}
char _license[] SEC("license") = "GPL";
Compiled (using [2]) as follows:
$ clang --target=bpf -O2 -g -c -o nocsr.bpf.o nocsr.bpf.c
$ llvm-objdump --no-show-raw-insn -Sd nocsr.bpf.o
...
           3rd parameter for printk call      removable spill/fill pair
   .------ 0: r3 = r1                                      |
   |  ;       __u32 task = bpf_get_smp_processor_id();     |
   |       1: *(u64 *)(r10 - 0x8) = r3          <----------|
   |       2: call 0x8                                     |
   |       3: r3 = *(u64 *)(r10 - 0x8)          <----------'
   |  ;       bpf_printk("ctx=%p, smp=%d", ctx, task);
   |       4: r1 = 0x0 ll
   |       6: r2 = 0xf
   |       7: r4 = r0
   '-----> 8: call 0x6
      ;       return 0;
           9: r0 = 0x0
          10: exit
Here is how the program looks after verifier processing:
# bpftool prog load ./nocsr.bpf.o /sys/fs/bpf/nocsr-test
# bpftool prog dump xlated pinned /sys/fs/bpf/nocsr-test
int test(void * ctx):
0: (bf) r3 = r1 <--- 3rd printk parameter
; __u32 task = bpf_get_smp_processor_id();
1: (b4) w0 = 197324                   <--.  inlined helper call,
2: (bf) r0 = &(void __percpu *)(r0)   <---  spill/fill
3: (61) r0 = *(u32 *)(r0 +0)          <--'  pair removed
; bpf_printk("ctx=%p, smp=%d", ctx, task);
4: (18) r1 = map[id:5][0]+0
6: (b7) r2 = 15
7: (bf) r4 = r0
8: (85) call bpf_trace_printk#-125920
; return 0;
9: (b7) r0 = 0
10: (95) exit
[1] https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers
[2] https://github.com/eddyz87/llvm-project/tree/bpf-no-caller-saved-registers
Change list:
- v3 -> v4:
  - When nocsr spills/fills are removed in a subprogram, allow these
    spills/fills to reside in the [-MAX_BPF_STACK-48..MAX_BPF_STACK) range
    (suggested by Alexei);
  - Dropped patches with special handling for bpf_probe_read_kernel()
    (requested by Alexei);
  - Reset aux .nocsr_pattern and .nocsr_spills_num fields in
    check_nocsr_stack_contract() (requested by Andrii).
    Andrii, I have not added an additional flag to
    struct bpf_subprog_info: it currently does not have holes,
    and I really don't like adding a bool field there just as an
    alternative indicator that nocsr is disabled.
    The indicator at the moment is:
    - nocsr_stack_off > S16_MIN means that nocsr rewrite is enabled;
    - nocsr_stack_off == S16_MIN means that nocsr rewrite is disabled.
- v2 -> v3:
  - As suggested by Andrii, 'nocsr_stack_off' is no longer checked at
    rewrite time; instead mark_nocsr_patterns() now does two passes
    over the BPF program:
    - on the first pass it computes the lowest stack spill offset for
      the subprogram;
    - on the second pass this offset is used to recognize the nocsr pattern.
  - As suggested by Alexei, a new mechanic is added to work around a
    situation mentioned by Andrii, when more helper functions are
    marked as nocsr at compile time than the current kernel supports:
    - all {spill*,helper call,fill*} patterns are now marked as
      insn_aux_data[*].nocsr_pattern, thus relaxing the failure condition
      for check_nocsr_stack_contract();
    - spill/fill pairs are not removed for patterns where the helper
      can't be inlined;
    - see mark_nocsr_pattern_for_call() for details and an example.
  - As suggested by Alexei, subprogram stack depth is now adjusted
    if all spill/fill pairs could be removed. This adjustment has
    to take place before optimize_bpf_loop(), hence the rewrite
    is moved from do_misc_fixups() to remove_nocsr_spills_fills()
    (again).
  - As suggested by Andrii, special measures are taken to work around
    bpf_probe_read_kernel() access to the BPF stack, see patches 11, 12.
    Patch #11 is very simplistic; a more comprehensive solution would
    be to change the type of the third parameter of
    bpf_probe_read_kernel() from ARG_ANYTHING to something else and
    not only check the nocsr contract, but also propagate stack slot
    liveness information. However, such a change would require updating
    struct bpf_call_arg_meta processing, which currently implies that
    every memory parameter is followed by a size parameter.
    I can work on these changes, please comment.
  - Stylistic changes suggested by Andrii.
  - Added acks from Andrii.
  - Dropped RFC tag.
- v1 -> v2:
  - assume that functions inlined by either the jit or the verifier
    conform to the no_caller_saved_registers contract (Andrii, Puranjay);
  - allow nocsr rewrite for bpf_get_smp_processor_id()
    on the arm64 and riscv64 architectures (Puranjay);
  - added __arch_{x86_64,arm64,riscv64} macros for test_loader;
  - moved remove_nocsr_spills_fills() inside do_misc_fixups() (Andrii);
  - moved nocsr pattern detection from check_cfg() to a separate pass
    (Andrii);
  - various stylistic/correctness changes according to Andrii's
    comments.
Revisions:
- v1 https://lore.kernel.org/bpf/20240629094733.3863850-1-eddyz87@gmail.com/
- v2 https://lore.kernel.org/bpf/20240704102402.1644916-1-eddyz87@gmail.com/
- v3 https://lore.kernel.org/bpf/20240715230201.3901423-1-eddyz87@gmail.com/
====================
Link: https://lore.kernel.org/r/20240722233844.1406874-1-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
include/linux/bpf.h:

@@ -808,6 +808,12 @@ struct bpf_func_proto {
 	bool gpl_only;
 	bool pkt_access;
 	bool might_sleep;
+	/* set to true if helper follows contract for gcc/llvm
+	 * attribute no_caller_saved_registers:
+	 * - void functions do not scratch r0
+	 * - functions taking N arguments scratch only registers r1-rN
+	 */
+	bool allow_nocsr;
 	enum bpf_return_type ret_type;
 	union {
 		struct {

include/linux/bpf_verifier.h:

@@ -576,6 +576,14 @@ struct bpf_insn_aux_data {
 	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
 	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
 	u8 alu_state; /* used in combination with alu_limit */
+	/* true if STX or LDX instruction is a part of a spill/fill
+	 * pattern for a no_caller_saved_registers call.
+	 */
+	u8 nocsr_pattern:1;
+	/* for CALL instructions, a number of spill/fill pairs in the
+	 * no_caller_saved_registers pattern.
+	 */
+	u8 nocsr_spills_num:3;
 
 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
@@ -645,6 +653,10 @@ struct bpf_subprog_info {
 	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
 	u16 stack_depth; /* max. stack depth used by this function */
 	u16 stack_extra;
+	/* offsets in range [stack_depth .. nocsr_stack_off)
+	 * are used for no_caller_saved_registers spills and fills.
+	 */
+	s16 nocsr_stack_off;
 	bool has_tail_call: 1;
 	bool tail_call_reachable: 1;
 	bool has_ld_abs: 1;
@@ -652,6 +664,8 @@ struct bpf_subprog_info {
 	bool is_async_cb: 1;
 	bool is_exception_cb: 1;
 	bool args_cached: 1;
+	/* true if nocsr stack region is used by functions that can't be inlined */
+	bool keep_nocsr_stack: 1;
 
 	u8 arg_cnt;
 	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
kernel/bpf/helpers.c:

@@ -158,6 +158,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
 	.func		= bpf_get_smp_processor_id,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
+	.allow_nocsr	= true,
 };
 
 BPF_CALL_0(bpf_get_numa_node_id)
kernel/bpf/verifier.c:

@@ -4579,6 +4579,31 @@ static int get_reg_width(struct bpf_reg_state *reg)
 	return fls64(reg->umax_value);
 }
 
+/* See comment for mark_nocsr_pattern_for_call() */
+static void check_nocsr_stack_contract(struct bpf_verifier_env *env, struct bpf_func_state *state,
+				       int insn_idx, int off)
+{
+	struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno];
+	struct bpf_insn_aux_data *aux = env->insn_aux_data;
+	int i;
+
+	if (subprog->nocsr_stack_off <= off || aux[insn_idx].nocsr_pattern)
+		return;
+	/* access to the region [max_stack_depth .. nocsr_stack_off)
+	 * from something that is not a part of the nocsr pattern,
+	 * disable nocsr rewrites for current subprogram by setting
+	 * nocsr_stack_off to a value smaller than any possible offset.
+	 */
+	subprog->nocsr_stack_off = S16_MIN;
+	/* reset nocsr aux flags within subprogram,
+	 * happens at most once per subprogram
+	 */
+	for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+		aux[i].nocsr_spills_num = 0;
+		aux[i].nocsr_pattern = 0;
+	}
+}
+
 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -4627,6 +4652,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		if (err)
 			return err;
 
+	check_nocsr_stack_contract(env, state, insn_idx, off);
 	mark_stack_slot_scratched(env, spi);
 	if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
 		bool reg_value_fits;
@@ -4761,6 +4787,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
 			return err;
 	}
 
+	check_nocsr_stack_contract(env, state, insn_idx, min_off);
 	/* Variable offset writes destroy any spilled pointers in range. */
 	for (i = min_off; i < max_off; i++) {
 		u8 new_type, *stype;
@@ -4899,6 +4926,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 	reg = &reg_state->stack[spi].spilled_ptr;
 
 	mark_stack_slot_scratched(env, spi);
+	check_nocsr_stack_contract(env, state, env->insn_idx, off);
 
 	if (is_spilled_reg(&reg_state->stack[spi])) {
 		u8 spill_size = 1;
@@ -5059,6 +5087,7 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
 	min_off = reg->smin_value + off;
 	max_off = reg->smax_value + off;
 	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
+	check_nocsr_stack_contract(env, ptr_state, env->insn_idx, min_off);
 	return 0;
 }
 
@@ -6772,10 +6801,20 @@ static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
 					   struct bpf_func_state *state,
 					   enum bpf_access_type t)
 {
-	int min_valid_off;
+	struct bpf_insn_aux_data *aux = &env->insn_aux_data[env->insn_idx];
+	int min_valid_off, max_bpf_stack;
+
+	/* If accessing instruction is a spill/fill from nocsr pattern,
+	 * add room for all caller saved registers below MAX_BPF_STACK.
+	 * In case if nocsr rewrite won't happen maximal stack depth
+	 * would be checked by check_max_stack_depth_subprog().
+	 */
+	max_bpf_stack = MAX_BPF_STACK;
+	if (aux->nocsr_pattern)
+		max_bpf_stack += CALLER_SAVED_REGS * BPF_REG_SIZE;
 
 	if (t == BPF_WRITE || env->allow_uninit_stack)
-		min_valid_off = -MAX_BPF_STACK;
+		min_valid_off = -max_bpf_stack;
 	else
 		min_valid_off = -state->allocated_stack;
 
@@ -10369,6 +10408,19 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
 		 state->callback_subprogno == subprogno);
 }
 
+static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
+			    const struct bpf_func_proto **ptr)
+{
+	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID)
+		return -ERANGE;
+
+	if (!env->ops->get_func_proto)
+		return -EINVAL;
+
+	*ptr = env->ops->get_func_proto(func_id, env->prog);
+	return *ptr ? 0 : -EINVAL;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			     int *insn_idx_p)
 {
@@ -10385,18 +10437,16 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 	/* find function prototype */
 	func_id = insn->imm;
-	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
-		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
-			func_id);
+	err = get_helper_proto(env, insn->imm, &fn);
+	if (err == -ERANGE) {
+		verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id);
 		return -EINVAL;
 	}
 
-	if (env->ops->get_func_proto)
-		fn = env->ops->get_func_proto(func_id, env->prog);
-	if (!fn) {
+	if (err) {
 		verbose(env, "program of this type cannot use helper %s#%d\n",
 			func_id_name(func_id), func_id);
-		return -EINVAL;
+		return err;
 	}
 
 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
@@ -16050,6 +16100,239 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 	return ret;
 }
 
+/* Bitmask with 1s for all caller saved registers */
+#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
+
+/* Return a bitmask specifying which caller saved registers are
+ * clobbered by a call to a helper *as if* this helper follows
+ * no_caller_saved_registers contract:
+ * - includes R0 if function is non-void;
+ * - includes R1-R5 if corresponding parameter is described
+ *   in the function prototype.
+ */
+static u32 helper_nocsr_clobber_mask(const struct bpf_func_proto *fn)
+{
+	u8 mask;
+	int i;
+
+	mask = 0;
+	if (fn->ret_type != RET_VOID)
+		mask |= BIT(BPF_REG_0);
+	for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i)
+		if (fn->arg_type[i] != ARG_DONTCARE)
+			mask |= BIT(BPF_REG_1 + i);
+	return mask;
+}
+
+/* True if do_misc_fixups() replaces calls to helper number 'imm',
+ * replacement patch is presumed to follow no_caller_saved_registers contract
+ * (see mark_nocsr_pattern_for_call() below).
+ */
+static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
+{
+	switch (imm) {
+#ifdef CONFIG_X86_64
+	case BPF_FUNC_get_smp_processor_id:
+		return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
+#endif
+	default:
+		return false;
+	}
+}
+
+/* GCC and LLVM define a no_caller_saved_registers function attribute.
+ * This attribute means that function scratches only some of
+ * the caller saved registers defined by ABI.
+ * For BPF the set of such registers could be defined as follows:
+ * - R0 is scratched only if function is non-void;
+ * - R1-R5 are scratched only if corresponding parameter type is defined
+ *   in the function prototype.
+ *
+ * The contract between kernel and clang allows to simultaneously use
+ * such functions and maintain backwards compatibility with old
+ * kernels that don't understand no_caller_saved_registers calls
+ * (nocsr for short):
+ *
+ * - for nocsr calls clang allocates registers as-if relevant r0-r5
+ *   registers are not scratched by the call;
+ *
+ * - as a post-processing step, clang visits each nocsr call and adds
+ *   spill/fill for every live r0-r5;
+ *
+ * - stack offsets used for the spill/fill are allocated as lowest
+ *   stack offsets in whole function and are not used for any other
+ *   purposes;
+ *
+ * - when kernel loads a program, it looks for such patterns
+ *   (nocsr function surrounded by spills/fills) and checks if
+ *   spill/fill stack offsets are used exclusively in nocsr patterns;
+ *
+ * - if so, and if verifier or current JIT inlines the call to the
+ *   nocsr function (e.g. a helper call), kernel removes unnecessary
+ *   spill/fill pairs;
+ *
+ * - when old kernel loads a program, presence of spill/fill pairs
+ *   keeps BPF program valid, albeit slightly less efficient.
+ *
+ * For example:
+ *
+ *   r1 = 1;
+ *   r2 = 2;
+ *   *(u64 *)(r10 - 8)  = r1;            r1 = 1;
+ *   *(u64 *)(r10 - 16) = r2;            r2 = 2;
+ *   call %[to_be_inlined]         -->   call %[to_be_inlined]
+ *   r2 = *(u64 *)(r10 - 16);            r0 = r1;
+ *   r1 = *(u64 *)(r10 - 8);             r0 += r2;
+ *   r0 = r1;                            exit;
+ *   r0 += r2;
+ *   exit;
+ *
+ * The purpose of mark_nocsr_pattern_for_call() is to:
+ * - look for such patterns;
+ * - mark spill and fill instructions in env->insn_aux_data[*].nocsr_pattern;
+ * - set env->insn_aux_data[*].nocsr_spills_num for the call instruction;
+ * - update env->subprog_info[*]->nocsr_stack_off to find an offset
+ *   at which nocsr spill/fill stack slots start;
+ * - update env->subprog_info[*]->keep_nocsr_stack.
+ *
+ * The .nocsr_pattern and .nocsr_stack_off are used by
+ * check_nocsr_stack_contract() to check if every stack access to
+ * nocsr spill/fill stack slot originates from spill/fill
+ * instructions, members of nocsr patterns.
+ *
+ * If such condition holds true for a subprogram, nocsr patterns could
+ * be rewritten by remove_nocsr_spills_fills().
+ * Otherwise nocsr patterns are not changed in the subprogram
+ * (code, presumably, generated by an older clang version).
+ *
+ * For example, it is *not* safe to remove spill/fill below:
+ *
+ *   r1 = 1;
+ *   *(u64 *)(r10 - 8)  = r1;            r1 = 1;
+ *   call %[to_be_inlined]         -->   call %[to_be_inlined]
+ *   r1 = *(u64 *)(r10 - 8);             r0 = *(u64 *)(r10 - 8);  <---- wrong !!!
+ *   r0 = *(u64 *)(r10 - 8);             r0 += r1;
+ *   r0 += r1;                           exit;
+ *   exit;
+ */
+static void mark_nocsr_pattern_for_call(struct bpf_verifier_env *env,
+					struct bpf_subprog_info *subprog,
+					int insn_idx, s16 lowest_off)
+{
+	struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
+	struct bpf_insn *call = &env->prog->insnsi[insn_idx];
+	const struct bpf_func_proto *fn;
+	u32 clobbered_regs_mask = ALL_CALLER_SAVED_REGS;
+	u32 expected_regs_mask;
+	bool can_be_inlined = false;
+	s16 off;
+	int i;
+
+	if (bpf_helper_call(call)) {
+		if (get_helper_proto(env, call->imm, &fn) < 0)
+			/* error would be reported later */
+			return;
+		clobbered_regs_mask = helper_nocsr_clobber_mask(fn);
+		can_be_inlined = fn->allow_nocsr &&
+				 (verifier_inlines_helper_call(env, call->imm) ||
+				  bpf_jit_inlines_helper_call(call->imm));
+	}
+
+	if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
+		return;
+
+	/* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */
+	expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS;
+
+	/* match pairs of form:
+	 *
+	 * *(u64 *)(r10 - Y) = rX   (where Y % 8 == 0)
+	 * ...
+	 * call %[to_be_inlined]
+	 * ...
+	 * rX = *(u64 *)(r10 - Y)
+	 */
+	for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) {
+		if (insn_idx - i < 0 || insn_idx + i >= env->prog->len)
+			break;
+		stx = &insns[insn_idx - i];
+		ldx = &insns[insn_idx + i];
+		/* must be a stack spill/fill pair */
+		if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+		    ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) ||
+		    stx->dst_reg != BPF_REG_10 ||
+		    ldx->src_reg != BPF_REG_10)
+			break;
+		/* must be a spill/fill for the same reg */
+		if (stx->src_reg != ldx->dst_reg)
+			break;
+		/* must be one of the previously unseen registers */
+		if ((BIT(stx->src_reg) & expected_regs_mask) == 0)
+			break;
+		/* must be a spill/fill for the same expected offset,
+		 * no need to check offset alignment, BPF_DW stack access
+		 * is always 8-byte aligned.
+		 */
+		if (stx->off != off || ldx->off != off)
+			break;
+		expected_regs_mask &= ~BIT(stx->src_reg);
+		env->insn_aux_data[insn_idx - i].nocsr_pattern = 1;
+		env->insn_aux_data[insn_idx + i].nocsr_pattern = 1;
+	}
+	if (i == 1)
+		return;
+
+	/* Conditionally set 'nocsr_spills_num' to allow forward
+	 * compatibility when more helper functions are marked as
+	 * nocsr at compile time than current kernel supports, e.g:
+	 *
+	 *   1: *(u64 *)(r10 - 8) = r1
+	 *   2: call A             ;; assume A is nocsr for current kernel
+	 *   3: r1 = *(u64 *)(r10 - 8)
+	 *   4: *(u64 *)(r10 - 8) = r1
+	 *   5: call B             ;; assume B is not nocsr for current kernel
+	 *   6: r1 = *(u64 *)(r10 - 8)
+	 *
+	 * There is no need to block nocsr rewrite for such program.
+	 * Set 'nocsr_pattern' for both calls to keep check_nocsr_stack_contract() happy,
+	 * don't set 'nocsr_spills_num' for call B so that remove_nocsr_spills_fills()
+	 * does not remove spill/fill pair {4,6}.
+	 */
+	if (can_be_inlined)
+		env->insn_aux_data[insn_idx].nocsr_spills_num = i - 1;
+	else
+		subprog->keep_nocsr_stack = 1;
+	subprog->nocsr_stack_off = min(subprog->nocsr_stack_off, off);
+}
+
+static int mark_nocsr_patterns(struct bpf_verifier_env *env)
+{
+	struct bpf_subprog_info *subprog = env->subprog_info;
+	struct bpf_insn *insn;
+	s16 lowest_off;
+	int s, i;
+
+	for (s = 0; s < env->subprog_cnt; ++s, ++subprog) {
+		/* find lowest stack spill offset used in this subprog */
+		lowest_off = 0;
+		for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+			insn = env->prog->insnsi + i;
+			if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) ||
+			    insn->dst_reg != BPF_REG_10)
+				continue;
+			lowest_off = min(lowest_off, insn->off);
+		}
+		/* use this offset to find nocsr patterns */
+		for (i = subprog->start; i < (subprog + 1)->start; ++i) {
+			insn = env->prog->insnsi + i;
+			if (insn->code != (BPF_JMP | BPF_CALL))
+				continue;
+			mark_nocsr_pattern_for_call(env, subprog, i, lowest_off);
+		}
+	}
+	return 0;
+}
+
 /* Visits the instruction at index t and returns one of the following:
  *  < 0 - an error occurred
  *  DONE_EXPLORING - the instruction was fully explored
@@ -19198,9 +19481,11 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
 static int opt_remove_nops(struct bpf_verifier_env *env)
 {
-	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+	const struct bpf_insn ja = NOP;
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
 	int i, err;
@@ -20556,7 +20841,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 #if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
 		/* Implement bpf_get_smp_processor_id() inline. */
 		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
-		    prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+		    verifier_inlines_helper_call(env, insn->imm)) {
 			/* BPF_FUNC_get_smp_processor_id inlining is an
 			 * optimization, so if pcpu_hot.cpu_number is ever
 			 * changed in some incompatible and hard to support
@@ -20946,6 +21231,40 @@ static int optimize_bpf_loop(struct bpf_verifier_env *env)
 	return 0;
 }
 
+/* Remove unnecessary spill/fill pairs, members of nocsr pattern,
+ * adjust subprograms stack depth when possible.
+ */
+static int remove_nocsr_spills_fills(struct bpf_verifier_env *env)
+{
+	struct bpf_subprog_info *subprog = env->subprog_info;
+	struct bpf_insn_aux_data *aux = env->insn_aux_data;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	u32 spills_num;
+	bool modified = false;
+	int i, j;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (aux[i].nocsr_spills_num > 0) {
+			spills_num = aux[i].nocsr_spills_num;
+			/* NOPs would be removed by opt_remove_nops() */
+			for (j = 1; j <= spills_num; ++j) {
+				*(insn - j) = NOP;
+				*(insn + j) = NOP;
+			}
+			modified = true;
+		}
+		if ((subprog + 1)->start == i + 1) {
+			if (modified && !subprog->keep_nocsr_stack)
+				subprog->stack_depth = -subprog->nocsr_stack_off;
+			subprog++;
+			modified = false;
+		}
+	}
+
+	return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
 	struct bpf_verifier_state_list *sl, *sln;
@@ -21860,6 +22179,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 	if (ret < 0)
 		goto skip_full_check;
 
+	ret = mark_nocsr_patterns(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	ret = do_check_main(env);
 	ret = ret ?: do_check_subprogs(env);
 
@@ -21869,6 +22192,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 skip_full_check:
 	kvfree(env->explored_states);
 
+	/* might decrease stack depth, keep it before passes that
+	 * allocate additional slots.
+	 */
+	if (ret == 0)
+		ret = remove_nocsr_spills_fills(env);
+
 	if (ret == 0)
 		ret = check_max_stack_depth(env);
 
tools/testing/selftests/bpf/Makefile:

@@ -661,6 +661,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
 			 test_loader.c \
 			 xsk.c \
 			 disasm.c \
+			 disasm_helpers.c \
 			 json_writer.c \
 			 flow_dissector_load.h \
 			 ip_check_defrag_frags.h
tools/testing/selftests/bpf/disasm_helpers.c (new file, 69 lines):

@@ -0,0 +1,69 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

#include <bpf/bpf.h>
#include "disasm.h"

struct print_insn_context {
	char scratch[16];
	char *buf;
	size_t sz;
};

static void print_insn_cb(void *private_data, const char *fmt, ...)
{
	struct print_insn_context *ctx = private_data;
	va_list args;

	va_start(args, fmt);
	vsnprintf(ctx->buf, ctx->sz, fmt, args);
	va_end(args);
}

static const char *print_call_cb(void *private_data, const struct bpf_insn *insn)
{
	struct print_insn_context *ctx = private_data;

	/* For pseudo calls verifier.c:jit_subprogs() hides original
	 * imm to insn->off and changes insn->imm to be an index of
	 * the subprog instead.
	 */
	if (insn->src_reg == BPF_PSEUDO_CALL) {
		snprintf(ctx->scratch, sizeof(ctx->scratch), "%+d", insn->off);
		return ctx->scratch;
	}

	return NULL;
}

struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz)
{
	struct print_insn_context ctx = {
		.buf = buf,
		.sz = buf_sz,
	};
	struct bpf_insn_cbs cbs = {
		.cb_print = print_insn_cb,
		.cb_call = print_call_cb,
		.private_data = &ctx,
	};
	char *tmp, *pfx_end, *sfx_start;
	bool double_insn;
	int len;

	print_bpf_insn(&cbs, insn, true);
	/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix
	 * for each instruction (FF stands for instruction `code` byte).
	 * Remove the prefix inplace, and also simplify call instructions.
	 * E.g.: "(85) call foo#10" -> "call foo".
	 * Also remove newline in the end (the 'max(strlen(buf) - 1, 0)' thing).
	 */
	pfx_end = buf + 5;
	sfx_start = buf + max((int)strlen(buf) - 1, 0);
	if (strncmp(pfx_end, "call ", 5) == 0 && (tmp = strrchr(buf, '#')))
		sfx_start = tmp;
	len = sfx_start - pfx_end;
	memmove(buf, pfx_end, len);
	buf[len] = 0;
	double_insn = insn->code == (BPF_LD | BPF_IMM | BPF_DW);
	return insn + (double_insn ? 2 : 1);
}
tools/testing/selftests/bpf/disasm_helpers.h (new file, 12 lines):

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

#ifndef __DISASM_HELPERS_H
#define __DISASM_HELPERS_H

#include <stdlib.h>

struct bpf_insn;

struct bpf_insn *disasm_insn(struct bpf_insn *insn, char *buf, size_t buf_sz);

#endif /* __DISASM_HELPERS_H */
tools/testing/selftests/bpf/test_loader.c:

@@ -10,7 +10,8 @@
 #include "bpf/btf.h"
 #include "bpf_util.h"
 #include "linux/filter.h"
 #include "disasm.h"
 #include "linux/kernel.h"
+#include "disasm_helpers.h"
 
 #define MAX_PROG_TEXT_SZ (32 * 1024)
@@ -628,63 +629,6 @@ static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_map[][2])
 	return false;
 }
 
-static void print_insn(void *private_data, const char *fmt, ...)
-{
-	va_list args;
-
-	va_start(args, fmt);
-	vfprintf((FILE *)private_data, fmt, args);
-	va_end(args);
-}
-
-/* Disassemble instructions to a stream */
-static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len)
-{
-	const struct bpf_insn_cbs cbs = {
-		.cb_print = print_insn,
-		.cb_call = NULL,
-		.cb_imm = NULL,
-		.private_data = out,
-	};
-	bool double_insn = false;
-	int i;
-
-	for (i = 0; i < len; i++) {
-		if (double_insn) {
-			double_insn = false;
-			continue;
-		}
-
-		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
-		print_bpf_insn(&cbs, insn + i, true);
-	}
-}
-
-/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix
- * for each instruction (FF stands for instruction `code` byte).
- * This function removes the prefix inplace for each line in `str`.
- */
-static void remove_insn_prefix(char *str, int size)
-{
-	const int prefix_size = 5;
-
-	int write_pos = 0, read_pos = prefix_size;
-	int len = strlen(str);
-	char c;
-
-	size = min(size, len);
-
-	while (read_pos < size) {
-		c = str[read_pos++];
-		if (c == 0)
-			break;
-		str[write_pos++] = c;
-		if (c == '\n')
-			read_pos += prefix_size;
-	}
-	str[write_pos] = 0;
-}
-
 struct prog_info {
 	char *prog_kind;
 	enum bpf_prog_type prog_type;
@@ -699,9 +643,10 @@ static void match_program(struct btf *btf,
 			  char *reg_map[][2],
 			  bool skip_first_insn)
 {
-	struct bpf_insn *buf = NULL;
+	struct bpf_insn *buf = NULL, *insn, *insn_end;
 	int err = 0, prog_fd = 0;
 	FILE *prog_out = NULL;
+	char insn_buf[64];
 	char *text = NULL;
 	__u32 cnt = 0;
@@ -739,12 +684,13 @@
 		PRINT_FAIL("Can't open memory stream\n");
 		goto out;
 	}
-	if (skip_first_insn)
-		print_xlated(prog_out, buf + 1, cnt - 1);
-	else
-		print_xlated(prog_out, buf, cnt);
+	insn_end = buf + cnt;
+	insn = buf + (skip_first_insn ? 1 : 0);
+	while (insn < insn_end) {
+		insn = disasm_insn(insn, insn_buf, sizeof(insn_buf));
+		fprintf(prog_out, "%s\n", insn_buf);
+	}
 	fclose(prog_out);
-	remove_insn_prefix(text, MAX_PROG_TEXT_SZ);
 
 	ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map),
 		    pinfo->prog_kind);
tools/testing/selftests/bpf/prog_tests/verifier.c:

@@ -53,6 +53,7 @@
 #include "verifier_movsx.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
+#include "verifier_nocsr.skel.h"
 #include "verifier_or_jmp32_k.skel.h"
 #include "verifier_precision.skel.h"
 #include "verifier_prevent_map_lookup.skel.h"
@@ -173,6 +174,7 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); }
 void test_verifier_movsx(void) { RUN(verifier_movsx); }
 void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); }
 void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); }
+void test_verifier_nocsr(void) { RUN(verifier_nocsr); }
 void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); }
 void test_verifier_precision(void) { RUN(verifier_precision); }
 void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); }
tools/testing/selftests/bpf/progs/bpf_misc.h:

@@ -26,6 +26,9 @@
  *
  * __regex                Same as __msg, but using a regular expression.
  * __regex_unpriv         Same as __msg_unpriv but using a regular expression.
+ * __xlated               Expect a line in a disassembly log after verifier applies rewrites.
+ *                        Multiple __xlated attributes could be specified.
+ * __xlated_unpriv        Same as __xlated but for unprivileged mode.
  *
  * __success              Expect program load success in privileged mode.
  * __success_unpriv       Expect program load success in unprivileged mode.
@@ -60,14 +63,20 @@
  * __auxiliary            Annotated program is not a separate test, but used as auxiliary
  *                        for some other test cases and should always be loaded.
  * __auxiliary_unpriv     Same, but load program in unprivileged mode.
+ *
+ * __arch_*               Specify on which architecture the test case should be tested.
+ *                        Several __arch_* annotations could be specified at once.
+ *                        When test case is not run on current arch it is marked as skipped.
  */
 #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
 #define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex)))
+#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" msg)))
 #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
 #define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
 #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
 #define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg)))
 #define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex)))
+#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" msg)))
 #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
 #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
 #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
@@ -77,6 +86,10 @@
 #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary")))
 #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv")))
 #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path)))
+#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch)))
+#define __arch_x86_64 __arch("X86_64")
+#define __arch_arm64 __arch("ARM64")
+#define __arch_riscv64 __arch("RISCV64")
 
 /* Convenience macro for use with 'asm volatile' blocks */
 #define __naked __attribute__((naked))
796
tools/testing/selftests/bpf/progs/verifier_nocsr.c
Normal file
796
tools/testing/selftests/bpf/progs/verifier_nocsr.c
Normal file
@@ -0,0 +1,796 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "../../../include/linux/filter.h"
|
||||
#include "bpf_misc.h"
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 8")
|
||||
__xlated("4: r5 = 5")
|
||||
__xlated("5: w0 = ")
|
||||
__xlated("6: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("7: r0 = *(u32 *)(r0 +0)")
|
||||
__xlated("8: exit")
|
||||
__success
|
||||
__naked void simple(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"r2 = 2;"
|
||||
"r3 = 3;"
|
||||
"r4 = 4;"
|
||||
"r5 = 5;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"*(u64 *)(r10 - 24) = r2;"
|
||||
"*(u64 *)(r10 - 32) = r3;"
|
||||
"*(u64 *)(r10 - 40) = r4;"
|
||||
"*(u64 *)(r10 - 48) = r5;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r5 = *(u64 *)(r10 - 48);"
|
||||
"r4 = *(u64 *)(r10 - 40);"
|
||||
"r3 = *(u64 *)(r10 - 32);"
|
||||
"r2 = *(u64 *)(r10 - 24);"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
/* The logic for detecting and verifying nocsr pattern is the same for
|
||||
* any arch, however x86 differs from arm64 or riscv64 in a way
|
||||
* bpf_get_smp_processor_id is rewritten:
|
||||
* - on x86 it is done by verifier
|
||||
* - on arm64 and riscv64 it is done by jit
|
||||
*
|
||||
* Which leads to different xlated patterns for different archs:
|
||||
* - on x86 the call is expanded as 3 instructions
|
||||
* - on arm64 and riscv64 the call remains as is
|
||||
* (but spills/fills are still removed)
|
||||
*
|
||||
* It is really desirable to check instruction indexes in the xlated
|
||||
* patterns, so add this canary test to check that function rewrite by
|
||||
* jit is correctly processed by nocsr logic, keep the rest of the
|
||||
* tests as x86.
|
||||
*/
|
||||
SEC("raw_tp")
|
||||
__arch_arm64
|
||||
__arch_riscv64
|
||||
__xlated("0: r1 = 1")
|
||||
__xlated("1: call bpf_get_smp_processor_id")
|
||||
__xlated("2: exit")
|
||||
__success
|
||||
__naked void canary_arm64_riscv64(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("3: exit")
|
||||
__success
|
||||
__naked void canary_zero_spills(void)
|
||||
{
|
||||
asm volatile (
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 16")
|
||||
__xlated("1: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r2 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void wrong_reg_in_pattern1(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r2 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -16) = r6")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r6 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void wrong_reg_in_pattern2(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r6;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r6 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -16) = r0")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r0 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void wrong_reg_in_pattern3(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r0 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r0;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r0 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("2: *(u64 *)(r2 -16) = r1")
|
||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("6: r1 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void wrong_base_in_pattern(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"r2 = r10;"
|
||||
"*(u64 *)(r2 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r2 = 1")
|
||||
__success
|
||||
__naked void wrong_insn_in_pattern(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r2 = 1;"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("2: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("6: r1 = *(u64 *)(r10 -8)")
|
||||
__success
|
||||
__naked void wrong_off_in_pattern1(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u32 *)(r10 -4) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u32 *)(r10 -4)")
|
||||
__success
|
||||
__naked void wrong_off_in_pattern2(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u32 *)(r10 - 4) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u32 *)(r10 - 4);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u32 *)(r10 -16) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u32 *)(r10 -16)")
|
||||
__success
|
||||
__naked void wrong_size_in_pattern(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u32 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u32 *)(r10 - 16);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("2: *(u32 *)(r10 -8) = r1")
|
||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("6: r1 = *(u32 *)(r10 -8)")
|
||||
__success
|
||||
__naked void partial_pattern(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"r2 = 2;"
|
||||
"*(u32 *)(r10 - 8) = r1;"
|
||||
"*(u64 *)(r10 - 16) = r2;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r2 = *(u64 *)(r10 - 16);"
|
||||
"r1 = *(u32 *)(r10 - 8);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("0: r1 = 1")
|
||||
__xlated("1: r2 = 2")
|
||||
/* not patched, spills for -8, -16 not removed */
|
||||
__xlated("2: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: *(u64 *)(r10 -16) = r2")
|
||||
__xlated("5: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("7: r2 = *(u64 *)(r10 -16)")
|
||||
__xlated("8: r1 = *(u64 *)(r10 -8)")
|
||||
/* patched, spills for -24, -32 removed */
|
||||
__xlated("10: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("12: exit")
|
||||
__success
|
||||
__naked void min_stack_offset(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"r2 = 2;"
|
||||
/* this call won't be patched */
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"*(u64 *)(r10 - 16) = r2;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r2 = *(u64 *)(r10 - 16);"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
/* this call would be patched */
|
||||
"*(u64 *)(r10 - 24) = r1;"
|
||||
"*(u64 *)(r10 - 32) = r2;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r2 = *(u64 *)(r10 - 32);"
|
||||
"r1 = *(u64 *)(r10 - 24);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u64 *)(r10 -8)")
|
||||
__success
|
||||
__naked void bad_fixed_read(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"r1 = *(u64 *)(r1 - 0);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u64 *)(r10 -8)")
|
||||
__success
|
||||
__naked void bad_fixed_write(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"*(u64 *)(r1 - 0) = r1;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("6: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("8: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("10: r1 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void bad_varying_read(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = *(u64 *)(r1 + 0);" /* random scalar value */
|
||||
"r6 &= 0x7;" /* r6 range [0..7] */
|
||||
"r6 += 0x2;" /* r6 range [2..9] */
|
||||
"r7 = 0;"
|
||||
"r7 -= r6;" /* r7 range [-9..-2] */
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"r1 = r10;"
|
||||
"r1 += r7;"
|
||||
"r1 = *(u8 *)(r1 - 0);" /* touches slot [-16..-9] where spills are stored */
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("6: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("8: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("10: r1 = *(u64 *)(r10 -16)")
|
||||
__success
|
||||
__naked void bad_varying_write(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r6 = *(u64 *)(r1 + 0);" /* random scalar value */
|
||||
"r6 &= 0x7;" /* r6 range [0..7] */
|
||||
"r6 += 0x2;" /* r6 range [2..9] */
|
||||
"r7 = 0;"
|
||||
"r7 -= r6;" /* r7 range [-9..-2] */
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"r1 = r10;"
|
||||
"r1 += r7;"
|
||||
"*(u8 *)(r1 - 0) = r7;" /* touches slot [-16..-9] where spills are stored */
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u64 *)(r10 -8)")
|
||||
__success
|
||||
__naked void bad_write_in_subprog(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"call bad_write_in_subprog_aux;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
__used
|
||||
__naked static void bad_write_in_subprog_aux(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r0 = 1;"
|
||||
"*(u64 *)(r1 - 0) = r0;" /* invalidates nocsr contract for caller: */
|
||||
"exit;" /* caller stack at -8 used outside of the pattern */
|
||||
::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__xlated("1: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u64 *)(r10 -8)")
|
||||
__success
|
||||
__naked void bad_helper_write(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
/* nocsr pattern with stack offset -8 */
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"r2 = 1;"
|
||||
"r3 = 42;"
|
||||
/* read dst is fp[-8], thus nocsr rewrite not applied */
|
||||
"call %[bpf_probe_read_kernel];"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id),
|
||||
__imm(bpf_probe_read_kernel)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
/* main, not patched */
|
||||
__xlated("1: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r1 = *(u64 *)(r10 -8)")
|
||||
__xlated("9: call pc+1")
|
||||
__xlated("10: exit")
|
||||
/* subprogram, patched */
|
||||
__xlated("11: r1 = 1")
|
||||
__xlated("13: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("15: exit")
|
||||
__success
|
||||
__naked void invalidate_one_subprog(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"r1 = r10;"
|
||||
"r1 += -8;"
|
||||
"r1 = *(u64 *)(r1 - 0);"
|
||||
"call invalidate_one_subprog_aux;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
__used
|
||||
__naked static void invalidate_one_subprog_aux(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
/* main */
|
||||
__xlated("0: r1 = 1")
|
||||
__xlated("2: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("4: call pc+1")
|
||||
__xlated("5: exit")
|
||||
/* subprogram */
|
||||
__xlated("6: r1 = 1")
|
||||
__xlated("8: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("10: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("11: exit")
|
||||
__success
|
||||
__naked void subprogs_use_independent_offsets(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"call subprogs_use_independent_offsets_aux;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
__used
|
||||
__naked static void subprogs_use_independent_offsets_aux(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 24) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 24);"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 8")
|
||||
__xlated("2: r0 = &(void __percpu *)(r0)")
|
||||
__success
|
||||
__naked void helper_call_does_not_prevent_nocsr(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"call %[bpf_get_prandom_u32];"
|
||||
"r1 = *(u64 *)(r10 - 8);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id),
|
||||
__imm(bpf_get_prandom_u32)
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 16")
|
||||
/* may_goto counter at -16 */
|
||||
__xlated("0: *(u64 *)(r10 -16) =")
|
||||
__xlated("1: r1 = 1")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
/* may_goto expansion starts */
|
||||
__xlated("5: r11 = *(u64 *)(r10 -16)")
|
||||
__xlated("6: if r11 == 0x0 goto pc+3")
|
||||
__xlated("7: r11 -= 1")
|
||||
__xlated("8: *(u64 *)(r10 -16) = r11")
|
||||
/* may_goto expansion ends */
|
||||
__xlated("9: *(u64 *)(r10 -8) = r1")
|
||||
__xlated("10: exit")
|
||||
__success
|
||||
__naked void may_goto_interaction(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
".8byte %[may_goto];"
|
||||
/* just touch some stack at -8 */
|
||||
"*(u64 *)(r10 - 8) = r1;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm(bpf_get_smp_processor_id),
|
||||
__imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
|
||||
: __clobber_all);
|
||||
}
|
||||
|
||||
__used
|
||||
__naked static void dummy_loop_callback(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r0 = 0;"
|
||||
"exit;"
|
||||
::: __clobber_all);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 32+0")
|
||||
__xlated("2: r1 = 1")
|
||||
__xlated("3: w0 =")
|
||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r0 = *(u32 *)(r0 +0)")
|
||||
/* bpf_loop params setup */
|
||||
__xlated("6: r2 =")
|
||||
__xlated("7: r3 = 0")
|
||||
__xlated("8: r4 = 0")
|
||||
/* ... part of the inlined bpf_loop */
|
||||
__xlated("12: *(u64 *)(r10 -32) = r6")
|
||||
__xlated("13: *(u64 *)(r10 -24) = r7")
|
||||
__xlated("14: *(u64 *)(r10 -16) = r8")
|
||||
/* ... */
|
||||
__xlated("21: call pc+8") /* dummy_loop_callback */
|
||||
/* ... last insns of the bpf_loop_interaction1 */
|
||||
__xlated("28: r0 = 0")
|
||||
__xlated("29: exit")
|
||||
/* dummy_loop_callback */
|
||||
__xlated("30: r0 = 0")
|
||||
__xlated("31: exit")
|
||||
__success
|
||||
__naked int bpf_loop_interaction1(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 1;"
|
||||
/* nocsr stack region at -16, but could be removed */
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"r2 = %[dummy_loop_callback];"
|
||||
"r3 = 0;"
|
||||
"r4 = 0;"
|
||||
"call %[bpf_loop];"
|
||||
"r0 = 0;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(dummy_loop_callback),
|
||||
__imm(bpf_get_smp_processor_id),
|
||||
__imm(bpf_loop)
|
||||
: __clobber_common
|
||||
);
|
||||
}
|
||||
|
||||
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4) __msg("stack depth 40+0")
|
||||
/* call bpf_get_smp_processor_id */
|
||||
__xlated("2: r1 = 42")
|
||||
__xlated("3: w0 =")
|
||||
__xlated("4: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("5: r0 = *(u32 *)(r0 +0)")
|
||||
/* call bpf_get_prandom_u32 */
|
||||
__xlated("6: *(u64 *)(r10 -16) = r1")
|
||||
__xlated("7: call")
|
||||
__xlated("8: r1 = *(u64 *)(r10 -16)")
|
||||
/* ... */
|
||||
/* ... part of the inlined bpf_loop */
|
||||
__xlated("15: *(u64 *)(r10 -40) = r6")
|
||||
__xlated("16: *(u64 *)(r10 -32) = r7")
|
||||
__xlated("17: *(u64 *)(r10 -24) = r8")
|
||||
__success
|
||||
__naked int bpf_loop_interaction2(void)
|
||||
{
|
||||
asm volatile (
|
||||
"r1 = 42;"
|
||||
/* nocsr stack region at -16, cannot be removed */
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"*(u64 *)(r10 - 16) = r1;"
|
||||
"call %[bpf_get_prandom_u32];"
|
||||
"r1 = *(u64 *)(r10 - 16);"
|
||||
"r2 = %[dummy_loop_callback];"
|
||||
"r3 = 0;"
|
||||
"r4 = 0;"
|
||||
"call %[bpf_loop];"
|
||||
"r0 = 0;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_ptr(dummy_loop_callback),
|
||||
__imm(bpf_get_smp_processor_id),
|
||||
__imm(bpf_get_prandom_u32),
|
||||
__imm(bpf_loop)
|
||||
: __clobber_common
|
||||
);
|
||||
}
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4)
|
||||
__msg("stack depth 512+0")
|
||||
/* just to print xlated version when debugging */
|
||||
__xlated("r0 = &(void __percpu *)(r0)")
|
||||
__success
|
||||
/* cumulative_stack_depth() stack usage is MAX_BPF_STACK,
|
||||
* called subprogram uses an additional slot for nocsr spill/fill,
|
||||
* since nocsr spill/fill could be removed the program still fits
|
||||
* in MAX_BPF_STACK and should be accepted.
|
||||
*/
|
||||
__naked int cumulative_stack_depth(void)
|
||||
{
|
||||
asm volatile(
|
||||
"r1 = 42;"
|
||||
"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
|
||||
"call cumulative_stack_depth_subprog;"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_const(max_bpf_stack, MAX_BPF_STACK)
|
||||
: __clobber_all
|
||||
);
|
||||
}
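/* The only stack use here is a removable nocsr spill/fill, so this
 * subprogram adds nothing to the cumulative stack depth.
 */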
__used
__naked static void cumulative_stack_depth_subprog(void)
{
	asm volatile (
	"*(u64 *)(r10 - 8) = r1;"
	"call %[bpf_get_smp_processor_id];"
	"r1 = *(u64 *)(r10 - 8);"
	"exit;"
	:: __imm(bpf_get_smp_processor_id) : __clobber_all);
}

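/* The main program itself uses all of MAX_BPF_STACK; the extra slot
 * at -520 is touched only by the removable nocsr spill/fill, so per
 * the __xlated assertions the final program needs exactly 512 bytes.
 */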
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4)
|
||||
__msg("stack depth 512")
|
||||
__xlated("0: r1 = 42")
|
||||
__xlated("1: *(u64 *)(r10 -512) = r1")
|
||||
__xlated("2: w0 = ")
|
||||
__xlated("3: r0 = &(void __percpu *)(r0)")
|
||||
__xlated("4: r0 = *(u32 *)(r0 +0)")
|
||||
__xlated("5: exit")
|
||||
__success
|
||||
__naked int nocsr_max_stack_ok(void)
|
||||
{
|
||||
asm volatile(
|
||||
"r1 = 42;"
|
||||
"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
|
||||
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_const(max_bpf_stack, MAX_BPF_STACK),
|
||||
__imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
|
||||
__imm(bpf_get_smp_processor_id)
|
||||
: __clobber_all
|
||||
);
|
||||
}
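/* Same layout, but -520 is also used around bpf_get_prandom_u32()
 * (see the comment inside), which blocks the nocsr rewrite; stack
 * depth stays 520 > MAX_BPF_STACK and the program is rejected.
 */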
SEC("raw_tp")
|
||||
__arch_x86_64
|
||||
__log_level(4)
|
||||
__msg("stack depth 520")
|
||||
__failure
|
||||
__naked int nocsr_max_stack_fail(void)
|
||||
{
|
||||
asm volatile(
|
||||
"r1 = 42;"
|
||||
"*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
|
||||
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
|
||||
"call %[bpf_get_smp_processor_id];"
|
||||
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
|
||||
/* call to prandom blocks nocsr rewrite */
|
||||
"*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
|
||||
"call %[bpf_get_prandom_u32];"
|
||||
"r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
|
||||
"exit;"
|
||||
:
|
||||
: __imm_const(max_bpf_stack, MAX_BPF_STACK),
|
||||
__imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
|
||||
__imm(bpf_get_smp_processor_id),
|
||||
__imm(bpf_get_prandom_u32)
|
||||
: __clobber_all
|
||||
);
|
||||
}
char _license[] SEC("license") = "GPL";

@@ -7,6 +7,7 @@
 #include <bpf/btf.h>

 #include "autoconf_helper.h"
+#include "disasm_helpers.h"
 #include "unpriv_helpers.h"
 #include "cap_helpers.h"

@@ -19,10 +20,12 @@
 #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
 #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
 #define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex="
+#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated="
 #define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
 #define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
 #define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
 #define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv="
+#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv="
 #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
 #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
 #define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
@@ -31,6 +34,7 @@
 #define TEST_TAG_AUXILIARY "comment:test_auxiliary"
 #define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
 #define TEST_BTF_PATH "comment:test_btf_path="
+#define TEST_TAG_ARCH "comment:test_arch="

 /* Warning: duplicated in bpf_misc.h */
 #define POINTER_VALUE 0xcafe4all
@@ -55,11 +59,16 @@ struct expect_msg {
 	regex_t regex;
 };

+struct expected_msgs {
+	struct expect_msg *patterns;
+	size_t cnt;
+};
+
 struct test_subspec {
 	char *name;
 	bool expect_failure;
-	struct expect_msg *expect_msgs;
-	size_t expect_msg_cnt;
+	struct expected_msgs expect_msgs;
+	struct expected_msgs expect_xlated;
 	int retval;
 	bool execute;
 };
@@ -72,6 +81,7 @@ struct test_spec {
 	int log_level;
 	int prog_flags;
 	int mode_mask;
+	int arch_mask;
 	bool auxiliary;
 	bool valid;
 };
@@ -96,44 +106,47 @@ void test_loader_fini(struct test_loader *tester)
 	free(tester->log_buf);
 }

-static void free_test_spec(struct test_spec *spec)
+static void free_msgs(struct expected_msgs *msgs)
 {
 	int i;

+	for (i = 0; i < msgs->cnt; i++)
+		if (msgs->patterns[i].regex_str)
+			regfree(&msgs->patterns[i].regex);
+	free(msgs->patterns);
+	msgs->patterns = NULL;
+	msgs->cnt = 0;
+}
+
+static void free_test_spec(struct test_spec *spec)
+{
 	/* Deallocate expect_msgs arrays. */
-	for (i = 0; i < spec->priv.expect_msg_cnt; i++)
-		if (spec->priv.expect_msgs[i].regex_str)
-			regfree(&spec->priv.expect_msgs[i].regex);
-	for (i = 0; i < spec->unpriv.expect_msg_cnt; i++)
-		if (spec->unpriv.expect_msgs[i].regex_str)
-			regfree(&spec->unpriv.expect_msgs[i].regex);
+	free_msgs(&spec->priv.expect_msgs);
+	free_msgs(&spec->unpriv.expect_msgs);
+	free_msgs(&spec->priv.expect_xlated);
+	free_msgs(&spec->unpriv.expect_xlated);

 	free(spec->priv.name);
 	free(spec->unpriv.name);
-	free(spec->priv.expect_msgs);
-	free(spec->unpriv.expect_msgs);

 	spec->priv.name = NULL;
 	spec->unpriv.name = NULL;
-	spec->priv.expect_msgs = NULL;
-	spec->unpriv.expect_msgs = NULL;
 }

-static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec)
+static int push_msg(const char *substr, const char *regex_str, struct expected_msgs *msgs)
 {
 	void *tmp;
 	int regcomp_res;
 	char error_msg[100];
 	struct expect_msg *msg;

-	tmp = realloc(subspec->expect_msgs,
-		      (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg));
+	tmp = realloc(msgs->patterns,
+		      (1 + msgs->cnt) * sizeof(struct expect_msg));
 	if (!tmp) {
 		ASSERT_FAIL("failed to realloc memory for messages\n");
 		return -ENOMEM;
 	}
-	subspec->expect_msgs = tmp;
-	msg = &subspec->expect_msgs[subspec->expect_msg_cnt];
+	msgs->patterns = tmp;
+	msg = &msgs->patterns[msgs->cnt];

 	if (substr) {
 		msg->substr = substr;
@@ -150,7 +163,7 @@ static int push_msg(const char *substr, const char *regex_str, struct test_subsp
 		}
 	}

-	subspec->expect_msg_cnt += 1;
+	msgs->cnt += 1;
 	return 0;
 }

@@ -202,6 +215,12 @@ static void update_flags(int *flags, int flag, bool clear)
 		*flags |= flag;
 }

+enum arch {
+	ARCH_X86_64 = 0x1,
+	ARCH_ARM64 = 0x2,
+	ARCH_RISCV64 = 0x4,
+};
+
 /* Uses btf_decl_tag attributes to describe the expected test
  * behavior, see bpf_misc.h for detailed description of each attribute
  * and attribute combinations.
@@ -215,6 +234,7 @@ static int parse_test_spec(struct test_loader *tester,
 	bool has_unpriv_result = false;
 	bool has_unpriv_retval = false;
 	int func_id, i, err = 0;
+	u32 arch_mask = 0;
 	struct btf *btf;

 	memset(spec, 0, sizeof(*spec));
@@ -272,25 +292,37 @@ static int parse_test_spec(struct test_loader *tester,
 			spec->mode_mask |= UNPRIV;
 		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
 			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
-			err = push_msg(msg, NULL, &spec->priv);
+			err = push_msg(msg, NULL, &spec->priv.expect_msgs);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= PRIV;
 		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
 			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
-			err = push_msg(msg, NULL, &spec->unpriv);
+			err = push_msg(msg, NULL, &spec->unpriv.expect_msgs);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= UNPRIV;
 		} else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) {
 			msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1;
-			err = push_msg(NULL, msg, &spec->priv);
+			err = push_msg(NULL, msg, &spec->priv.expect_msgs);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= PRIV;
 		} else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) {
 			msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1;
-			err = push_msg(NULL, msg, &spec->unpriv);
+			err = push_msg(NULL, msg, &spec->unpriv.expect_msgs);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= UNPRIV;
+		} else if (str_has_pfx(s, TEST_TAG_EXPECT_XLATED_PFX)) {
+			msg = s + sizeof(TEST_TAG_EXPECT_XLATED_PFX) - 1;
+			err = push_msg(msg, NULL, &spec->priv.expect_xlated);
+			if (err)
+				goto cleanup;
+			spec->mode_mask |= PRIV;
+		} else if (str_has_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV)) {
+			msg = s + sizeof(TEST_TAG_EXPECT_XLATED_PFX_UNPRIV) - 1;
+			err = push_msg(msg, NULL, &spec->unpriv.expect_xlated);
+			if (err)
+				goto cleanup;
+			spec->mode_mask |= UNPRIV;
@@ -341,11 +373,26 @@ static int parse_test_spec(struct test_loader *tester,
 					goto cleanup;
 				update_flags(&spec->prog_flags, flags, clear);
 			}
+		} else if (str_has_pfx(s, TEST_TAG_ARCH)) {
+			val = s + sizeof(TEST_TAG_ARCH) - 1;
+			if (strcmp(val, "X86_64") == 0) {
+				arch_mask |= ARCH_X86_64;
+			} else if (strcmp(val, "ARM64") == 0) {
+				arch_mask |= ARCH_ARM64;
+			} else if (strcmp(val, "RISCV64") == 0) {
+				arch_mask |= ARCH_RISCV64;
+			} else {
+				PRINT_FAIL("bad arch spec: '%s'", val);
+				err = -EINVAL;
+				goto cleanup;
+			}
 		} else if (str_has_pfx(s, TEST_BTF_PATH)) {
 			spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
 		}
 	}

+	spec->arch_mask = arch_mask;
+
 	if (spec->mode_mask == 0)
 		spec->mode_mask = PRIV;

@@ -387,11 +434,22 @@ static int parse_test_spec(struct test_loader *tester,
 		spec->unpriv.execute = spec->priv.execute;
 	}

-	if (!spec->unpriv.expect_msgs) {
-		for (i = 0; i < spec->priv.expect_msg_cnt; i++) {
-			struct expect_msg *msg = &spec->priv.expect_msgs[i];
+	if (spec->unpriv.expect_msgs.cnt == 0) {
+		for (i = 0; i < spec->priv.expect_msgs.cnt; i++) {
+			struct expect_msg *msg = &spec->priv.expect_msgs.patterns[i];

-			err = push_msg(msg->substr, msg->regex_str, &spec->unpriv);
+			err = push_msg(msg->substr, msg->regex_str,
+				       &spec->unpriv.expect_msgs);
 			if (err)
 				goto cleanup;
 		}
 	}
+	if (spec->unpriv.expect_xlated.cnt == 0) {
+		for (i = 0; i < spec->priv.expect_xlated.cnt; i++) {
+			struct expect_msg *msg = &spec->priv.expect_xlated.patterns[i];
+
+			err = push_msg(msg->substr, msg->regex_str,
+				       &spec->unpriv.expect_xlated);
+			if (err)
+				goto cleanup;
+		}
@@ -434,7 +492,6 @@ static void prepare_case(struct test_loader *tester,
 	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

 	tester->log_buf[0] = '\0';
-	tester->next_match_pos = 0;
 }

 static void emit_verifier_log(const char *log_buf, bool force)
@@ -444,39 +501,41 @@ static void emit_verifier_log(const char *log_buf, bool force)
 	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
 }

-static void validate_case(struct test_loader *tester,
-			  struct test_subspec *subspec,
-			  struct bpf_object *obj,
-			  struct bpf_program *prog,
-			  int load_err)
+static void emit_xlated(const char *xlated, bool force)
 {
-	int i, j, err;
-	char *match;
-	regmatch_t reg_match[1];
+	if (!force && env.verbosity == VERBOSE_NONE)
+		return;
+	fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated);
+}

-	for (i = 0; i < subspec->expect_msg_cnt; i++) {
-		struct expect_msg *msg = &subspec->expect_msgs[i];
+static void validate_msgs(char *log_buf, struct expected_msgs *msgs,
+			  void (*emit_fn)(const char *buf, bool force))
+{
+	regmatch_t reg_match[1];
+	const char *log = log_buf;
+	int i, j, err;
+
+	for (i = 0; i < msgs->cnt; i++) {
+		struct expect_msg *msg = &msgs->patterns[i];
+		const char *match = NULL;

 		if (msg->substr) {
-			match = strstr(tester->log_buf + tester->next_match_pos, msg->substr);
+			match = strstr(log, msg->substr);
 			if (match)
-				tester->next_match_pos = match - tester->log_buf + strlen(msg->substr);
+				log += strlen(msg->substr);
 		} else {
-			err = regexec(&msg->regex,
-				      tester->log_buf + tester->next_match_pos, 1, reg_match, 0);
+			err = regexec(&msg->regex, log, 1, reg_match, 0);
 			if (err == 0) {
-				match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so;
-				tester->next_match_pos += reg_match[0].rm_eo;
-			} else {
-				match = NULL;
+				match = log + reg_match[0].rm_so;
+				log += reg_match[0].rm_eo;
 			}
 		}

 		if (!ASSERT_OK_PTR(match, "expect_msg")) {
 			if (env.verbosity == VERBOSE_NONE)
-				emit_verifier_log(tester->log_buf, true /*force*/);
+				emit_fn(log_buf, true /*force*/);
 			for (j = 0; j <= i; j++) {
-				msg = &subspec->expect_msgs[j];
+				msg = &msgs->patterns[j];
 				fprintf(stderr, "%s %s: '%s'\n",
 					j < i ? "MATCHED " : "EXPECTED",
 					msg->substr ? "SUBSTR" : " REGEX",
@@ -611,6 +670,51 @@ static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subs
 	return true;
 }

+/* Get a disassembly of BPF program after verifier applies all rewrites */
+static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz)
+{
+	struct bpf_insn *insn_start = NULL, *insn, *insn_end;
+	__u32 insns_cnt = 0, i;
+	char buf[64];
+	FILE *out = NULL;
+	int err;
+
+	err = get_xlated_program(prog_fd, &insn_start, &insns_cnt);
+	if (!ASSERT_OK(err, "get_xlated_program"))
+		goto out;
+	out = fmemopen(text, text_sz, "w");
+	if (!ASSERT_OK_PTR(out, "open_memstream"))
+		goto out;
+	insn_end = insn_start + insns_cnt;
+	insn = insn_start;
+	while (insn < insn_end) {
+		i = insn - insn_start;
+		insn = disasm_insn(insn, buf, sizeof(buf));
+		fprintf(out, "%d: %s\n", i, buf);
+	}
+	fflush(out);
+
+out:
+	free(insn_start);
+	if (out)
+		fclose(out);
+	return err;
+}
+
+static bool run_on_current_arch(int arch_mask)
+{
+	if (arch_mask == 0)
+		return true;
+#if defined(__x86_64__)
+	return arch_mask & ARCH_X86_64;
+#elif defined(__aarch64__)
+	return arch_mask & ARCH_ARM64;
+#elif defined(__riscv) && __riscv_xlen == 64
+	return arch_mask & ARCH_RISCV64;
+#endif
+	return false;
+}
+
 /* this function is forced noinline and has short generic name to look better
  * in test_progs output (in case of a failure)
  */
@@ -635,6 +739,11 @@ void run_subtest(struct test_loader *tester,
 	if (!test__start_subtest(subspec->name))
 		return;

+	if (!run_on_current_arch(spec->arch_mask)) {
+		test__skip();
+		return;
+	}
+
 	if (unpriv) {
 		if (!can_execute_unpriv(tester, spec)) {
 			test__skip();
@@ -695,9 +804,17 @@
 			goto tobj_cleanup;
 		}
 	}

 	emit_verifier_log(tester->log_buf, false /*force*/);
-	validate_case(tester, subspec, tobj, tprog, err);
+	validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log);
+
+	if (subspec->expect_xlated.cnt) {
+		err = get_xlated_program_text(bpf_program__fd(tprog),
+					      tester->log_buf, tester->log_buf_sz);
+		if (err)
+			goto tobj_cleanup;
+		emit_xlated(tester->log_buf, false /*force*/);
+		validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated);
+	}

 	if (should_do_test_run(spec, subspec)) {
 		/* For some reason test_verifier executes programs

@@ -447,7 +447,6 @@ typedef int (*pre_execution_cb)(struct bpf_object *obj);
 struct test_loader {
 	char *log_buf;
 	size_t log_buf_sz;
-	size_t next_match_pos;
 	pre_execution_cb pre_execution_cb;

 	struct bpf_object *obj;

@@ -7,6 +7,7 @@
 #include <errno.h>
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
+#include "disasm.h"
 #include "test_progs.h"
 #include "testing_helpers.h"
 #include <linux/membarrier.h>