Use instruction (jump) history to record instructions that performed register
spill/fill to/from stack, regardless of whether this was done through the
read-only r10 register or any other register after copying r10 into it *and*
potentially adjusting the offset.

To make this work reliably, we push extra per-instruction flags into the
instruction history, encoding the stack slot index (spi) and stack frame number
in 10 extra flag bits taken away from prev_idx in the instruction history. We
don't touch the idx field for maximum performance, as it's checked most
frequently during backtracking.

This change removes basically the last remaining practical limitation of the
precision backtracking logic in the BPF verifier. It fixes known deficiencies,
but also opens up new opportunities to reduce the number of verified states,
explored in the subsequent patches.

There are only three differences in selftests' BPF object files according to
veristat, all in the positive direction (fewer states).

File                                     Program        Insns (A)  Insns (B)  Insns (DIFF)   States (A)  States (B)  States (DIFF)
---------------------------------------  -------------  ---------  ---------  -------------  ----------  ----------  -------------
test_cls_redirect_dynptr.bpf.linked3.o   cls_redirect        2987       2864  -123 (-4.12%)         240         231    -9 (-3.75%)
xdp_synproxy_kern.bpf.linked3.o          syncookie_tc       82848      82661  -187 (-0.23%)        5107        5073   -34 (-0.67%)
xdp_synproxy_kern.bpf.linked3.o          syncookie_xdp      85116      84964  -152 (-0.18%)        5162        5130   -32 (-0.62%)

Note, I avoided renaming jmp_history to the more generic insn_hist to minimize
the number of lines changed and potential merge conflicts between the bpf and
bpf-next trees.

Notice also that the cur_hist_entry pointer is reset to NULL at the beginning
of the instruction verification loop. This pointer avoids the problem of
relying on the last jump history entry's insn_idx to determine whether we
already have an entry for the current instruction or not. It can happen that we
added a jump history entry because the current instruction is_jmp_point(), but
we also need to add instruction flags for stack access. In this case, we don't
want two entries, so we need to reuse the last added entry, if it is present.
Relying on insn_idx comparison has the same ambiguity problem as the one that
was fixed recently in [0], so we avoid that.

  [0] https://patchwork.kernel.org/project/netdevbpf/patch/20231110002638.4168352-3-andrii@kernel.org/

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Reported-by: Tao Lyu <tao.lyu@epfl.ch>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231205184248.1502704-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
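The flag encoding described above (stack slot index plus frame number squeezed into 10 bits
borrowed from prev_idx, with idx left untouched) can be illustrated with a small sketch. This
is not the kernel's verbatim definition; the struct, macro names, and exact bit split below are
assumptions chosen only to fit the 10-bit budget the commit message describes.

/* Illustrative sketch: pack a stack-access marker, stack slot index (spi),
 * and frame number into 10 flag bits carved out of prev_idx. The idx field
 * stays a plain integer so the hot backtracking path is unaffected.
 * All names and bit widths here are assumptions for illustration.
 */
#define HIST_F_FRAMENO_MASK	0x7		/* assumed: 3 bits for frame number */
#define HIST_F_SPI_SHIFT	3
#define HIST_F_SPI_MASK		0x3f		/* assumed: 6 bits for stack slot index */
#define HIST_F_STACK_ACCESS	(1U << 9)	/* assumed: 1 bit marks "insn touched the stack" */

struct insn_hist_entry {
	unsigned int idx;		/* instruction index, checked most often, left untouched */
	unsigned int prev_idx : 22;	/* previous instruction in the history chain */
	unsigned int flags : 10;	/* spi + frame number + stack-access marker */
};

static unsigned int encode_stack_access(int spi, int frameno)
{
	return HIST_F_STACK_ACCESS |
	       ((spi & HIST_F_SPI_MASK) << HIST_F_SPI_SHIFT) |
	       (frameno & HIST_F_FRAMENO_MASK);
}

static int decode_spi(unsigned int flags)
{
	return (flags >> HIST_F_SPI_SHIFT) & HIST_F_SPI_MASK;
}

static int decode_frameno(unsigned int flags)
{
	return flags & HIST_F_FRAMENO_MASK;
}

During backtracking, an entry whose flags carry the stack-access marker lets the verifier
recover which stack slot (and in which frame) was spilled or filled, regardless of which
register happened to hold the stack pointer at that instruction.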
{
	"precise: test 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.errstr =
	"mark_precise: frame0: last_idx 26 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 10\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: regs=r0,r9 stack= before 15\
	mark_precise: frame0: regs=r0,r9 stack= before 14\
	mark_precise: frame0: regs=r9 stack= before 13\
	mark_precise: frame0: regs=r9 stack= before 12\
	mark_precise: frame0: regs=r9 stack= before 11\
	mark_precise: frame0: regs=r9 stack= before 10\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: test 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr =
	"26: (85) call bpf_probe_read_kernel#113\
	mark_precise: frame0: last_idx 26 first_idx 22\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 20 first_idx 20\
	mark_precise: frame0: regs=r2,r9 stack= before 20\
	mark_precise: frame0: parent state regs=r2,r9 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 17\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: cross frame pruning",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"precise: ST zero to stack insn is supported",
	.insns = {
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	/* not a register spill, so we stop precision propagation for R4 here */
	BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs=r4 stack= before 3\
	mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r0 stack= before 4\
	5: R0=-1 R4=0",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: STX insn causing spi > allocated_stack",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	/* make later reg spill more interesting by having somewhat known scalar */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 6 first_idx 4\
	mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
	mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
	mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 3 first_idx 3\
	mark_precise: frame0: regs=r0 stack= before 3: (55) if r3 != 0x7b goto pc+0\
	mark_precise: frame0: regs=r0 stack= before 2: (bf) r3 = r10\
	mark_precise: frame0: regs=r0 stack= before 1: (57) r0 &= 255\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 0 first_idx 0\
	mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7\
	mark_precise: frame0: last_idx 7 first_idx 7\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
	BPF_MOV64_IMM(BPF_REG_2, 0x1000),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ | F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
	.result = REJECT,
},
{
	"precise: program doesn't prematurely prune branches",
	.insns = {
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
	BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
	BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
	BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
	BPF_LD_MAP_FD(BPF_REG_4, 0),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 13 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "register with unbounded min value is not allowed",
},