bpf: use the least significant byte for the nr_args in trampoline

Currently, ((u64 *)ctx)[-1] is used to store nr_args in the trampoline.
However, one byte is enough to hold this information, so use only the
least significant byte of ((u64 *)ctx)[-1] for nr_args and reserve the
remaining bytes for other uses.

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Link: https://lore.kernel.org/r/20260124062008.8657-3-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit f1b56b3cbd
parent 2d419c4465
committed by Alexei Starovoitov
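
The hunks against do_misc_fixups() below patch the helper inlining in the verifier (kernel/bpf/verifier.c); the later hunks patch the helper implementations themselves (kernel/trace/bpf_trace.c). Not part of the commit, but as a minimal sketch of the layout the patch moves to (the helper names here are hypothetical, for illustration only): the trampoline keeps per-call metadata in the u64 slot just below the program's ctx pointer, and after this change only bits 0-7 of that slot carry the argument count, so every reader masks with 0xFF.

#include <stdint.h>

/* Sketch of the ((u64 *)ctx)[-1] layout after this patch:
 * bits 0-7 hold nr_args, bits 8-63 are reserved for later use.
 * pack_nr_args()/read_nr_args() are illustrative, not kernel APIs.
 */
#define CTX_NR_ARGS_MASK 0xFFULL

static inline uint64_t pack_nr_args(uint64_t slot, uint8_t nr_args)
{
	return (slot & ~CTX_NR_ARGS_MASK) | nr_args;
}

static inline uint64_t read_nr_args(const uint64_t *ctx)
{
	return ctx[-1] & CTX_NR_ARGS_MASK;	/* same masking the helpers now do */
}
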
@@ -23747,19 +23747,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 
 				/* skip 'void *__data' in btf_trace_##name() and save to reg0 */
 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, nr_args - 1);
+				cnt = 1;
 			} else {
 				/* Load nr_args from ctx - 8 */
 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
+				cnt = 2;
 			}
-			insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
-			insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
-			insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
-			insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
-			insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
-			insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
-			insn_buf[7] = BPF_JMP_A(1);
-			insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
-			cnt = 9;
+			insn_buf[cnt++] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
+			insn_buf[cnt++] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
+			insn_buf[cnt++] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
+			insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
+			insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
+			insn_buf[cnt++] = BPF_MOV64_IMM(BPF_REG_0, 0);
+			insn_buf[cnt++] = BPF_JMP_A(1);
+			insn_buf[cnt++] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
 
 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
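
For readability (not part of the commit): the patched instruction sequence for inlining bpf_get_func_arg() in the load-from-ctx case is roughly equivalent to the C sketch below; the only functional change is the new BPF_AND that strips everything above the low byte before the bounds check. The function name is made up for illustration.

#include <errno.h>
#include <stdint.h>

/* Rough C equivalent of the inlined bpf_get_func_arg() sequence above
 * (illustrative only; the real thing is emitted as BPF instructions).
 */
static long inlined_get_func_arg(uint64_t *ctx, uint32_t n, uint64_t *value)
{
	uint64_t nr_args = ctx[-1] & 0xFF;	/* BPF_LDX_MEM + new BPF_AND */

	if (n >= nr_args)			/* BPF_JMP32_REG(BPF_JGE, ...) */
		return -EINVAL;
	*value = ctx[n];			/* LSH by 3, ADD ctx, LDX, STX */
	return 0;
}
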
@@ -23779,12 +23781,13 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			    eatype == BPF_MODIFY_RETURN) {
 				/* Load nr_args from ctx - 8 */
 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
-				insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
-				insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
-				insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
-				insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
-				insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
-				cnt = 6;
+				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
+				insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+				insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+				insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
+				insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
+				insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
+				cnt = 7;
 			} else {
 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
 				cnt = 1;
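
Similarly (again not part of the commit), the inlined bpf_get_func_ret() sequence for fexit/fmod_ret programs now corresponds to roughly this C, with the masked nr_args used as the index of the return-value slot; the function name is illustrative.

#include <stdint.h>

/* Rough C equivalent of the inlined bpf_get_func_ret() sequence above. */
static long inlined_get_func_ret(uint64_t *ctx, uint64_t *value)
{
	uint64_t nr_args = ctx[-1] & 0xFF;	/* LDX from ctx - 8 + new BPF_AND */

	*value = ctx[nr_args];			/* return value sits after the args */
	return 0;
}
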
@@ -23808,15 +23811,19 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 
 				/* skip 'void *__data' in btf_trace_##name() and save to reg0 */
 				insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, nr_args - 1);
+				cnt = 1;
 			} else {
 				/* Load nr_args from ctx - 8 */
 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+				insn_buf[1] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFF);
+				cnt = 2;
 			}
 
-			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
+			delta += cnt - 1;
 			env->prog = prog = new_prog;
 			insn = new_prog->insnsi + i + delta;
 			goto next_insn;
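
The bpf_get_func_arg_cnt() inlining above grows to two instructions in the load-from-ctx case, equivalent to the one-liner below; since the patched sequence is no longer a single instruction, the added delta += cnt - 1 keeps the instruction index bookkeeping correct. Sketch only, function name illustrative.

#include <stdint.h>

/* Rough C equivalent of the inlined bpf_get_func_arg_cnt() sequence above. */
static uint64_t inlined_get_func_arg_cnt(const uint64_t *ctx)
{
	return ctx[-1] & 0xFF;	/* LDX from ctx - 8, then the new BPF_AND */
}
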
@@ -1194,7 +1194,7 @@ const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
 {
 	/* This helper call is inlined by verifier. */
-	u64 nr_args = ((u64 *)ctx)[-1];
+	u64 nr_args = ((u64 *)ctx)[-1] & 0xFF;
 
 	if ((u64) n >= nr_args)
 		return -EINVAL;
@@ -1214,7 +1214,7 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
 {
 	/* This helper call is inlined by verifier. */
-	u64 nr_args = ((u64 *)ctx)[-1];
+	u64 nr_args = ((u64 *)ctx)[-1] & 0xFF;
 
 	*value = ((u64 *)ctx)[nr_args];
 	return 0;
@@ -1231,7 +1231,7 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
 {
 	/* This helper call is inlined by verifier. */
-	return ((u64 *)ctx)[-1];
+	return ((u64 *)ctx)[-1] & 0xFF;
 }
 
 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
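
From the BPF program side nothing changes: the helpers keep their signatures and simply return the masked count. A minimal usage sketch, assuming a libbpf/vmlinux.h build environment; the fentry attach point (do_unlinkat) is chosen only for illustration.

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a tracing program using the helpers touched by this patch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/do_unlinkat")
int BPF_PROG(count_args)
{
	__u64 nr_args = bpf_get_func_arg_cnt(ctx);
	__u64 first_arg = 0;

	/* bpf_get_func_arg() bounds-checks n against the (masked) nr_args */
	if (nr_args > 0)
		bpf_get_func_arg(ctx, 0, &first_arg);

	bpf_printk("nr_args=%llu arg0=%llx", nr_args, first_arg);
	return 0;
}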