mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-05 12:16:29 -04:00
Merge branch 'bpf-arm64-add-support-for-bpf-arena'
Puranjay Mohan says:

====================
bpf, arm64: Add support for BPF Arena

Changes in V4 (previous version V3: https://lore.kernel.org/bpf/20240323103057.26499-1-puranjay12@gmail.com/):
- Use more descriptive variable names.
- Use insn_is_cast_user() helper.

Changes in V3 (previous version V2: https://lore.kernel.org/bpf/20240321153102.103832-1-puranjay12@gmail.com/):
- Optimize bpf_addr_space_cast as suggested by Xu Kuohai.

Changes in V2 (previous version V1: https://lore.kernel.org/bpf/20240314150003.123020-1-puranjay12@gmail.com/):
- Fix build warnings by using 5 in place of 32 as the DONT_CLEAR marker. R5 is not mapped to any BPF register so it can safely be used here.

This series adds the support for PROBE_MEM32 and bpf_addr_space_cast instructions to the ARM64 BPF JIT. These two instructions allow the enablement of BPF Arena.

All arena related selftests are passing:

[root@ip-172-31-6-62 bpf]# ./test_progs -a "*arena*"
#3/1 arena_htab/arena_htab_llvm:OK
#3/2 arena_htab/arena_htab_asm:OK
#3 arena_htab:OK
#4/1 arena_list/arena_list_1:OK
#4/2 arena_list/arena_list_1000:OK
#4 arena_list:OK
#434/1 verifier_arena/basic_alloc1:OK
#434/2 verifier_arena/basic_alloc2:OK
#434/3 verifier_arena/basic_alloc3:OK
#434/4 verifier_arena/iter_maps1:OK
#434/5 verifier_arena/iter_maps2:OK
#434/6 verifier_arena/iter_maps3:OK
#434 verifier_arena:OK
Summary: 3/10 PASSED, 0 SKIPPED, 0 FAILED

This will need the patch [1] that introduced the insn_is_cast_user() helper to build.

The verifier_arena selftest could fail in the CI because the following commit [2] is missing from bpf-next:

[1] https://lore.kernel.org/bpf/20240324183226.29674-1-puranjay12@gmail.com/
[2] https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/commit/?id=fa3550dca8f02ec312727653a94115ef3ab68445

Here is a CI run with all dependencies added: https://github.com/kernel-patches/bpf/pull/6641
====================

Link: https://lore.kernel.org/r/20240325150716.4387-1-puranjay12@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
@@ -29,6 +29,7 @@
|
||||
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
|
||||
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
|
||||
#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
|
||||
#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
|
||||
|
||||
#define check_imm(bits, imm) do { \
|
||||
if ((((imm) > 0) && ((imm) >> (bits))) || \
|
||||
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
|
||||
/* temporary register for blinding constants */
|
||||
[BPF_REG_AX] = A64_R(9),
|
||||
[FP_BOTTOM] = A64_R(27),
|
||||
/* callee saved register for kern_vm_start address */
|
||||
[ARENA_VM_START] = A64_R(28),
|
||||
};
|
||||
|
||||
struct jit_ctx {
|
||||
@@ -79,6 +82,7 @@ struct jit_ctx {
|
||||
__le32 *ro_image;
|
||||
u32 stack_size;
|
||||
int fpb_offset;
|
||||
u64 user_vm_start;
|
||||
};
|
||||
|
||||
struct bpf_plt {
|
||||
@@ -295,7 +299,7 @@ static bool is_lsi_offset(int offset, int scale)
|
||||
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
|
||||
|
||||
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
|
||||
bool is_exception_cb)
|
||||
bool is_exception_cb, u64 arena_vm_start)
|
||||
{
|
||||
const struct bpf_prog *prog = ctx->prog;
|
||||
const bool is_main_prog = !bpf_is_subprog(prog);
|
||||
@@ -306,6 +310,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
|
||||
const u8 fp = bpf2a64[BPF_REG_FP];
|
||||
const u8 tcc = bpf2a64[TCALL_CNT];
|
||||
const u8 fpb = bpf2a64[FP_BOTTOM];
|
||||
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
|
||||
const int idx0 = ctx->idx;
|
||||
int cur_offset;
|
||||
|
||||
@@ -411,6 +416,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
|
||||
|
||||
/* Set up function call stack */
|
||||
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
|
||||
|
||||
if (arena_vm_start)
|
||||
emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -738,6 +747,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
|
||||
|
||||
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
|
||||
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
|
||||
#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
|
||||
|
||||
bool ex_handler_bpf(const struct exception_table_entry *ex,
|
||||
struct pt_regs *regs)
|
||||
@@ -745,7 +755,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
|
||||
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
|
||||
int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
|
||||
|
||||
regs->regs[dst_reg] = 0;
|
||||
if (dst_reg != DONT_CLEAR)
|
||||
regs->regs[dst_reg] = 0;
|
||||
regs->pc = (unsigned long)&ex->fixup - offset;
|
||||
return true;
|
||||
}
|
||||
@@ -765,7 +776,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
|
||||
return 0;
|
||||
|
||||
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
|
||||
BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
|
||||
BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
|
||||
BPF_MODE(insn->code) != BPF_PROBE_MEM32)
|
||||
return 0;
|
||||
|
||||
if (!ctx->prog->aux->extable ||
|
||||
@@ -810,6 +822,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
|
||||
|
||||
ex->insn = ins_offset;
|
||||
|
||||
if (BPF_CLASS(insn->code) != BPF_LDX)
|
||||
dst_reg = DONT_CLEAR;
|
||||
|
||||
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
|
||||
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
|
||||
|
||||
@@ -829,12 +844,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
bool extra_pass)
|
||||
{
|
||||
const u8 code = insn->code;
|
||||
const u8 dst = bpf2a64[insn->dst_reg];
|
||||
const u8 src = bpf2a64[insn->src_reg];
|
||||
u8 dst = bpf2a64[insn->dst_reg];
|
||||
u8 src = bpf2a64[insn->src_reg];
|
||||
const u8 tmp = bpf2a64[TMP_REG_1];
|
||||
const u8 tmp2 = bpf2a64[TMP_REG_2];
|
||||
const u8 fp = bpf2a64[BPF_REG_FP];
|
||||
const u8 fpb = bpf2a64[FP_BOTTOM];
|
||||
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
|
||||
const s16 off = insn->off;
|
||||
const s32 imm = insn->imm;
|
||||
const int i = insn - ctx->prog->insnsi;
|
||||
@@ -853,6 +869,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
/* dst = src */
|
||||
case BPF_ALU | BPF_MOV | BPF_X:
|
||||
case BPF_ALU64 | BPF_MOV | BPF_X:
|
||||
if (insn_is_cast_user(insn)) {
|
||||
emit(A64_MOV(0, tmp, src), ctx); // 32-bit mov clears the upper 32 bits
|
||||
emit_a64_mov_i(0, dst, ctx->user_vm_start >> 32, ctx);
|
||||
emit(A64_LSL(1, dst, dst, 32), ctx);
|
||||
emit(A64_CBZ(1, tmp, 2), ctx);
|
||||
emit(A64_ORR(1, tmp, dst, tmp), ctx);
|
||||
emit(A64_MOV(1, dst, tmp), ctx);
|
||||
break;
|
||||
}
|
||||
switch (insn->off) {
|
||||
case 0:
|
||||
emit(A64_MOV(is64, dst, src), ctx);
|
||||
@@ -1237,7 +1262,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
|
||||
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
|
||||
if (ctx->fpb_offset > 0 && src == fp) {
|
||||
case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
|
||||
case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
|
||||
case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
|
||||
case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
|
||||
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
|
||||
emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
|
||||
src = tmp2;
|
||||
}
|
||||
if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
|
||||
src_adj = fpb;
|
||||
off_adj = off + ctx->fpb_offset;
|
||||
} else {
|
||||
@@ -1322,7 +1355,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
case BPF_ST | BPF_MEM | BPF_H:
|
||||
case BPF_ST | BPF_MEM | BPF_B:
|
||||
case BPF_ST | BPF_MEM | BPF_DW:
|
||||
if (ctx->fpb_offset > 0 && dst == fp) {
|
||||
case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
|
||||
case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
|
||||
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
|
||||
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
|
||||
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
|
||||
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
|
||||
dst = tmp2;
|
||||
}
|
||||
if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
|
||||
dst_adj = fpb;
|
||||
off_adj = off + ctx->fpb_offset;
|
||||
} else {
|
||||
@@ -1365,6 +1406,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
ret = add_exception_handler(insn, ctx, dst);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
/* STX: *(size *)(dst + off) = src */
|
||||
@@ -1372,7 +1417,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
case BPF_STX | BPF_MEM | BPF_H:
|
||||
case BPF_STX | BPF_MEM | BPF_B:
|
||||
case BPF_STX | BPF_MEM | BPF_DW:
|
||||
if (ctx->fpb_offset > 0 && dst == fp) {
|
||||
case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
|
||||
case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
|
||||
case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
|
||||
case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
|
||||
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
|
||||
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
|
||||
dst = tmp2;
|
||||
}
|
||||
if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
|
||||
dst_adj = fpb;
|
||||
off_adj = off + ctx->fpb_offset;
|
||||
} else {
|
||||
@@ -1413,6 +1466,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
ret = add_exception_handler(insn, ctx, dst);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case BPF_STX | BPF_ATOMIC | BPF_W:
|
||||
@@ -1594,6 +1651,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
bool tmp_blinded = false;
|
||||
bool extra_pass = false;
|
||||
struct jit_ctx ctx;
|
||||
u64 arena_vm_start;
|
||||
u8 *image_ptr;
|
||||
u8 *ro_image_ptr;
|
||||
|
||||
@@ -1611,6 +1669,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
prog = tmp;
|
||||
}
|
||||
|
||||
arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
|
||||
@@ -1641,6 +1700,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
}
|
||||
|
||||
ctx.fpb_offset = find_fpb_offset(prog);
|
||||
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
|
||||
|
||||
/*
|
||||
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
|
||||
@@ -1648,7 +1708,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
* BPF line info needs ctx->offset[i] to be the offset of
|
||||
* instruction[i] in jited image, so build prologue first.
|
||||
*/
|
||||
if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
|
||||
if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
|
||||
arena_vm_start)) {
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
}
|
||||
@@ -1696,7 +1757,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
ctx.idx = 0;
|
||||
ctx.exentry_idx = 0;
|
||||
|
||||
build_prologue(&ctx, was_classic, prog->aux->exception_cb);
|
||||
build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
|
||||
|
||||
if (build_body(&ctx, extra_pass)) {
|
||||
prog = orig_prog;
|
||||
@@ -2461,6 +2522,11 @@ bool bpf_jit_supports_exceptions(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
bool bpf_jit_supports_arena(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
void bpf_jit_free(struct bpf_prog *prog)
|
||||
{
|
||||
if (prog->jited) {
|
||||
|
||||
@@ -10,5 +10,3 @@ fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_mu
|
||||
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
|
||||
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
|
||||
missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
|
||||
verifier_arena # JIT does not support arena
|
||||
arena_htab # JIT does not support arena
|
||||
|
||||
Reference in New Issue
Block a user