selftests/bpf: Enable private stack tests for arm64

As the arm64 JIT now supports the private stack, make sure all relevant
tests run on the arm64 architecture.

Relevant tests:

 #415/1   struct_ops_private_stack/private_stack:OK
 #415/2   struct_ops_private_stack/private_stack_fail:OK
 #415/3   struct_ops_private_stack/private_stack_recur:OK
 #415     struct_ops_private_stack:OK
 #549/1   verifier_private_stack/Private stack, single prog:OK
 #549/2   verifier_private_stack/Private stack, subtree > MAX_BPF_STACK:OK
 #549/3   verifier_private_stack/No private stack:OK
 #549/4   verifier_private_stack/Private stack, callback:OK
 #549/5   verifier_private_stack/Private stack, exception in mainprog:OK
 #549/6   verifier_private_stack/Private stack, exception in subprog:OK
 #549/7   verifier_private_stack/Private stack, async callback, not nested:OK
 #549/8   verifier_private_stack/Private stack, async callback, potential nesting:OK
 #549     verifier_private_stack:OK
 Summary: 2/11 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/bpf/20250724120257.7299-4-puranjay@kernel.org
This commit is contained in:
Puranjay Mohan
2025-07-24 12:02:55 +00:00
committed by Daniel Borkmann
parent 6c17a882d3
commit e9f545d0d3
4 changed files with 91 additions and 4 deletions

View File

@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
#if defined(__TARGET_ARCH_x86)
#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;

View File

@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
#if defined(__TARGET_ARCH_x86)
#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;

View File

@@ -7,7 +7,7 @@
char _license[] SEC("license") = "GPL";
#if defined(__TARGET_ARCH_x86)
#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
bool skip __attribute((__section__(".data"))) = false;
#else
bool skip = true;

View File

@@ -8,7 +8,7 @@
/* From include/linux/filter.h */
#define MAX_BPF_STACK 512
#if defined(__TARGET_ARCH_x86)
#if defined(__TARGET_ARCH_x86) || defined(__TARGET_ARCH_arm64)
struct elem {
struct bpf_timer t;
@@ -30,6 +30,18 @@ __jited(" movabsq $0x{{.*}}, %r9")
__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x100(%r9)")
__arch_arm64
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited("...")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_single_prog(void)
{
asm volatile (" \
@@ -45,6 +57,9 @@ __description("No private stack")
__success
__arch_x86_64
__jited(" subq $0x8, %rsp")
__arch_arm64
__jited(" mov x25, sp")
__jited(" sub sp, sp, #0x10")
__naked void no_private_stack_nested(void)
{
asm volatile (" \
@@ -81,6 +96,19 @@ __jited(" pushq %r9")
__jited(" callq 0x{{.*}}")
__jited(" popq %r9")
__jited(" xorl %eax, %eax")
__arch_arm64
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl {{.*}}")
__jited("...")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_nested_1(void)
{
asm volatile (" \
@@ -131,6 +159,24 @@ __jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited("func #1")
__jited("...")
__jited(" stp x25, x27, [sp, {{.*}}]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" bl 0x{{.*}}")
__jited(" add x7, x0, #0x0")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
__jited(" add x7, x0, #0x0")
__jited(" mov x7, #0x0")
__jited(" ldp x25, x27, [sp], {{.*}}")
__naked void private_stack_callback(void)
{
asm volatile (" \
@@ -154,6 +200,28 @@ __arch_x86_64
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited(" stp x29, x30, [sp, #-0x10]!")
__jited(" mov x29, sp")
__jited(" stp xzr, x26, [sp, #-0x10]!")
__jited(" mov x26, sp")
__jited(" stp x19, x20, [sp, #-0x10]!")
__jited(" stp x21, x22, [sp, #-0x10]!")
__jited(" stp x23, x24, [sp, #-0x10]!")
__jited(" stp x25, x26, [sp, #-0x10]!")
__jited(" stp x27, x28, [sp, #-0x10]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" mov x0, #0x0")
__jited(" bl 0x{{.*}}")
__jited(" add x7, x0, #0x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_main_prog(void)
{
asm volatile (" \
@@ -179,6 +247,19 @@ __jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
__arch_arm64
__jited(" stp x27, x28, [sp, #-0x10]!")
__jited(" mov x27, {{.*}}")
__jited(" movk x27, {{.*}}, lsl #16")
__jited(" movk x27, {{.*}}")
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
__jited(" mov x0, #0x2a")
__jited(" str x0, [x27]")
__jited(" bl 0x{{.*}}")
__jited(" add x7, x0, #0x0")
__jited(" ldp x27, x28, [sp], #0x10")
int private_stack_exception_sub_prog(void)
{
asm volatile (" \
@@ -220,6 +301,10 @@ __description("Private stack, async callback, not nested")
__success __retval(0)
__arch_x86_64
__jited(" movabsq $0x{{.*}}, %r9")
__arch_arm64
__jited(" mrs x10, TPIDR_EL{{[0-1]}}")
__jited(" add x27, x27, x10")
__jited(" add x25, x27, {{.*}}")
int private_stack_async_callback_1(void)
{
struct bpf_timer *arr_timer;
@@ -241,6 +326,8 @@ __description("Private stack, async callback, potential nesting")
__success __retval(0)
__arch_x86_64
__jited(" subq $0x100, %rsp")
__arch_arm64
__jited(" sub sp, sp, #0x100")
int private_stack_async_callback_2(void)
{
struct bpf_timer *arr_timer;