Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix how linked registers track zero extension of subregisters (Daniel
   Borkmann)

 - Fix unsound scalar fork for OR instructions (Daniel Wade)

 - Fix exception exit lock check for subprogs (Ihor Solodrai)

 - Fix undefined behavior in interpreter for SDIV/SMOD instructions
   (Jenny Guanni Qu)

 - Release module's BTF when module is unloaded (Kumar Kartikeya
   Dwivedi)

 - Fix constant blinding for PROBE_MEM32 instructions (Sachin Kumar)

 - Reset register ID for END instructions to prevent incorrect value
   tracking (Yazhou Tang)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Add test cases for sync_linked_regs regarding zext propagation
  bpf: Fix sync_linked_regs regarding BPF_ADD_CONST32 zext propagation
  selftests/bpf: Add tests for maybe_fork_scalars() OR vs AND handling
  bpf: Fix unsound scalar forking in maybe_fork_scalars() for BPF_OR
  selftests/bpf: Add tests for sdiv32/smod32 with INT_MIN dividend
  bpf: Fix undefined behavior in interpreter sdiv/smod for INT_MIN
  selftests/bpf: Add tests for bpf_throw lock leak from subprogs
  bpf: Fix exception exit lock checking for subprogs
  bpf: Release module BTF IDR before module unload
  selftests/bpf: Fix pkg-config call on static builds
  bpf: Fix constant blinding for PROBE_MEM32 stores
  selftests/bpf: Add test for BPF_END register ID reset
  bpf: Reset register ID for BPF_END value tracking
This commit is contained in:
Linus Torvalds
2026-03-22 11:16:06 -07:00
9 changed files with 416 additions and 24 deletions

View File

@@ -1787,7 +1787,16 @@ static void btf_free_id(struct btf *btf)
* of the _bh() version.
*/
spin_lock_irqsave(&btf_idr_lock, flags);
idr_remove(&btf_idr, btf->id);
if (btf->id) {
idr_remove(&btf_idr, btf->id);
/*
* Clear the id here to make this function idempotent, since it will get
* called a couple of times for module BTFs: on module unload, and then
* the final btf_put(). btf_alloc_id() starts IDs with 1, so we can use
* 0 as sentinel value.
*/
WRITE_ONCE(btf->id, 0);
}
spin_unlock_irqrestore(&btf_idr_lock, flags);
}
@@ -8115,7 +8124,7 @@ static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct btf *btf = filp->private_data;
seq_printf(m, "btf_id:\t%u\n", btf->id);
seq_printf(m, "btf_id:\t%u\n", READ_ONCE(btf->id));
}
#endif
@@ -8197,7 +8206,7 @@ int btf_get_info_by_fd(const struct btf *btf,
if (copy_from_user(&info, uinfo, info_copy))
return -EFAULT;
info.id = btf->id;
info.id = READ_ONCE(btf->id);
ubtf = u64_to_user_ptr(info.btf);
btf_copy = min_t(u32, btf->data_size, info.btf_size);
if (copy_to_user(ubtf, btf->data, btf_copy))
@@ -8260,7 +8269,7 @@ int btf_get_fd_by_id(u32 id)
u32 btf_obj_id(const struct btf *btf)
{
return btf->id;
return READ_ONCE(btf->id);
}
bool btf_is_kernel(const struct btf *btf)
@@ -8382,6 +8391,13 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
if (btf_mod->module != module)
continue;
/*
* For modules, we do the freeing of BTF IDR as soon as
* module goes away to disable BTF discovery, since the
* btf_try_get_module() on such BTFs will fail. This may
* be called again on btf_put(), but it's ok to do so.
*/
btf_free_id(btf_mod->btf);
list_del(&btf_mod->list);
if (btf_mod->sysfs_attr)
sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);

View File

@@ -1422,6 +1422,27 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
break;
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^
from->imm);
*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
/*
* Cannot use BPF_STX_MEM() macro here as it
* hardcodes BPF_MEM mode, losing PROBE_MEM32
* and breaking arena addressing in the JIT.
*/
*to++ = (struct bpf_insn) {
.code = BPF_STX | BPF_PROBE_MEM32 |
BPF_SIZE(from->code),
.dst_reg = from->dst_reg,
.src_reg = BPF_REG_AX,
.off = from->off,
};
break;
}
out:
return to - to_buff;
@@ -1736,6 +1757,12 @@ bool bpf_opcode_in_insntable(u8 code)
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/* Absolute value of s32 without undefined behavior for S32_MIN */
static u32 abs_s32(s32 x)
{
return x >= 0 ? (u32)x : -(u32)x;
}
/**
* ___bpf_prog_run - run eBPF program on a given context
* @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
@@ -1900,8 +1927,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
DST = do_div(AX, (u32) SRC);
break;
case 1:
AX = abs((s32)DST);
AX = do_div(AX, abs((s32)SRC));
AX = abs_s32((s32)DST);
AX = do_div(AX, abs_s32((s32)SRC));
if ((s32)DST < 0)
DST = (u32)-AX;
else
@@ -1928,8 +1955,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
DST = do_div(AX, (u32) IMM);
break;
case 1:
AX = abs((s32)DST);
AX = do_div(AX, abs((s32)IMM));
AX = abs_s32((s32)DST);
AX = do_div(AX, abs_s32((s32)IMM));
if ((s32)DST < 0)
DST = (u32)-AX;
else
@@ -1955,8 +1982,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
DST = (u32) AX;
break;
case 1:
AX = abs((s32)DST);
do_div(AX, abs((s32)SRC));
AX = abs_s32((s32)DST);
do_div(AX, abs_s32((s32)SRC));
if (((s32)DST < 0) == ((s32)SRC < 0))
DST = (u32)AX;
else
@@ -1982,8 +2009,8 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
DST = (u32) AX;
break;
case 1:
AX = abs((s32)DST);
do_div(AX, abs((s32)IMM));
AX = abs_s32((s32)DST);
do_div(AX, abs_s32((s32)IMM));
if (((s32)DST < 0) == ((s32)IMM < 0))
DST = (u32)AX;
else

View File

@@ -15910,6 +15910,13 @@ static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *ins
/* Apply bswap if alu64 or switch between big-endian and little-endian machines */
bool need_bswap = alu64 || (to_le == is_big_endian);
/*
* If the register is mutated, manually reset its scalar ID to break
* any existing ties and avoid incorrect bounds propagation.
*/
if (need_bswap || insn->imm == 16 || insn->imm == 32)
dst_reg->id = 0;
if (need_bswap) {
if (insn->imm == 16)
dst_reg->var_off = tnum_bswap16(dst_reg->var_off);
@@ -15992,7 +15999,7 @@ static int maybe_fork_scalars(struct bpf_verifier_env *env, struct bpf_insn *ins
else
return 0;
branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
branch = push_stack(env, env->insn_idx, env->insn_idx, false);
if (IS_ERR(branch))
return PTR_ERR(branch);
@@ -17408,6 +17415,12 @@ static void sync_linked_regs(struct bpf_verifier_env *env, struct bpf_verifier_s
continue;
if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
continue;
/*
* Skip mixed 32/64-bit links: the delta relationship doesn't
* hold across different ALU widths.
*/
if (((reg->id ^ known_reg->id) & BPF_ADD_CONST) == BPF_ADD_CONST)
continue;
if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
reg->off == known_reg->off) {
s32 saved_subreg_def = reg->subreg_def;
@@ -17435,7 +17448,7 @@ static void sync_linked_regs(struct bpf_verifier_env *env, struct bpf_verifier_s
scalar32_min_max_add(reg, &fake_reg);
scalar_min_max_add(reg, &fake_reg);
reg->var_off = tnum_add(reg->var_off, fake_reg.var_off);
if (known_reg->id & BPF_ADD_CONST32)
if ((reg->id | known_reg->id) & BPF_ADD_CONST32)
zext_32_to_64(reg);
reg_bounds_sync(reg);
}
@@ -19863,11 +19876,14 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
* Also verify that new value satisfies old value range knowledge.
*/
/* ADD_CONST mismatch: different linking semantics */
if ((rold->id & BPF_ADD_CONST) && !(rcur->id & BPF_ADD_CONST))
return false;
if (rold->id && !(rold->id & BPF_ADD_CONST) && (rcur->id & BPF_ADD_CONST))
/*
* ADD_CONST flags must match exactly: BPF_ADD_CONST32 and
* BPF_ADD_CONST64 have different linking semantics in
* sync_linked_regs() (alu32 zero-extends, alu64 does not),
* so pruning across different flag types is unsafe.
*/
if (rold->id &&
(rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST))
return false;
/* Both have offset linkage: offsets must match */
@@ -20904,7 +20920,8 @@ static int process_bpf_exit_full(struct bpf_verifier_env *env,
* state when it exits.
*/
int err = check_resource_leak(env, exception_exit,
!env->cur_state->curframe,
exception_exit || !env->cur_state->curframe,
exception_exit ? "bpf_throw" :
"BPF_EXIT instruction in main prog");
if (err)
return err;

View File

@@ -409,7 +409,7 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \
CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
HOSTPKG_CONFIG=$(PKG_CONFIG) \
HOSTPKG_CONFIG='$(PKG_CONFIG)' \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by

View File

@@ -8,6 +8,11 @@
#include "bpf_experimental.h"
extern void bpf_rcu_read_lock(void) __ksym;
extern void bpf_rcu_read_unlock(void) __ksym;
extern void bpf_preempt_disable(void) __ksym;
extern void bpf_preempt_enable(void) __ksym;
extern void bpf_local_irq_save(unsigned long *) __ksym;
extern void bpf_local_irq_restore(unsigned long *) __ksym;
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
@@ -131,7 +136,7 @@ int reject_subprog_with_lock(void *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
__failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region")
int reject_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
@@ -147,11 +152,13 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
__failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region")
int reject_subprog_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
return throwing_subprog(ctx);
throwing_subprog(ctx);
bpf_rcu_read_unlock();
return 0;
}
static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
@@ -346,4 +353,47 @@ int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
return 0;
}
__noinline static int always_throws(void)
{
bpf_throw(0);
return 0;
}
__noinline static int rcu_lock_then_throw(void)
{
bpf_rcu_read_lock();
bpf_throw(0);
return 0;
}
SEC("?tc")
__failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region")
int reject_subprog_rcu_lock_throw(void *ctx)
{
rcu_lock_then_throw();
return 0;
}
SEC("?tc")
__failure __msg("bpf_throw cannot be used inside bpf_preempt_disable-ed region")
int reject_subprog_throw_preempt_lock(void *ctx)
{
bpf_preempt_disable();
always_throws();
bpf_preempt_enable();
return 0;
}
SEC("?tc")
__failure __msg("bpf_throw cannot be used inside bpf_local_irq_save-ed region")
int reject_subprog_throw_irq_lock(void *ctx)
{
unsigned long flags;
bpf_local_irq_save(&flags);
always_throws();
bpf_local_irq_restore(&flags);
return 0;
}
char _license[] SEC("license") = "GPL";

View File

@@ -2037,4 +2037,98 @@ __naked void signed_unsigned_intersection32_case2(void *ctx)
: __clobber_all);
}
SEC("socket")
__description("maybe_fork_scalars: OR with constant rejects OOB")
__failure __msg("invalid access to map value")
__naked void or_scalar_fork_rejects_oob(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r9 = r0; \
r6 = *(u64*)(r9 + 0); \
r6 s>>= 63; \
r6 |= 8; \
/* r6 is -1 (current) or 8 (pushed) */ \
if r6 s< 0 goto l0_%=; \
/* pushed path: r6 = 8, OOB for value_size=8 */ \
r9 += r6; \
r0 = *(u8*)(r9 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("maybe_fork_scalars: AND with constant still works")
__success __retval(0)
__naked void and_scalar_fork_still_works(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r9 = r0; \
r6 = *(u64*)(r9 + 0); \
r6 s>>= 63; \
r6 &= 4; \
/* \
* r6 is 0 (pushed, 0&4==0) or 4 (current) \
* both within value_size=8 \
*/ \
if r6 s< 0 goto l0_%=; \
r9 += r6; \
r0 = *(u8*)(r9 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
SEC("socket")
__description("maybe_fork_scalars: OR with constant allows in-bounds")
__success __retval(0)
__naked void or_scalar_fork_allows_inbounds(void)
{
asm volatile (" \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_8b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r9 = r0; \
r6 = *(u64*)(r9 + 0); \
r6 s>>= 63; \
r6 |= 4; \
/* \
* r6 is -1 (current) or 4 (pushed) \
* pushed path: r6 = 4, within value_size=8 \
*/ \
if r6 s< 0 goto l0_%=; \
r9 += r6; \
r0 = *(u8*)(r9 + 0); \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_map_lookup_elem),
__imm_addr(map_hash_8b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

View File

@@ -91,6 +91,28 @@ BSWAP_RANGE_TEST(le32_range, "le32", 0x3f00, 0x3f0000)
BSWAP_RANGE_TEST(le64_range, "le64", 0x3f00, 0x3f000000000000)
#endif
SEC("socket")
__description("BSWAP, reset reg id")
__failure __msg("math between fp pointer and register with unbounded min value is not allowed")
__naked void bswap_reset_reg_id(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
r1 = r0; \
r0 = be16 r0; \
if r0 != 1 goto l0_%=; \
r2 = r10; \
r2 += -512; \
r2 += r1; \
*(u8 *)(r2 + 0) = 0; \
l0_%=: \
r0 = 0; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
#else
SEC("socket")

View File

@@ -348,6 +348,114 @@ l0_%=: \
: __clobber_all);
}
/*
* Test that sync_linked_regs() checks reg->id (the linked target register)
* for BPF_ADD_CONST32 rather than known_reg->id (the branch register).
*/
SEC("socket")
__success
__naked void scalars_alu32_zext_linked_reg(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \
r7 = r6; /* linked: same id as r6 */ \
w7 += 1; /* alu32: r7.id |= BPF_ADD_CONST32 */ \
r8 = 0xFFFFffff ll; \
if r6 < r8 goto l0_%=; \
/* r6 in [0xFFFFFFFF, 0xFFFFFFFF] */ \
/* sync_linked_regs: known_reg=r6, reg=r7 */ \
/* CPU: w7 = (u32)(0xFFFFFFFF + 1) = 0, zext -> r7 = 0 */ \
/* With fix: r7 64-bit = [0, 0] (zext applied) */ \
/* Without fix: r7 64-bit = [0x100000000] (no zext) */ \
r7 >>= 32; \
if r7 == 0 goto l0_%=; \
r0 /= 0; /* unreachable with fix */ \
l0_%=: \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
/*
* Test that sync_linked_regs() skips propagation when one register used
* alu32 (BPF_ADD_CONST32) and the other used alu64 (BPF_ADD_CONST64).
* The delta relationship doesn't hold across different ALU widths.
*/
SEC("socket")
__failure __msg("div by zero")
__naked void scalars_alu32_alu64_cross_type(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \
r7 = r6; /* linked: same id as r6 */ \
w7 += 1; /* alu32: BPF_ADD_CONST32, delta = 1 */ \
r8 = r6; /* linked: same id as r6 */ \
r8 += 2; /* alu64: BPF_ADD_CONST64, delta = 2 */ \
r9 = 0xFFFFffff ll; \
if r7 < r9 goto l0_%=; \
/* r7 = 0xFFFFFFFF */ \
/* sync: known_reg=r7 (ADD_CONST32), reg=r8 (ADD_CONST64) */ \
/* Without fix: r8 = zext(0xFFFFFFFF + 1) = 0 */ \
/* With fix: r8 stays [2, 0x100000001] (r8 >= 2) */ \
if r8 > 0 goto l1_%=; \
goto l0_%=; \
l1_%=: \
r0 /= 0; /* div by zero */ \
l0_%=: \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
/*
* Test that regsafe() prevents pruning when two paths reach the same program
* point with linked registers carrying different ADD_CONST flags (one
* BPF_ADD_CONST32 from alu32, another BPF_ADD_CONST64 from alu64).
*/
SEC("socket")
__failure __msg("div by zero")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void scalars_alu32_alu64_regsafe_pruning(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \
r7 = r6; /* linked: same id as r6 */ \
/* Get another random value for the path branch */ \
call %[bpf_get_prandom_u32]; \
if r0 > 0 goto l_pathb_%=; \
/* Path A: alu32 */ \
w7 += 1; /* BPF_ADD_CONST32, delta = 1 */\
goto l_merge_%=; \
l_pathb_%=: \
/* Path B: alu64 */ \
r7 += 1; /* BPF_ADD_CONST64, delta = 1 */\
l_merge_%=: \
/* Merge point: regsafe() compares path B against cached path A. */ \
/* Narrow r6 to trigger sync_linked_regs for r7 */ \
r9 = 0xFFFFffff ll; \
if r6 < r9 goto l0_%=; \
/* r6 = 0xFFFFFFFF */ \
/* sync: r7 = 0xFFFFFFFF + 1 = 0x100000000 */ \
/* Path A: zext -> r7 = 0 */ \
/* Path B: no zext -> r7 = 0x100000000 */ \
r7 >>= 32; \
if r7 == 0 goto l0_%=; \
r0 /= 0; /* div by zero on path B */ \
l0_%=: \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__success
void alu32_negative_offset(void)

View File

@@ -1209,6 +1209,64 @@ __naked void smod32_ri_divisor_neg_1(void)
: __clobber_all);
}
SEC("socket")
__description("SDIV32, INT_MIN divided by 2, imm")
__success __success_unpriv __retval(-1073741824)
__naked void sdiv32_int_min_div_2_imm(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 s/= 2; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("socket")
__description("SDIV32, INT_MIN divided by 2, reg")
__success __success_unpriv __retval(-1073741824)
__naked void sdiv32_int_min_div_2_reg(void)
{
asm volatile (" \
w0 = %[int_min]; \
w1 = 2; \
w0 s/= w1; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("socket")
__description("SMOD32, INT_MIN modulo 2, imm")
__success __success_unpriv __retval(0)
__naked void smod32_int_min_mod_2_imm(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 s%%= 2; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
SEC("socket")
__description("SMOD32, INT_MIN modulo -2, imm")
__success __success_unpriv __retval(0)
__naked void smod32_int_min_mod_neg2_imm(void)
{
asm volatile (" \
w0 = %[int_min]; \
w0 s%%= -2; \
exit; \
" :
: __imm_const(int_min, INT_MIN)
: __clobber_all);
}
#else
SEC("socket")