Merge branch 'use-overflow-h-helpers-to-check-for-overflows'
Shung-Hsi Yu says:

====================
Use overflow.h helpers to check for overflows

This patch set refactors kernel/bpf/verifier.c to use the type-agnostic,
generic overflow-check helpers defined in include/linux/overflow.h to check
for addition and subtraction overflow, and drops the signed_*_overflows()
helpers we currently have in kernel/bpf/verifier.c; with a fix for the
overflow check in adjust_jmp_off() in patch 1.

There should be no functional change in how the verifier works; the main
motivation is to make future refactoring[1] easier.

While check_mul_overflow() also exists and could potentially replace what
we have in scalar*_min_max_mul(), it does not help with refactoring and
would either change how the verifier works (e.g. lifting the restriction on
umax <= U32_MAX and u32_max <= U16_MAX) or make the code slightly harder to
read, so it is left for a future endeavour.

Changes from v2 <https://lore.kernel.org/r/20240701055907.82481-1-shung-hsi.yu@suse.com>:
- add a fix for 5337ac4c9b ("bpf: Fix the corner case with may_goto and
  jump to the 1st insn.") to correct the overflow check for general jump
  instructions
- adapt to changes in commit 5337ac4c9b ("bpf: Fix the corner case with
  may_goto and jump to the 1st insn.")
- refactor adjust_jmp_off() as well and remove signed_add16_overflows()

Changes from v1 <https://lore.kernel.org/r/20240623070324.12634-1-shung-hsi.yu@suse.com>:
- use pointers to the values in dst_reg directly as the sum/diff pointer
  and remove the else branch (Jiri)
- change local variables to be dst_reg pointers instead of src_reg values
- include a comparison of the generated assembly before & after the change
  (Alexei)

1: https://github.com/kernel-patches/bpf/pull/7205/commits
====================

Link: https://lore.kernel.org/r/20240712080127.136608-1-shung-hsi.yu@suse.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
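For reference, the helpers this series adopts, check_add_overflow() and
check_sub_overflow() from include/linux/overflow.h, return true when the
mathematical result does not fit the destination type and always store the
(possibly wrapped) result through the result pointer. A minimal standalone
userspace sketch of that contract, using the compiler builtin the kernel
helpers wrap rather than the kernel headers themselves:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t sum;

            /* true on overflow; the wrapped result is still stored in sum */
            bool ovf = __builtin_add_overflow(INT64_MAX, (int64_t)1, &sum);

            printf("overflowed=%d wrapped=%lld\n", ovf, (long long)sum);
            return 0;
    }

Because the destination is written even on overflow, a caller can write
straight into the final location and only patch it up in the overflow
branch, which is exactly the pattern the hunks below adopt.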
@@ -12726,56 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
-static bool signed_add_overflows(s64 a, s64 b)
-{
-	/* Do the add in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a + (u64)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add32_overflows(s32 a, s32 b)
-{
-	/* Do the add in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a + (u32)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add16_overflows(s16 a, s16 b)
-{
-	/* Do the add in u16, where overflow is well-defined */
-	s16 res = (s16)((u16)a + (u16)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_sub_overflows(s64 a, s64 b)
-{
-	/* Do the sub in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a - (u64)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
-static bool signed_sub32_overflows(s32 a, s32 b)
-{
-	/* Do the sub in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a - (u32)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 				  const struct bpf_reg_state *reg,
 				  enum bpf_reg_type type)
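All the dropped helpers use the same trick: do the arithmetic in the
unsigned type, where wraparound is well-defined, then compare the result
against an operand. A standalone harness (userspace types, not kernel code)
cross-checking the dropped 64-bit add helper against the builtin behind
check_add_overflow(), showing the replacement is behavior-preserving:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* copy of the dropped helper, in userspace types */
    static bool signed_add_overflows(int64_t a, int64_t b)
    {
            /* Do the add in u64, where overflow is well-defined */
            int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

            if (b < 0)
                    return res > a;
            return res < a;
    }

    int main(void)
    {
            const int64_t probes[] = { INT64_MIN, INT64_MIN + 1, -1, 0, 1,
                                       INT64_MAX - 1, INT64_MAX };

            for (size_t i = 0; i < 7; i++) {
                    for (size_t j = 0; j < 7; j++) {
                            int64_t sum;

                            /* both must agree on every boundary pair */
                            assert(signed_add_overflows(probes[i], probes[j]) ==
                                   __builtin_add_overflow(probes[i], probes[j], &sum));
                    }
            }
            return 0;
    }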
@@ -13257,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * added into the variable offset, and we copy the fixed offset
 		 * from ptr_reg.
 		 */
-		if (signed_add_overflows(smin_ptr, smin_val) ||
-		    signed_add_overflows(smax_ptr, smax_val)) {
+		if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
+		    check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
-		} else {
-			dst_reg->smin_value = smin_ptr + smin_val;
-			dst_reg->smax_value = smax_ptr + smax_val;
 		}
-		if (umin_ptr + umin_val < umin_ptr ||
-		    umax_ptr + umax_val < umax_ptr) {
+		if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
+		    check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
 			dst_reg->umin_value = 0;
 			dst_reg->umax_value = U64_MAX;
-		} else {
-			dst_reg->umin_value = umin_ptr + umin_val;
-			dst_reg->umax_value = umax_ptr + umax_val;
 		}
 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
 		dst_reg->off = ptr_reg->off;
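Since check_add_overflow() stores the wrapped sum through its third
argument even on overflow, the result can be written directly into dst_reg
and only clamped in the overflow branch; the old else branch that redid the
addition becomes unnecessary (this is the change requested in the v1
review). The pattern in isolation, as a sketch with a stand-in bounds
struct rather than the real bpf_reg_state:

    #include <stdint.h>

    struct bounds { int64_t smin, smax; };  /* stand-in for the reg state */

    static void bounds_add(struct bounds *dst, int64_t smin_val, int64_t smax_val)
    {
            /* dst fields are written unconditionally; on overflow they
             * briefly hold the wrapped value, which the clamp overwrites */
            if (__builtin_add_overflow(dst->smin, smin_val, &dst->smin) ||
                __builtin_add_overflow(dst->smax, smax_val, &dst->smax)) {
                    dst->smin = INT64_MIN;
                    dst->smax = INT64_MAX;
            }
    }

    int main(void)
    {
            struct bounds b = { INT64_MAX - 1, INT64_MAX };

            bounds_add(&b, 1, 1);   /* smax overflows -> both clamped */
            return (b.smin == INT64_MIN && b.smax == INT64_MAX) ? 0 : 1;
    }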
@@ -13314,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		/* A new variable offset is created. If the subtrahend is known
 		 * nonnegative, then any reg->range we had before is still good.
 		 */
-		if (signed_sub_overflows(smin_ptr, smax_val) ||
-		    signed_sub_overflows(smax_ptr, smin_val)) {
+		if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
+		    check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
 			/* Overflow possible, we know nothing */
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
-		} else {
-			dst_reg->smin_value = smin_ptr - smax_val;
-			dst_reg->smax_value = smax_ptr - smin_val;
 		}
 		if (umin_ptr < umax_val) {
 			/* Overflow possible, we know nothing */
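The unsigned subtraction path keeps its plain comparison because no helper
is needed there: for unsigned values, a - b wraps exactly when a < b, which
is precisely what the retained umin_ptr < umax_val test checks. A short
standalone sanity check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t a = 1, b = 2, diff;

            /* for unsigned types, a - b wraps exactly when a < b */
            assert(__builtin_sub_overflow(a, b, &diff) == (a < b));
            assert(diff == UINT64_MAX);     /* 1 - 2 wraps to 2^64 - 1 */
            return 0;
    }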
@@ -13374,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
 				 struct bpf_reg_state *src_reg)
 {
-	s32 smin_val = src_reg->s32_min_value;
-	s32 smax_val = src_reg->s32_max_value;
-	u32 umin_val = src_reg->u32_min_value;
-	u32 umax_val = src_reg->u32_max_value;
+	s32 *dst_smin = &dst_reg->s32_min_value;
+	s32 *dst_smax = &dst_reg->s32_max_value;
+	u32 *dst_umin = &dst_reg->u32_min_value;
+	u32 *dst_umax = &dst_reg->u32_max_value;
 
-	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
-	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
-		dst_reg->s32_min_value = S32_MIN;
-		dst_reg->s32_max_value = S32_MAX;
-	} else {
-		dst_reg->s32_min_value += smin_val;
-		dst_reg->s32_max_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
+		*dst_smin = S32_MIN;
+		*dst_smax = S32_MAX;
 	}
-	if (dst_reg->u32_min_value + umin_val < umin_val ||
-	    dst_reg->u32_max_value + umax_val < umax_val) {
-		dst_reg->u32_min_value = 0;
-		dst_reg->u32_max_value = U32_MAX;
-	} else {
-		dst_reg->u32_min_value += umin_val;
-		dst_reg->u32_max_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U32_MAX;
 	}
 }
 
 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
 			       struct bpf_reg_state *src_reg)
 {
-	s64 smin_val = src_reg->smin_value;
-	s64 smax_val = src_reg->smax_value;
-	u64 umin_val = src_reg->umin_value;
-	u64 umax_val = src_reg->umax_value;
+	s64 *dst_smin = &dst_reg->smin_value;
+	s64 *dst_smax = &dst_reg->smax_value;
+	u64 *dst_umin = &dst_reg->umin_value;
+	u64 *dst_umax = &dst_reg->umax_value;
 
-	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
-	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
-		dst_reg->smin_value = S64_MIN;
-		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value += smin_val;
-		dst_reg->smax_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
+		*dst_smin = S64_MIN;
+		*dst_smax = S64_MAX;
 	}
-	if (dst_reg->umin_value + umin_val < umin_val ||
-	    dst_reg->umax_value + umax_val < umax_val) {
-		dst_reg->umin_value = 0;
-		dst_reg->umax_value = U64_MAX;
-	} else {
-		dst_reg->umin_value += umin_val;
-		dst_reg->umax_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U64_MAX;
 	}
 }
 
 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 				 struct bpf_reg_state *src_reg)
 {
-	s32 smin_val = src_reg->s32_min_value;
-	s32 smax_val = src_reg->s32_max_value;
+	s32 *dst_smin = &dst_reg->s32_min_value;
+	s32 *dst_smax = &dst_reg->s32_max_value;
 	u32 umin_val = src_reg->u32_min_value;
 	u32 umax_val = src_reg->u32_max_value;
 
-	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
-	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
+	if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
+	    check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
-		dst_reg->s32_min_value = S32_MIN;
-		dst_reg->s32_max_value = S32_MAX;
-	} else {
-		dst_reg->s32_min_value -= smax_val;
-		dst_reg->s32_max_value -= smin_val;
+		*dst_smin = S32_MIN;
+		*dst_smax = S32_MAX;
 	}
 	if (dst_reg->u32_min_value < umax_val) {
 		/* Overflow possible, we know nothing */
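The dropped unsigned test, x + y < y, is the classic wraparound idiom, and
it agrees with check_add_overflow() for unsigned operands, so nothing
changes in what the verifier accepts. A standalone cross-check over a few
boundary values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint32_t probes[] = { 0, 1, 0x7fffffffu, 0x80000000u,
                                        UINT32_MAX };

            for (int i = 0; i < 5; i++) {
                    for (int j = 0; j < 5; j++) {
                            uint32_t x = probes[i], y = probes[j], sum;

                            /* the old idiom and the builtin must agree */
                            assert(((x + y) < y) ==
                                   __builtin_add_overflow(x, y, &sum));
                    }
            }
            return 0;
    }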
@@ -13454,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
 			       struct bpf_reg_state *src_reg)
 {
-	s64 smin_val = src_reg->smin_value;
-	s64 smax_val = src_reg->smax_value;
+	s64 *dst_smin = &dst_reg->smin_value;
+	s64 *dst_smax = &dst_reg->smax_value;
 	u64 umin_val = src_reg->umin_value;
 	u64 umax_val = src_reg->umax_value;
 
-	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
-	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+	if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
+	    check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
-		dst_reg->smin_value = S64_MIN;
-		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value -= smax_val;
-		dst_reg->smax_value -= smin_val;
+		*dst_smin = S64_MIN;
+		*dst_smax = S64_MAX;
 	}
 	if (dst_reg->umin_value < umax_val) {
 		/* Overflow possible, we know nothing */
@@ -18835,6 +18758,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 insn_cnt = prog->len, i;
+	s32 imm;
+	s16 off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		u8 code = insn->code;
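The two new locals exist because check_add_overflow() derives the width of
the check from the destination's type: adding into an s16 flags any result
outside the 16-bit range, so no dedicated 16-bit helper is needed. A sketch
of that property with the underlying builtin:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int16_t off;
            int32_t imm;

            /* 0x7fff + 1 fits in 32 bits but not in 16 */
            assert(__builtin_add_overflow(INT16_MAX, 1, &off));
            assert(!__builtin_add_overflow(INT16_MAX, 1, &imm));
            return 0;
    }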
@@ -18846,15 +18771,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 		if (insn->code == (BPF_JMP32 | BPF_JA)) {
 			if (i + 1 + insn->imm != tgt_idx)
 				continue;
-			if (signed_add32_overflows(insn->imm, delta))
+			if (check_add_overflow(insn->imm, delta, &imm))
 				return -ERANGE;
-			insn->imm += delta;
+			insn->imm = imm;
 		} else {
 			if (i + 1 + insn->off != tgt_idx)
 				continue;
-			if (signed_add16_overflows(insn->imm, delta))
+			if (check_add_overflow(insn->off, delta, &off))
 				return -ERANGE;
-			insn->off += delta;
+			insn->off = off;
 		}
 	}
 	return 0;
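The second branch also carries the fix from patch 1: the old code tested
insn->imm for overflow but then adjusted insn->off, so a delta overflowing
the 16-bit off could slip through whenever imm happened to stay in range.
A contrived standalone illustration of the miss (the values are
hypothetical, chosen only to trigger the gap):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* copy of the dropped helper, in userspace types */
    static bool signed_add16_overflows(int16_t a, int16_t b)
    {
            /* Do the add in u16, where overflow is well-defined */
            int16_t res = (int16_t)((uint16_t)a + (uint16_t)b);

            if (b < 0)
                    return res > a;
            return res < a;
    }

    int main(void)
    {
            int32_t imm = 0;         /* this jump encodes its target in off */
            int16_t off = INT16_MAX; /* off is already at its limit */
            int16_t delta = 1, new_off;

            /* old check: wrong field consulted, reports no overflow */
            assert(!signed_add16_overflows((int16_t)imm, delta));
            /* new check on the field actually updated catches it */
            assert(__builtin_add_overflow(off, delta, &new_off));
            return 0;
    }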