mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-09 21:04:42 -04:00
Merge branch 'bpf-allow-bpf_for_each_map_elem-helper-with-different-input-maps'
Philo Lu says:
====================
bpf: allow bpf_for_each_map_elem() helper with different input maps
Currently, taking different maps within a single bpf_for_each_map_elem
call is not allowed. For example, the following code cannot pass the
verifier (with error "tail_call abusing map_ptr"):
```
static void test_by_pid(int pid)
{
if (pid <= 100)
bpf_for_each_map_elem(&map1, map_elem_cb, NULL, 0);
else
bpf_for_each_map_elem(&map2, map_elem_cb, NULL, 0);
}
```
This is because during bpf_for_each_map_elem verifying,
bpf_insn_aux_data->map_ptr_state is expected as map_ptr (instead of poison
state), which is then needed by set_map_elem_callback_state. However, as
there are two different map ptr inputs, map_ptr_state is marked as
BPF_MAP_PTR_POISON, and thus the second map_ptr would be lost.
BPF_MAP_PTR_POISON is also needed by bpf_for_each_map_elem to skip
retpoline optimization in do_misc_fixups(). Therefore, map_ptr_state and
map_ptr are both needed for bpf_for_each_map_elem.
This patchset solves it by transforming bpf_insn_aux_data->map_ptr_state into a
new struct, storing poison/unpriv state and map pointer together without
additional memory overhead. Then bpf_for_each_map_elem works well with
different input maps. It also makes map_ptr_state logic clearer.
A test case is added to the selftests; it would fail to load without this
patchset.
Changelogs
-> v1:
- PATCH 1/3:
- make the commit log clearer
- change poison and unpriv to bool in struct bpf_map_ptr_state, also the
return value in bpf_map_ptr_poisoned() and bpf_map_ptr_unpriv()
- PATCH 2/3:
- change the comments in set_map_elem_callback_state()
- PATCH 3/3:
- remove the "skipping the last element" logic during map updating
- change if() to ASSERT_OK()
Please review, thanks.
====================
Link: https://lore.kernel.org/r/20240405025536.18113-1-lulie@linux.alibaba.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
@@ -502,6 +502,13 @@ struct bpf_loop_inline_state {
|
||||
u32 callback_subprogno; /* valid when fit_for_inline is true */
|
||||
};
|
||||
|
||||
/* Pointer and state for maps.
 *
 * Replaces the old tagged-pointer encoding (pointer bits overloaded with
 * BPF_MAP_PTR_UNPRIV / BPF_MAP_PTR_POISON) so the map pointer survives even
 * when the instruction is poisoned. Fits in the same union slot as the old
 * unsigned long, so there is no additional memory overhead.
 */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;	/* multiple/abusive map inputs seen for this insn */
	bool unpriv;	/* at least one input map lacks bypass_spec_v1 */
};
|
||||
|
||||
/* Possible states for alu_state member. */
|
||||
#define BPF_ALU_SANITIZE_SRC (1U << 0)
|
||||
#define BPF_ALU_SANITIZE_DST (1U << 1)
|
||||
@@ -514,7 +521,7 @@ struct bpf_loop_inline_state {
|
||||
struct bpf_insn_aux_data {
|
||||
union {
|
||||
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
|
||||
unsigned long map_ptr_state; /* pointer/poison value for maps */
|
||||
struct bpf_map_ptr_state map_ptr_state;
|
||||
s32 call_imm; /* saved imm field of call insn */
|
||||
u32 alu_limit; /* limit for add/sub register with pointer */
|
||||
struct {
|
||||
|
||||
@@ -190,11 +190,6 @@ struct bpf_verifier_stack_elem {
|
||||
#define BPF_MAP_KEY_POISON (1ULL << 63)
|
||||
#define BPF_MAP_KEY_SEEN (1ULL << 62)
|
||||
|
||||
#define BPF_MAP_PTR_UNPRIV 1UL
|
||||
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
|
||||
POISON_POINTER_DELTA))
|
||||
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
|
||||
|
||||
#define BPF_GLOBAL_PERCPU_MA_MAX_SIZE 512
|
||||
|
||||
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
|
||||
@@ -209,21 +204,22 @@ static bool is_trusted_reg(const struct bpf_reg_state *reg);
|
||||
|
||||
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
|
||||
{
|
||||
return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
|
||||
return aux->map_ptr_state.poison;
|
||||
}
|
||||
|
||||
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
|
||||
{
|
||||
return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
|
||||
return aux->map_ptr_state.unpriv;
|
||||
}
|
||||
|
||||
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
|
||||
const struct bpf_map *map, bool unpriv)
|
||||
struct bpf_map *map,
|
||||
bool unpriv, bool poison)
|
||||
{
|
||||
BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
|
||||
unpriv |= bpf_map_ptr_unpriv(aux);
|
||||
aux->map_ptr_state = (unsigned long)map |
|
||||
(unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
|
||||
aux->map_ptr_state.unpriv = unpriv;
|
||||
aux->map_ptr_state.poison = poison;
|
||||
aux->map_ptr_state.map_ptr = map;
|
||||
}
|
||||
|
||||
static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
|
||||
@@ -9655,12 +9651,8 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env,
|
||||
struct bpf_map *map;
|
||||
int err;
|
||||
|
||||
if (bpf_map_ptr_poisoned(insn_aux)) {
|
||||
verbose(env, "tail_call abusing map_ptr\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map = BPF_MAP_PTR(insn_aux->map_ptr_state);
|
||||
/* valid map_ptr and poison value does not matter */
|
||||
map = insn_aux->map_ptr_state.map_ptr;
|
||||
if (!map->ops->map_set_for_each_callback_args ||
|
||||
!map->ops->map_for_each_callback) {
|
||||
verbose(env, "callback function not allowed for map\n");
|
||||
@@ -10019,12 +10011,12 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (!BPF_MAP_PTR(aux->map_ptr_state))
|
||||
if (!aux->map_ptr_state.map_ptr)
|
||||
bpf_map_ptr_store(aux, meta->map_ptr,
|
||||
!meta->map_ptr->bypass_spec_v1);
|
||||
else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
|
||||
bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
|
||||
!meta->map_ptr->bypass_spec_v1);
|
||||
!meta->map_ptr->bypass_spec_v1, false);
|
||||
else if (aux->map_ptr_state.map_ptr != meta->map_ptr)
|
||||
bpf_map_ptr_store(aux, meta->map_ptr,
|
||||
!meta->map_ptr->bypass_spec_v1, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -19840,7 +19832,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
||||
!bpf_map_ptr_unpriv(aux)) {
|
||||
struct bpf_jit_poke_descriptor desc = {
|
||||
.reason = BPF_POKE_REASON_TAIL_CALL,
|
||||
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
|
||||
.tail_call.map = aux->map_ptr_state.map_ptr,
|
||||
.tail_call.key = bpf_map_key_immediate(aux),
|
||||
.insn_idx = i + delta,
|
||||
};
|
||||
@@ -19869,7 +19861,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
|
||||
map_ptr = aux->map_ptr_state.map_ptr;
|
||||
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
|
||||
map_ptr->max_entries, 2);
|
||||
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
|
||||
@@ -19977,7 +19969,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
||||
if (bpf_map_ptr_poisoned(aux))
|
||||
goto patch_call_imm;
|
||||
|
||||
map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
|
||||
map_ptr = aux->map_ptr_state.map_ptr;
|
||||
ops = map_ptr->ops;
|
||||
if (insn->imm == BPF_FUNC_map_lookup_elem &&
|
||||
ops->map_gen_lookup) {
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "for_each_hash_map_elem.skel.h"
|
||||
#include "for_each_array_map_elem.skel.h"
|
||||
#include "for_each_map_elem_write_key.skel.h"
|
||||
#include "for_each_multi_maps.skel.h"
|
||||
|
||||
static unsigned int duration;
|
||||
|
||||
@@ -143,6 +144,65 @@ static void test_write_map_key(void)
|
||||
for_each_map_elem_write_key__destroy(skel);
|
||||
}
|
||||
|
||||
static void test_multi_maps(void)
|
||||
{
|
||||
struct for_each_multi_maps *skel;
|
||||
__u64 val, array_total, hash_total;
|
||||
__u32 key, max_entries;
|
||||
int i, err;
|
||||
|
||||
LIBBPF_OPTS(bpf_test_run_opts, topts,
|
||||
.data_in = &pkt_v4,
|
||||
.data_size_in = sizeof(pkt_v4),
|
||||
.repeat = 1,
|
||||
);
|
||||
|
||||
skel = for_each_multi_maps__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "for_each_multi_maps__open_and_load"))
|
||||
return;
|
||||
|
||||
array_total = 0;
|
||||
max_entries = bpf_map__max_entries(skel->maps.arraymap);
|
||||
for (i = 0; i < max_entries; i++) {
|
||||
key = i;
|
||||
val = i + 1;
|
||||
array_total += val;
|
||||
err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
|
||||
&val, sizeof(val), BPF_ANY);
|
||||
if (!ASSERT_OK(err, "array_map_update"))
|
||||
goto out;
|
||||
}
|
||||
|
||||
hash_total = 0;
|
||||
max_entries = bpf_map__max_entries(skel->maps.hashmap);
|
||||
for (i = 0; i < max_entries; i++) {
|
||||
key = i + 100;
|
||||
val = i + 1;
|
||||
hash_total += val;
|
||||
err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
|
||||
&val, sizeof(val), BPF_ANY);
|
||||
if (!ASSERT_OK(err, "hash_map_update"))
|
||||
goto out;
|
||||
}
|
||||
|
||||
skel->bss->data_output = 0;
|
||||
skel->bss->use_array = 1;
|
||||
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
|
||||
ASSERT_OK(err, "bpf_prog_test_run_opts");
|
||||
ASSERT_OK(topts.retval, "retval");
|
||||
ASSERT_EQ(skel->bss->data_output, array_total, "array output");
|
||||
|
||||
skel->bss->data_output = 0;
|
||||
skel->bss->use_array = 0;
|
||||
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
|
||||
ASSERT_OK(err, "bpf_prog_test_run_opts");
|
||||
ASSERT_OK(topts.retval, "retval");
|
||||
ASSERT_EQ(skel->bss->data_output, hash_total, "hash output");
|
||||
|
||||
out:
|
||||
for_each_multi_maps__destroy(skel);
|
||||
}
|
||||
|
||||
void test_for_each(void)
|
||||
{
|
||||
if (test__start_subtest("hash_map"))
|
||||
@@ -151,4 +211,6 @@ void test_for_each(void)
|
||||
test_array_map();
|
||||
if (test__start_subtest("write_map_key"))
|
||||
test_write_map_key();
|
||||
if (test__start_subtest("multi_maps"))
|
||||
test_multi_maps();
|
||||
}
|
||||
|
||||
49
tools/testing/selftests/bpf/progs/for_each_multi_maps.c
Normal file
49
tools/testing/selftests/bpf/progs/for_each_multi_maps.c
Normal file
@@ -0,0 +1,49 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
/* Selftest program: call bpf_for_each_map_elem() with two different maps
 * from a single call site, selected at runtime via the use_array flag.
 * Loading this program fails on kernels that poison map_ptr_state when
 * multiple maps reach one helper call.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 3);
	__type(key, __u32);
	__type(value, __u64);
} arraymap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 5);
	__type(key, __u32);
	__type(value, __u64);
} hashmap SEC(".maps");

struct callback_ctx {
	int output;
};

/* Written by the program for userspace verification. */
u32 data_output = 0;
/* Set from userspace: nonzero selects arraymap, zero selects hashmap. */
int use_array = 0;

/* Callback invoked once per map element: accumulate values into ctx.
 * Returning 0 continues the iteration over all elements.
 */
static __u64
check_map_elem(struct bpf_map *map, __u32 *key, __u64 *val,
	       struct callback_ctx *data)
{
	data->output += *val;
	return 0;
}

SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
	struct callback_ctx data;

	data.output = 0;
	/* Same helper call site receives two different maps depending on
	 * use_array — the behavior this patchset makes the verifier accept.
	 */
	if (use_array)
		bpf_for_each_map_elem(&arraymap, check_map_elem, &data, 0);
	else
		bpf_for_each_map_elem(&hashmap, check_map_elem, &data, 0);
	data_output = data.output;

	return 0;
}
|
||||
Reference in New Issue
Block a user