selftests/bpf: move get_ksyms and get_addrs to trace_helpers.c

Sometimes we need to get all the kernel functions that can be traced, so
move get_syms() and get_addrs() from kprobe_multi_test.c to
trace_helpers.c and rename them to bpf_get_ksyms() and bpf_get_addrs().

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Link: https://lore.kernel.org/r/20250904021011.14069-2-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Menglong Dong
2025-09-04 10:10:09 +08:00
committed by Alexei Starovoitov
parent c9110e6f72
commit 8bad31edf5
3 changed files with 220 additions and 217 deletions

View File

@@ -422,220 +422,6 @@ static void test_unique_match(void)
kprobe_multi__destroy(skel);
}
/* Hash callback for the dedup hashmap; keys are symbol-name strings. */
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
	const char *sym_name = (const char *) key;

	return str_hash(sym_name);
}
/* Equality callback for the dedup hashmap; compares keys as strings. */
static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
	const char *sym1 = (const char *) key1;
	const char *sym2 = (const char *) key2;

	return !strcmp(sym1, sym2);
}
/*
 * Reject entries from the wrong side of the kernel/module split:
 * module symbols carry a '[module]' suffix, kernel symbols do not.
 */
static bool is_invalid_entry(char *buf, bool kernel)
{
	bool is_module_sym = strchr(buf, '[') != NULL;

	return kernel ? is_module_sym : !is_module_sym;
}
/*
 * We attach to almost all kernel functions and some of them will
 * cause 'suspicious RCU usage' when fprobe is attached to them.
 * Filter out the current culprits - arch_cpu_idle, default_idle and
 * rcu_* functions - plus entries that can never be attached.
 */
static bool skip_entry(char *name)
{
	static const char * const exact_skip[] = {
		"arch_cpu_idle",
		"default_idle",
		"bpf_dispatcher_xdp_func",
	};
	static const char * const prefix_skip[] = {
		"rcu_",
		"__ftrace_invalid_address__",
	};
	size_t i;

	for (i = 0; i < sizeof(exact_skip) / sizeof(exact_skip[0]); i++) {
		if (!strcmp(name, exact_skip[i]))
			return true;
	}
	for (i = 0; i < sizeof(prefix_skip) / sizeof(prefix_skip[0]); i++) {
		if (!strncmp(name, prefix_skip[i], strlen(prefix_skip[i])))
			return true;
	}
	return false;
}
/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *suf1 = strstr(name1, ".llvm.");
	const char *suf2 = strstr(name2, ".llvm.");
	size_t len1 = suf1 ? (size_t)(suf1 - name1) : strlen(name1);
	size_t len2 = suf2 ? (size_t)(suf2 - name2) : strlen(name2);
	int cmp;

	if (len1 == len2)
		return strncmp(name1, name2, len1);

	/* Unequal stripped lengths: the shorter name orders first on a tie. */
	if (len1 < len2) {
		cmp = strncmp(name1, name2, len1);
		return cmp <= 0 ? -1 : 1;
	}
	cmp = strncmp(name1, name2, len2);
	return cmp >= 0 ? 1 : -1;
}
/* qsort()-style comparator over struct ksym entries, by suffix-stripped name. */
static int load_kallsyms_compare(const void *p1, const void *p2)
{
	const struct ksym *sym1 = p1;
	const struct ksym *sym2 = p2;

	return compare_name(sym1->name, sym2->name);
}
/* bsearch()-style comparator: a raw name key against a struct ksym. */
static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
{
	const char *key = p1;

	return compare_name(key, p2->name);
}
/*
 * Collect the names of all kernel (kernel == true) or module
 * (kernel == false) functions listed in available_filter_functions.
 *
 * On success returns 0, stores the name array in *symsp and its length
 * in *cntp. The strings point into the loaded kallsyms table (ks->name),
 * so the caller frees only the array itself, not the entries; the ksyms
 * table is deliberately kept alive for that reason.
 */
static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	/* Sort kallsyms with '.llvm.<hash>' suffixes ignored so lookups
	 * below match the names ftrace reports. */
	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
		return -EINVAL;
	/*
	 * The available_filter_functions contains many duplicates,
	 * but other than that all symbols are usable in kprobe multi
	 * interface.
	 * Filtering out duplicates by using hashmap__add, which won't
	 * add existing entry.
	 */
	/* Prefer the non-debugfs tracefs mount when it is available. */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
	if (!f)
		return -EINVAL;
	map = hashmap__new(symbol_hash, symbol_equal, NULL);
	if (IS_ERR(map)) {
		/* NOTE(review): 'map' stays an encoded error pointer here and is
		 * passed to hashmap__free() below — presumably tolerated; verify. */
		err = libbpf_get_error(map);
		goto error;
	}
	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;
		/* Drop the name from the previous iteration; sscanf(%ms)
		 * allocates a fresh one. */
		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		if (skip_entry(name))
			continue;
		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}
		/* Store the canonical kallsyms name, not the ftrace spelling,
		 * so '.llvm.<hash>' variants dedup to one entry. */
		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
		}
		if (err)
			goto error;
		/* Grow the output array as needed (cap tracks capacity). */
		err = libbpf_ensure_mem((void **) &syms, &cap,
					sizeof(*syms), cnt + 1);
		if (err)
			goto error;
		syms[cnt++] = ksym_name;
	}
	*symsp = syms;
	*cntp = cnt;
error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err)
		free(syms);
	return err;
}
/*
 * Collect the addresses of all kernel (kernel == true) or module
 * (kernel == false) functions from available_filter_functions_addrs.
 *
 * On success returns 0 and hands the caller a malloc()ed array in
 * *addrsp with *cntp entries; the caller frees it. Returns -ENOENT
 * when the tracefs file does not exist so callers can skip the test.
 */
static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	/* Prefer the non-debugfs tracefs mount when it is available. */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
	if (!f)
		return -ENOENT;

	/* In my local setup, the number of entries is 50k+ so let us initially
	 * allocate space to hold 64k entries. If 64k is not enough, incrementally
	 * increase 1k each time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(*addrs));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* Drop the name from the previous iteration; sscanf(%ms)
		 * allocates a fresh one. */
		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			/* Grow by byte size, not element count: the previous
			 * code passed max_cnt bytes to realloc(), shrinking
			 * the buffer and overflowing it past 64k entries. */
			tmp_addrs = realloc(addrs, max_cnt * sizeof(*addrs));
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long) addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}
static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
{
long attach_start_ns, attach_end_ns;
@@ -670,7 +456,7 @@ static void test_kprobe_multi_bench_attach(bool kernel)
char **syms = NULL;
size_t cnt = 0;
if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
return;
skel = kprobe_multi_empty__open_and_load();
@@ -696,13 +482,13 @@ static void test_kprobe_multi_bench_attach_addr(bool kernel)
size_t cnt = 0;
int err;
err = get_addrs(&addrs, &cnt, kernel);
err = bpf_get_addrs(&addrs, &cnt, kernel);
if (err == -ENOENT) {
test__skip();
return;
}
if (!ASSERT_OK(err, "get_addrs"))
if (!ASSERT_OK(err, "bpf_get_addrs"))
return;
skel = kprobe_multi_empty__open_and_load();

View File

@@ -17,6 +17,7 @@
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
#include "bpf/hashmap.h"
#include "bpf/libbpf_internal.h"
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
@@ -519,3 +520,216 @@ void read_trace_pipe(void)
{
read_trace_pipe_iter(trace_pipe_cb, NULL, 0);
}
/* Hashmap hash callback: keys are pointers to symbol-name strings. */
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
	return str_hash((const char *) key);
}
/* Hashmap equality callback: two keys match when the strings match. */
static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
	const char *sym1 = (const char *) key1;
	const char *sym2 = (const char *) key2;

	return strcmp(sym1, sym2) == 0;
}
/*
 * Filter-file entries for module symbols carry a '[module]' suffix;
 * kernel symbols do not. Reject the side the caller did not ask for.
 */
static bool is_invalid_entry(char *buf, bool kernel)
{
	bool has_module_tag = strchr(buf, '[') != NULL;

	return kernel == has_module_tag;
}
/*
 * We attach to almost all kernel functions and some of them will
 * cause 'suspicious RCU usage' when fprobe is attached to them.
 * Filter out the current culprits - arch_cpu_idle, default_idle and
 * rcu_* functions - plus entries that can never be attached.
 */
static bool skip_entry(char *name)
{
	if (strcmp(name, "arch_cpu_idle") == 0 ||
	    strcmp(name, "default_idle") == 0 ||
	    strcmp(name, "bpf_dispatcher_xdp_func") == 0)
		return true;

	if (strncmp(name, "rcu_", strlen("rcu_")) == 0)
		return true;

	if (strncmp(name, "__ftrace_invalid_address__",
		    strlen("__ftrace_invalid_address__")) == 0)
		return true;

	return false;
}
/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *suffix1 = strstr(name1, ".llvm.");
	const char *suffix2 = strstr(name2, ".llvm.");
	size_t eff1, eff2;
	int rc;

	/* Effective lengths with any '.llvm.<hash>' tail stripped. */
	eff1 = suffix1 ? (size_t)(suffix1 - name1) : strlen(name1);
	eff2 = suffix2 ? (size_t)(suffix2 - name2) : strlen(name2);

	if (eff1 == eff2)
		return strncmp(name1, name2, eff1);

	if (eff1 < eff2) {
		rc = strncmp(name1, name2, eff1);
		return rc <= 0 ? -1 : 1;
	}

	rc = strncmp(name1, name2, eff2);
	return rc >= 0 ? 1 : -1;
}
/* Sort comparator for the kallsyms table, by suffix-stripped name. */
static int load_kallsyms_compare(const void *p1, const void *p2)
{
	const struct ksym *lhs = p1;
	const struct ksym *rhs = p2;

	return compare_name(lhs->name, rhs->name);
}
/* Search comparator: a raw symbol-name key against a table entry. */
static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
{
	const char *wanted = p1;

	return compare_name(wanted, p2->name);
}
/*
 * Collect the names of all kernel (kernel == true) or module
 * (kernel == false) functions listed in available_filter_functions.
 *
 * On success returns 0, stores the name array in *symsp and its length
 * in *cntp. The strings point into the loaded kallsyms table (ks->name),
 * so the caller frees only the array itself, not the entries; the ksyms
 * table is deliberately kept alive for that reason.
 */
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	/* Sort kallsyms with '.llvm.<hash>' suffixes ignored so lookups
	 * below match the names ftrace reports. */
	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ksyms)
		return -EINVAL;
	/*
	 * The available_filter_functions contains many duplicates,
	 * but other than that all symbols are usable to trace.
	 * Filtering out duplicates by using hashmap__add, which won't
	 * add existing entry.
	 */
	/* Prefer the non-debugfs tracefs mount when it is available. */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
	if (!f)
		return -EINVAL;
	map = hashmap__new(symbol_hash, symbol_equal, NULL);
	if (IS_ERR(map)) {
		/* NOTE(review): 'map' stays an encoded error pointer here and is
		 * passed to hashmap__free() below — presumably tolerated; verify. */
		err = libbpf_get_error(map);
		goto error;
	}
	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;
		/* Drop the name from the previous iteration; sscanf(%ms)
		 * allocates a fresh one. */
		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		if (skip_entry(name))
			continue;
		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}
		/* Store the canonical kallsyms name, not the ftrace spelling,
		 * so '.llvm.<hash>' variants dedup to one entry. */
		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
		}
		if (err)
			goto error;
		/* Grow the output array as needed (cap tracks capacity). */
		err = libbpf_ensure_mem((void **) &syms, &cap,
					sizeof(*syms), cnt + 1);
		if (err)
			goto error;
		syms[cnt++] = ksym_name;
	}
	*symsp = syms;
	*cntp = cnt;
error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err)
		free(syms);
	return err;
}
/*
 * Collect the addresses of all kernel (kernel == true) or module
 * (kernel == false) functions from available_filter_functions_addrs.
 *
 * On success returns 0 and hands the caller a malloc()ed array in
 * *addrsp with *cntp entries; the caller frees it. Returns -ENOENT
 * when the tracefs file does not exist so callers can skip the test.
 */
int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	/* Prefer the non-debugfs tracefs mount when it is available. */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");
	if (!f)
		return -ENOENT;

	/* In my local setup, the number of entries is 50k+ so let us initially
	 * allocate space to hold 64k entries. If 64k is not enough, incrementally
	 * increase 1k each time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(*addrs));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* Drop the name from the previous iteration; sscanf(%ms)
		 * allocates a fresh one. */
		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			/* Grow by byte size, not element count: the previous
			 * code passed max_cnt bytes to realloc(), shrinking
			 * the buffer and overflowing it past 64k entries. */
			tmp_addrs = realloc(addrs, max_cnt * sizeof(*addrs));
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long) addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}

View File

@@ -41,4 +41,7 @@ ssize_t get_rel_offset(uintptr_t addr);
int read_build_id(const char *path, char *build_id, size_t size);
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel);
int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel);
#endif