mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 07:59:42 -04:00
Merge branch 'selftests-bpf-benchmark-all-symbols-for-kprobe-multi'
Menglong Dong says: ==================== selftests/bpf: benchmark all symbols for kprobe-multi Add the benchmark testcase "kprobe-multi-all", which will hook all the kernel functions during the testing. This series is separated out from [1]. Changes since V2: * add some comment to attach_ksyms_all, which notes that don't run the testing on a debug kernel Changes since V1: * introduce trace_blacklist instead of copy-pasting strcmp in the 2nd patch * use fprintf() instead of printf() in 3rd patch Link: https://lore.kernel.org/bpf/20250817024607.296117-1-dongml2@chinatelecom.cn/ [1] ==================== Link: https://patch.msgid.link/20250904021011.14069-1-dongml2@chinatelecom.cn Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
@@ -512,6 +512,8 @@ extern const struct bench bench_trig_kretprobe;
|
||||
extern const struct bench bench_trig_kprobe_multi;
|
||||
extern const struct bench bench_trig_kretprobe_multi;
|
||||
extern const struct bench bench_trig_fentry;
|
||||
extern const struct bench bench_trig_kprobe_multi_all;
|
||||
extern const struct bench bench_trig_kretprobe_multi_all;
|
||||
extern const struct bench bench_trig_fexit;
|
||||
extern const struct bench bench_trig_fmodret;
|
||||
extern const struct bench bench_trig_tp;
|
||||
@@ -587,6 +589,8 @@ static const struct bench *benchs[] = {
|
||||
&bench_trig_kprobe_multi,
|
||||
&bench_trig_kretprobe_multi,
|
||||
&bench_trig_fentry,
|
||||
&bench_trig_kprobe_multi_all,
|
||||
&bench_trig_kretprobe_multi_all,
|
||||
&bench_trig_fexit,
|
||||
&bench_trig_fmodret,
|
||||
&bench_trig_tp,
|
||||
|
||||
@@ -226,6 +226,65 @@ static void trigger_fentry_setup(void)
|
||||
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
|
||||
}
|
||||
|
||||
/*
 * Attach the given empty program to (almost) every traceable kernel
 * function via the kprobe-multi link, as retprobes when @kretprobe is
 * true.  Exits the benchmark process on any failure.
 */
static void attach_ksyms_all(struct bpf_program *empty, bool kretprobe)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	char **syms = NULL;
	size_t cnt = 0;

	/* Some recursive functions will be skipped in
	 * bpf_get_ksyms -> skip_entry, as they can introduce sufficient
	 * overhead. However, it's difficult to skip all the recursive
	 * functions for a debug kernel.
	 *
	 * So, don't run the kprobe-multi-all and kretprobe-multi-all on
	 * a debug kernel.
	 */
	if (bpf_get_ksyms(&syms, &cnt, true)) {
		fprintf(stderr, "failed to get ksyms\n");
		exit(1);
	}

	opts.syms = (const char **) syms;
	opts.cnt = cnt;
	opts.retprobe = kretprobe;
	/* attach empty to all the kernel functions except bpf_get_numa_node_id. */
	if (!bpf_program__attach_kprobe_multi_opts(empty, NULL, &opts)) {
		fprintf(stderr, "failed to attach bpf_program__attach_kprobe_multi_opts to all\n");
		exit(1);
	}
}
|
||||
|
||||
static void trigger_kprobe_multi_all_setup(void)
|
||||
{
|
||||
struct bpf_program *prog, *empty;
|
||||
|
||||
setup_ctx();
|
||||
empty = ctx.skel->progs.bench_kprobe_multi_empty;
|
||||
prog = ctx.skel->progs.bench_trigger_kprobe_multi;
|
||||
bpf_program__set_autoload(empty, true);
|
||||
bpf_program__set_autoload(prog, true);
|
||||
load_ctx();
|
||||
|
||||
attach_ksyms_all(empty, false);
|
||||
attach_bpf(prog);
|
||||
}
|
||||
|
||||
static void trigger_kretprobe_multi_all_setup(void)
|
||||
{
|
||||
struct bpf_program *prog, *empty;
|
||||
|
||||
setup_ctx();
|
||||
empty = ctx.skel->progs.bench_kretprobe_multi_empty;
|
||||
prog = ctx.skel->progs.bench_trigger_kretprobe_multi;
|
||||
bpf_program__set_autoload(empty, true);
|
||||
bpf_program__set_autoload(prog, true);
|
||||
load_ctx();
|
||||
|
||||
attach_ksyms_all(empty, true);
|
||||
attach_bpf(prog);
|
||||
}
|
||||
|
||||
static void trigger_fexit_setup(void)
|
||||
{
|
||||
setup_ctx();
|
||||
@@ -512,6 +571,8 @@ BENCH_TRIG_KERNEL(kretprobe, "kretprobe");
|
||||
BENCH_TRIG_KERNEL(kprobe_multi, "kprobe-multi");
|
||||
BENCH_TRIG_KERNEL(kretprobe_multi, "kretprobe-multi");
|
||||
BENCH_TRIG_KERNEL(fentry, "fentry");
|
||||
BENCH_TRIG_KERNEL(kprobe_multi_all, "kprobe-multi-all");
|
||||
BENCH_TRIG_KERNEL(kretprobe_multi_all, "kretprobe-multi-all");
|
||||
BENCH_TRIG_KERNEL(fexit, "fexit");
|
||||
BENCH_TRIG_KERNEL(fmodret, "fmodret");
|
||||
BENCH_TRIG_KERNEL(tp, "tp");
|
||||
|
||||
@@ -6,8 +6,8 @@ def_tests=( \
|
||||
usermode-count kernel-count syscall-count \
|
||||
fentry fexit fmodret \
|
||||
rawtp tp \
|
||||
kprobe kprobe-multi \
|
||||
kretprobe kretprobe-multi \
|
||||
kprobe kprobe-multi kprobe-multi-all \
|
||||
kretprobe kretprobe-multi kretprobe-multi-all \
|
||||
)
|
||||
|
||||
tests=("$@")
|
||||
|
||||
@@ -422,220 +422,6 @@ static void test_unique_match(void)
|
||||
kprobe_multi__destroy(skel);
|
||||
}
|
||||
|
||||
/* hashmap callback: keys are C strings, hash by content. */
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
	const char *sym = (const char *) key;

	return str_hash(sym);
}
|
||||
|
||||
/* hashmap callback: keys are C strings, equal when contents match. */
static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
	const char *s1 = (const char *) key1;
	const char *s2 = (const char *) key2;

	return strcmp(s1, s2) == 0;
}
|
||||
|
||||
/*
 * available_filter_functions lines for module symbols carry a
 * "[module]" suffix; when scanning for kernel symbols those lines are
 * invalid, and vice versa for module symbols.
 */
static bool is_invalid_entry(char *buf, bool kernel)
{
	bool has_module_tag = strchr(buf, '[') != NULL;

	return kernel ? has_module_tag : !has_module_tag;
}
|
||||
|
||||
static bool skip_entry(char *name)
{
	/*
	 * We attach to almost all kernel functions and some of them
	 * will cause 'suspicious RCU usage' when fprobe is attached
	 * to them. Filter out the current culprits - arch_cpu_idle
	 * default_idle and rcu_* functions.
	 */
	static const char * const exact_skip[] = {
		"arch_cpu_idle",
		"default_idle",
		"bpf_dispatcher_xdp_func",
	};
	size_t i;

	for (i = 0; i < sizeof(exact_skip) / sizeof(exact_skip[0]); i++) {
		if (strcmp(name, exact_skip[i]) == 0)
			return true;
	}

	if (strncmp(name, "rcu_", 4) == 0)
		return true;
	if (strncmp(name, "__ftrace_invalid_address__",
		    sizeof("__ftrace_invalid_address__") - 1) == 0)
		return true;

	return false;
}
|
||||
|
||||
/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *suf1 = strstr(name1, ".llvm.");
	const char *suf2 = strstr(name2, ".llvm.");
	int len1 = suf1 ? (int)(suf1 - name1) : (int)strlen(name1);
	int len2 = suf2 ? (int)(suf2 - name2) : (int)strlen(name2);
	int cmp;

	if (len1 == len2)
		return strncmp(name1, name2, len1);

	/* unequal stripped lengths: shorter sorts first on a tie prefix */
	if (len1 < len2) {
		cmp = strncmp(name1, name2, len1);
		return cmp <= 0 ? -1 : 1;
	}

	cmp = strncmp(name1, name2, len2);
	return cmp >= 0 ? 1 : -1;
}
|
||||
|
||||
static int load_kallsyms_compare(const void *p1, const void *p2)
|
||||
{
|
||||
return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
|
||||
}
|
||||
|
||||
static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
|
||||
{
|
||||
return compare_name(p1, p2->name);
|
||||
}
|
||||
|
||||
/*
 * Collect the de-duplicated names of all traceable symbols (kernel or
 * module symbols, selected by @kernel) from available_filter_functions,
 * resolved against a locally loaded kallsyms table.
 *
 * On success (return 0) *symsp receives a malloc'd array of @*cntp
 * pointers; the pointers reference names owned by the kallsyms table,
 * so only the array itself should be freed by the caller.  Returns a
 * negative errno on failure.
 */
static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
		return -EINVAL;

	/*
	 * The available_filter_functions contains many duplicates,
	 * but other than that all symbols are usable in kprobe multi
	 * interface.
	 * Filtering out duplicates by using hashmap__add, which won't
	 * add existing entry.
	 */

	/* prefer the tracefs mount point; fall back to legacy debugfs */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");

	if (!f)
		return -EINVAL;

	map = hashmap__new(symbol_hash, symbol_equal, NULL);
	if (IS_ERR(map)) {
		err = libbpf_get_error(map);
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* %ms allocates; drop the previous line's name first */
		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		if (skip_entry(name))
			continue;

		/* use the kallsyms-owned copy of the name so it outlives @name */
		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}

		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
		}
		if (err)
			goto error;

		err = libbpf_ensure_mem((void **) &syms, &cap,
					sizeof(*syms), cnt + 1);
		if (err)
			goto error;

		syms[cnt++] = ksym_name;
	}

	*symsp = syms;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err)
		free(syms);
	return err;
}
|
||||
|
||||
/*
 * Collect the addresses of all traceable symbols (kernel or module,
 * selected by @kernel) from available_filter_functions_addrs.
 *
 * On success (return 0) *addrsp receives a malloc'd array of @*cntp
 * addresses owned by the caller.  Returns -ENOENT when the _addrs file
 * does not exist (caller should skip), other negative errno on failure.
 */
static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	/* prefer the tracefs mount point; fall back to legacy debugfs */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");

	if (!f)
		return -ENOENT;

	/* In my local setup, the number of entries is 50k+ so Let us initially
	 * allocate space to hold 64k entries. If 64k is not enough, incrementally
	 * increase 1k each time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(*addrs));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* %ms allocates; drop the previous line's name first */
		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			/* fix: realloc takes a byte size, not an element
			 * count — passing bare max_cnt shrank the buffer
			 * and overflowed it on the next stores.
			 */
			tmp_addrs = realloc(addrs, max_cnt * sizeof(*addrs));
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long)addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}
|
||||
|
||||
static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
|
||||
{
|
||||
long attach_start_ns, attach_end_ns;
|
||||
@@ -670,7 +456,7 @@ static void test_kprobe_multi_bench_attach(bool kernel)
|
||||
char **syms = NULL;
|
||||
size_t cnt = 0;
|
||||
|
||||
if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
|
||||
if (!ASSERT_OK(bpf_get_ksyms(&syms, &cnt, kernel), "bpf_get_ksyms"))
|
||||
return;
|
||||
|
||||
skel = kprobe_multi_empty__open_and_load();
|
||||
@@ -696,13 +482,13 @@ static void test_kprobe_multi_bench_attach_addr(bool kernel)
|
||||
size_t cnt = 0;
|
||||
int err;
|
||||
|
||||
err = get_addrs(&addrs, &cnt, kernel);
|
||||
err = bpf_get_addrs(&addrs, &cnt, kernel);
|
||||
if (err == -ENOENT) {
|
||||
test__skip();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!ASSERT_OK(err, "get_addrs"))
|
||||
if (!ASSERT_OK(err, "bpf_get_addrs"))
|
||||
return;
|
||||
|
||||
skel = kprobe_multi_empty__open_and_load();
|
||||
|
||||
@@ -97,6 +97,12 @@ int bench_trigger_kprobe_multi(void *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Empty kprobe-multi prog; the kprobe-multi-all benchmark attaches it
 * to (almost) all kernel functions so only attach overhead is measured.
 */
SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_kprobe_multi_empty(void *ctx)
{
	return 0;
}
|
||||
|
||||
SEC("?kretprobe.multi/bpf_get_numa_node_id")
|
||||
int bench_trigger_kretprobe_multi(void *ctx)
|
||||
{
|
||||
@@ -104,6 +110,12 @@ int bench_trigger_kretprobe_multi(void *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Empty kretprobe-multi prog; the kretprobe-multi-all benchmark
 * attaches it to (almost) all kernel functions as retprobes.
 */
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_kretprobe_multi_empty(void *ctx)
{
	return 0;
}
|
||||
|
||||
SEC("?fentry/bpf_get_numa_node_id")
|
||||
int bench_trigger_fentry(void *ctx)
|
||||
{
|
||||
|
||||
@@ -17,7 +17,9 @@
|
||||
#include <linux/limits.h>
|
||||
#include <libelf.h>
|
||||
#include <gelf.h>
|
||||
#include "bpf/hashmap.h"
|
||||
#include "bpf/libbpf_internal.h"
|
||||
#include "bpf_util.h"
|
||||
|
||||
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
|
||||
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
|
||||
@@ -519,3 +521,235 @@ void read_trace_pipe(void)
|
||||
{
|
||||
read_trace_pipe_iter(trace_pipe_cb, NULL, 0);
|
||||
}
|
||||
|
||||
/* hashmap callback: treat the key as a C string and hash its bytes. */
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
	return str_hash((const char *) key);
}
|
||||
|
||||
/* hashmap callback: string keys compare equal when contents match. */
static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return !strcmp((const char *) key1, (const char *) key2);
}
|
||||
|
||||
/*
 * Module symbols in available_filter_functions carry a "[module]"
 * suffix.  A line is invalid when its kind does not match the one
 * being collected (@kernel selects kernel-only symbols).
 */
static bool is_invalid_entry(char *buf, bool kernel)
{
	bool module_line = strchr(buf, '[') != NULL;

	if (kernel)
		return module_line;
	return !module_line;
}
|
||||
|
||||
/*
 * Extra symbols skip_entry() refuses to trace on top of its built-in
 * filters.  NOTE(review): presumably excluded because they run on the
 * benchmark's own trigger path (bpf_get_numa_node_id is the bench
 * attach target) or inside RCU/preempt bookkeeping — confirm against
 * the kprobe-multi-all benchmark before extending this list.
 */
static const char * const trace_blacklist[] = {
	"migrate_disable",
	"migrate_enable",
	"rcu_read_unlock_strict",
	"preempt_count_add",
	"preempt_count_sub",
	"__rcu_read_lock",
	"__rcu_read_unlock",
	"bpf_get_numa_node_id",
};
|
||||
|
||||
static bool skip_entry(char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We attach to almost all kernel functions and some of them
|
||||
* will cause 'suspicious RCU usage' when fprobe is attached
|
||||
* to them. Filter out the current culprits - arch_cpu_idle
|
||||
* default_idle and rcu_* functions.
|
||||
*/
|
||||
if (!strcmp(name, "arch_cpu_idle"))
|
||||
return true;
|
||||
if (!strcmp(name, "default_idle"))
|
||||
return true;
|
||||
if (!strncmp(name, "rcu_", 4))
|
||||
return true;
|
||||
if (!strcmp(name, "bpf_dispatcher_xdp_func"))
|
||||
return true;
|
||||
if (!strncmp(name, "__ftrace_invalid_address__",
|
||||
sizeof("__ftrace_invalid_address__") - 1))
|
||||
return true;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(trace_blacklist); i++) {
|
||||
if (!strcmp(name, trace_blacklist[i]))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *tail1 = strstr(name1, ".llvm.");
	const char *tail2 = strstr(name2, ".llvm.");
	int n1 = tail1 ? (int)(tail1 - name1) : (int)strlen(name1);
	int n2 = tail2 ? (int)(tail2 - name2) : (int)strlen(name2);

	if (n1 == n2)
		return strncmp(name1, name2, n1);

	/* different stripped lengths: the shorter wins ties on its prefix */
	if (n1 < n2)
		return strncmp(name1, name2, n1) <= 0 ? -1 : 1;
	return strncmp(name1, name2, n2) >= 0 ? 1 : -1;
}
|
||||
|
||||
static int load_kallsyms_compare(const void *p1, const void *p2)
|
||||
{
|
||||
return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
|
||||
}
|
||||
|
||||
static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
|
||||
{
|
||||
return compare_name(p1, p2->name);
|
||||
}
|
||||
|
||||
/*
 * Collect the de-duplicated names of all traceable symbols (kernel or
 * module symbols, selected by @kernel) from available_filter_functions,
 * resolved against a locally loaded kallsyms table.
 *
 * On success (return 0) *symsp receives a malloc'd array of @*cntp
 * pointers; the pointers reference names owned by the kallsyms table,
 * so only the array itself should be freed by the caller.  Returns a
 * negative errno on failure.
 */
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ksyms)
		return -EINVAL;

	/*
	 * The available_filter_functions contains many duplicates,
	 * but other than that all symbols are usable to trace.
	 * Filtering out duplicates by using hashmap__add, which won't
	 * add existing entry.
	 */

	/* prefer the tracefs mount point; fall back to legacy debugfs */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");

	if (!f)
		return -EINVAL;

	map = hashmap__new(symbol_hash, symbol_equal, NULL);
	if (IS_ERR(map)) {
		err = libbpf_get_error(map);
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* %ms allocates; drop the previous line's name first */
		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		if (skip_entry(name))
			continue;

		/* use the kallsyms-owned copy of the name so it outlives @name */
		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}

		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
		}
		if (err)
			goto error;

		err = libbpf_ensure_mem((void **) &syms, &cap,
					sizeof(*syms), cnt + 1);
		if (err)
			goto error;

		syms[cnt++] = ksym_name;
	}

	*symsp = syms;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err)
		free(syms);
	return err;
}
|
||||
|
||||
/*
 * Collect the addresses of all traceable symbols (kernel or module,
 * selected by @kernel) from available_filter_functions_addrs.
 *
 * On success (return 0) *addrsp receives a malloc'd array of @*cntp
 * addresses owned by the caller.  Returns -ENOENT when the _addrs file
 * does not exist (caller should skip), other negative errno on failure.
 */
int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	/* prefer the tracefs mount point; fall back to legacy debugfs */
	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");

	if (!f)
		return -ENOENT;

	/* In my local setup, the number of entries is 50k+ so Let us initially
	 * allocate space to hold 64k entries. If 64k is not enough, incrementally
	 * increase 1k each time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(*addrs));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		/* %ms allocates; drop the previous line's name first */
		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			/* fix: realloc takes a byte size, not an element
			 * count — passing bare max_cnt shrank the buffer
			 * and overflowed it on the next stores.
			 */
			tmp_addrs = realloc(addrs, max_cnt * sizeof(*addrs));
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long)addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}
|
||||
|
||||
@@ -41,4 +41,7 @@ ssize_t get_rel_offset(uintptr_t addr);
|
||||
|
||||
int read_build_id(const char *path, char *build_id, size_t size);
|
||||
|
||||
int bpf_get_ksyms(char ***symsp, size_t *cntp, bool kernel);
|
||||
int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel);
|
||||
|
||||
#endif
|
||||
|
||||
Reference in New Issue
Block a user