mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-08 17:35:54 -04:00
Reduce the API surface that is in bpf_counter.h, this helps compiler analysis like unused static function, makes it easier to set a breakpoint and just makes it easier to see the code is self contained. When code is shared between BPF C code, put it inside HAVE_BPF_SKEL. Move transitively found #includes into appropriate C files. No functional change. Signed-off-by: Ian Rogers <irogers@google.com> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com> Cc: Gabriele Monaco <gmonaco@redhat.com> Cc: Howard Chu <howardchu95@gmail.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Clark <james.clark@linaro.org> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Song Liu <songliubraving@fb.com> Cc: Tengda Wu <wutengda@huaweicloud.com> Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
76 lines · 1.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Interface for counting perf events with BPF programs ("bperf").
 * Declares the ops table and entry points shared by bpf_counter.c,
 * bpf_counter_cgroup.c, etc., with no-op stubs when perf is built
 * without BPF skeleton support.
 */
#ifndef __PERF_BPF_COUNTER_H
#define __PERF_BPF_COUNTER_H 1

/* Forward declarations: only pointers are used below, so no #include needed. */
struct evsel;
struct target;

#ifdef HAVE_BPF_SKEL
/* Op taking only the evsel (enable/disable/read/destroy). Returns 0 or -errno. */
typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);

/* Op taking the evsel plus the target being profiled (load). */
typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
					   struct target *target);

/*
 * Op installing an already-opened perf event fd for the CPU at
 * cpu_map_idx in the evsel's CPU map (install_pe).
 */
typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
					       int cpu_map_idx,
					       int fd);
/* Shared ops between bpf_counter, bpf_counter_cgroup, etc. */
struct bpf_counter_ops {
	bpf_counter_evsel_target_op load;	/* load/attach BPF program for evsel+target */
	bpf_counter_evsel_op enable;		/* start counting */
	bpf_counter_evsel_op disable;		/* stop counting */
	bpf_counter_evsel_op read;		/* read counter values into the evsel */
	bpf_counter_evsel_op destroy;		/* detach and free BPF resources */
	bpf_counter_evsel_install_pe_op install_pe; /* hand a perf event fd to the BPF side */
};
/*
 * Public entry points; each dispatches through the evsel's bpf_counter_ops.
 * All int-returning functions use 0 on success, negative error otherwise.
 */
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd);

/* Helpers shared with the BPF-skeleton C code. */
int bperf_trigger_reading(int prog_fd, int cpu);
void set_max_rlimit(void);
#else /* HAVE_BPF_SKEL */

#include <linux/err.h>

/*
 * Stubs used when perf is built without BPF skeleton support: loading is
 * a successful no-op so callers need no HAVE_BPF_SKEL checks of their own.
 */
static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
				    struct target *target __maybe_unused)
{
	return 0;
}
/* Stub: nothing to enable without BPF skeletons; report success. */
static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
{
	return 0;
}
/* Stub: nothing to disable without BPF skeletons; report success. */
static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}
/*
 * Stub: there is never a BPF counter value to read, so return -EAGAIN
 * (unlike the other stubs, which succeed) so callers fall back to the
 * regular read path rather than treating a missing value as valid.
 */
static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
	return -EAGAIN;
}
/* Stub: no BPF resources were created, so there is nothing to tear down. */
static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
}
/*
 * Stub: no BPF side to hand the perf event fd to; report success.
 * The second parameter is named cpu_map_idx to match the prototype in
 * the HAVE_BPF_SKEL branch and the install_pe typedef (it was previously
 * named "cpu" here, which misstated what the argument is).
 */
static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
					  int cpu_map_idx __maybe_unused,
					  int fd __maybe_unused)
{
	return 0;
}

#endif /* HAVE_BPF_SKEL */

#endif /* __PERF_BPF_COUNTER_H */