mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-29 10:34:22 -04:00
Merge tag 'perf-core-for-mingo-5.4-20190729' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
perf trace:
Arnaldo Carvalho de Melo:
- Use BPF_MAP_TYPE_PROG_ARRAY + bpf_tail_call() for augmenting raw syscalls,
i.e. copy pointers passed to/from userspace. The use of a table per syscall
to tell the BPF program what to copy made the raw_syscalls:sys_enter/exit
programs a bit complex, the scratch space would have to be bigger to allow
for checking all args to see which ones were a pathname, so use a PROG_ARRAY
map instead, test it with syscalls that receive multiple pathnames at
different registers (rename, renameat, etc).
- Beautify various syscalls using this new infrastructure, and also add code
that looks for syscalls with BPF augmenters, such as "open", and then reuse
it with syscalls not yet having a specific augmenter, but that copies the
same argument with the same type, say "statfs" can initially reuse the "open"
beautifier, as both have as their first arg a "const char *".
- Do not use the fd->pathname beautifier when the 'close' syscall isn't enabled,
as we can't invalidate that mapping.
core:
Jiri Olsa:
- Introduce tools/perf/lib/, that eventually will move to tools/lib/perf/, to
allow other tools to use the abstractions and code perf uses to set up
the perf ring buffer and set up the many possible combinations allowed
by the kernel, starting with 'struct perf_evsel' and 'struct perf_evlist'.
perf vendor events:
Michael Petlan:
- Add missing event description to power9 event definitions.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
@@ -277,6 +277,7 @@ ifeq ($(DEBUG),0)
|
||||
endif
|
||||
endif
|
||||
|
||||
INC_FLAGS += -I$(src-perf)/lib/include
|
||||
INC_FLAGS += -I$(src-perf)/util/include
|
||||
INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
|
||||
INC_FLAGS += -I$(srctree)/tools/include/uapi
|
||||
|
||||
@@ -224,6 +224,7 @@ LIB_DIR = $(srctree)/tools/lib/api/
|
||||
TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
|
||||
BPF_DIR = $(srctree)/tools/lib/bpf/
|
||||
SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
|
||||
LIBPERF_DIR = $(srctree)/tools/perf/lib/
|
||||
|
||||
# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
|
||||
# Without this setting the output feature dump file misses some features, for
|
||||
@@ -272,6 +273,7 @@ ifneq ($(OUTPUT),)
|
||||
TE_PATH=$(OUTPUT)
|
||||
BPF_PATH=$(OUTPUT)
|
||||
SUBCMD_PATH=$(OUTPUT)
|
||||
LIBPERF_PATH=$(OUTPUT)
|
||||
ifneq ($(subdir),)
|
||||
API_PATH=$(OUTPUT)/../lib/api/
|
||||
else
|
||||
@@ -282,6 +284,7 @@ else
|
||||
API_PATH=$(LIB_DIR)
|
||||
BPF_PATH=$(BPF_DIR)
|
||||
SUBCMD_PATH=$(SUBCMD_DIR)
|
||||
LIBPERF_PATH=$(LIBPERF_DIR)
|
||||
endif
|
||||
|
||||
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
|
||||
@@ -303,6 +306,9 @@ LIBBPF = $(BPF_PATH)libbpf.a
|
||||
|
||||
LIBSUBCMD = $(SUBCMD_PATH)libsubcmd.a
|
||||
|
||||
LIBPERF = $(LIBPERF_PATH)libperf.a
|
||||
export LIBPERF
|
||||
|
||||
# python extension build directories
|
||||
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
|
||||
PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
|
||||
@@ -348,9 +354,7 @@ endif
|
||||
|
||||
export PERL_PATH
|
||||
|
||||
LIBPERF_A=$(OUTPUT)libperf.a
|
||||
|
||||
PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
|
||||
PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD) $(LIBPERF)
|
||||
ifndef NO_LIBBPF
|
||||
PERFLIBS += $(LIBBPF)
|
||||
endif
|
||||
@@ -583,8 +587,6 @@ JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
|
||||
|
||||
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
|
||||
|
||||
LIBPERF_IN := $(OUTPUT)libperf-in.o
|
||||
|
||||
export JEVENTS
|
||||
|
||||
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
|
||||
@@ -601,12 +603,9 @@ $(JEVENTS): $(JEVENTS_IN)
|
||||
$(PMU_EVENTS_IN): $(JEVENTS) FORCE
|
||||
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
|
||||
|
||||
$(LIBPERF_IN): prepare FORCE
|
||||
$(Q)$(MAKE) $(build)=libperf
|
||||
|
||||
$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
|
||||
$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
|
||||
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
|
||||
$(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBS) -o $@
|
||||
$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
|
||||
|
||||
$(GTK_IN): FORCE
|
||||
$(Q)$(MAKE) $(build)=gtk
|
||||
@@ -727,9 +726,6 @@ endif
|
||||
|
||||
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
|
||||
|
||||
$(LIBPERF_A): $(LIBPERF_IN)
|
||||
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
|
||||
|
||||
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
|
||||
|
||||
$(LIBTRACEEVENT): FORCE
|
||||
@@ -762,6 +758,13 @@ $(LIBBPF)-clean:
|
||||
$(call QUIET_CLEAN, libbpf)
|
||||
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
|
||||
|
||||
$(LIBPERF): FORCE
|
||||
$(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(OUTPUT) $(OUTPUT)libperf.a
|
||||
|
||||
$(LIBPERF)-clean:
|
||||
$(call QUIET_CLEAN, libperf)
|
||||
$(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(OUTPUT) clean >/dev/null
|
||||
|
||||
$(LIBSUBCMD): FORCE
|
||||
$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
|
||||
|
||||
@@ -948,7 +951,7 @@ config-clean:
|
||||
python-clean:
|
||||
$(python-clean)
|
||||
|
||||
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
|
||||
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean config-clean fixdep-clean python-clean
|
||||
$(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
|
||||
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
|
||||
$(Q)$(RM) $(OUTPUT).config-detected
|
||||
|
||||
@@ -50,10 +50,10 @@ static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
|
||||
}
|
||||
|
||||
struct auxtrace_record
|
||||
*auxtrace_record__init(struct perf_evlist *evlist, int *err)
|
||||
*auxtrace_record__init(struct evlist *evlist, int *err)
|
||||
{
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
bool found_etm = false;
|
||||
bool found_spe = false;
|
||||
static struct perf_pmu **arm_spe_pmus = NULL;
|
||||
@@ -70,14 +70,14 @@ struct auxtrace_record
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (cs_etm_pmu &&
|
||||
evsel->attr.type == cs_etm_pmu->type)
|
||||
evsel->core.attr.type == cs_etm_pmu->type)
|
||||
found_etm = true;
|
||||
|
||||
if (!nr_spes)
|
||||
continue;
|
||||
|
||||
for (i = 0; i < nr_spes; i++) {
|
||||
if (evsel->attr.type == arm_spe_pmus[i]->type) {
|
||||
if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
|
||||
found_spe = true;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
struct cs_etm_recording {
|
||||
struct auxtrace_record itr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
int wrapped_cnt;
|
||||
bool *wrapped;
|
||||
bool snapshot_mode;
|
||||
@@ -55,7 +55,7 @@ static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
|
||||
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
|
||||
|
||||
static int cs_etm_set_context_id(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, int cpu)
|
||||
struct evsel *evsel, int cpu)
|
||||
{
|
||||
struct cs_etm_recording *ptr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
@@ -95,7 +95,7 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
/* All good, let the kernel know */
|
||||
evsel->attr.config |= (1 << ETM_OPT_CTXTID);
|
||||
evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
@@ -104,7 +104,7 @@ static int cs_etm_set_context_id(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, int cpu)
|
||||
struct evsel *evsel, int cpu)
|
||||
{
|
||||
struct cs_etm_recording *ptr;
|
||||
struct perf_pmu *cs_etm_pmu;
|
||||
@@ -144,7 +144,7 @@ static int cs_etm_set_timestamp(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
/* All good, let the kernel know */
|
||||
evsel->attr.config |= (1 << ETM_OPT_TS);
|
||||
evsel->core.attr.config |= (1 << ETM_OPT_TS);
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
@@ -152,11 +152,11 @@ static int cs_etm_set_timestamp(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
static int cs_etm_set_option(struct auxtrace_record *itr,
|
||||
struct perf_evsel *evsel, u32 option)
|
||||
struct evsel *evsel, u32 option)
|
||||
{
|
||||
int i, err = -EINVAL;
|
||||
struct cpu_map *event_cpus = evsel->evlist->cpus;
|
||||
struct cpu_map *online_cpus = cpu_map__new(NULL);
|
||||
struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
|
||||
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
|
||||
|
||||
/* Set option of each CPU we have */
|
||||
for (i = 0; i < cpu__max_cpu(); i++) {
|
||||
@@ -181,7 +181,7 @@ static int cs_etm_set_option(struct auxtrace_record *itr,
|
||||
|
||||
err = 0;
|
||||
out:
|
||||
cpu_map__put(online_cpus);
|
||||
perf_cpu_map__put(online_cpus);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -208,14 +208,14 @@ static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
char msg[BUFSIZ], path[PATH_MAX], *sink;
|
||||
struct perf_evsel_config_term *term;
|
||||
int ret = -EINVAL;
|
||||
u32 hash;
|
||||
|
||||
if (evsel->attr.config2 & GENMASK(31, 0))
|
||||
if (evsel->core.attr.config2 & GENMASK(31, 0))
|
||||
return 0;
|
||||
|
||||
list_for_each_entry(term, &evsel->config_terms, list) {
|
||||
@@ -233,7 +233,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
|
||||
return ret;
|
||||
}
|
||||
|
||||
evsel->attr.config2 |= hash;
|
||||
evsel->core.attr.config2 |= hash;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -245,15 +245,15 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
|
||||
}
|
||||
|
||||
static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
struct perf_evlist *evlist,
|
||||
struct evlist *evlist,
|
||||
struct record_opts *opts)
|
||||
{
|
||||
int ret;
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
struct perf_evsel *evsel, *cs_etm_evsel = NULL;
|
||||
struct cpu_map *cpus = evlist->cpus;
|
||||
struct evsel *evsel, *cs_etm_evsel = NULL;
|
||||
struct perf_cpu_map *cpus = evlist->core.cpus;
|
||||
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
|
||||
int err = 0;
|
||||
|
||||
@@ -264,14 +264,14 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
opts->record_switch_events = true;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == cs_etm_pmu->type) {
|
||||
if (evsel->core.attr.type == cs_etm_pmu->type) {
|
||||
if (cs_etm_evsel) {
|
||||
pr_err("There may be only one %s event\n",
|
||||
CORESIGHT_ETM_PMU_NAME);
|
||||
return -EINVAL;
|
||||
}
|
||||
evsel->attr.freq = 0;
|
||||
evsel->attr.sample_period = 1;
|
||||
evsel->core.attr.freq = 0;
|
||||
evsel->core.attr.sample_period = 1;
|
||||
cs_etm_evsel = evsel;
|
||||
opts->full_auxtrace = true;
|
||||
}
|
||||
@@ -407,7 +407,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
/* Add dummy event to keep tracking */
|
||||
if (opts->full_auxtrace) {
|
||||
struct perf_evsel *tracking_evsel;
|
||||
struct evsel *tracking_evsel;
|
||||
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
if (err)
|
||||
@@ -416,8 +416,8 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->attr.freq = 0;
|
||||
tracking_evsel->attr.sample_period = 1;
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
tracking_evsel->core.attr.sample_period = 1;
|
||||
|
||||
/* In per-cpu case, always need the time of mmap events etc */
|
||||
if (!cpu_map__empty(cpus))
|
||||
@@ -434,11 +434,11 @@ static u64 cs_etm_get_config(struct auxtrace_record *itr)
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
struct perf_evlist *evlist = ptr->evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = ptr->evlist;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == cs_etm_pmu->type) {
|
||||
if (evsel->core.attr.type == cs_etm_pmu->type) {
|
||||
/*
|
||||
* Variable perf_event_attr::config is assigned to
|
||||
* ETMv3/PTM. The bit fields have been made to match
|
||||
@@ -447,7 +447,7 @@ static u64 cs_etm_get_config(struct auxtrace_record *itr)
|
||||
* drivers/hwtracing/coresight/coresight-perf.c for
|
||||
* details.
|
||||
*/
|
||||
config = evsel->attr.config;
|
||||
config = evsel->core.attr.config;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -485,12 +485,12 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
|
||||
|
||||
static size_t
|
||||
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
struct perf_evlist *evlist __maybe_unused)
|
||||
struct evlist *evlist __maybe_unused)
|
||||
{
|
||||
int i;
|
||||
int etmv3 = 0, etmv4 = 0;
|
||||
struct cpu_map *event_cpus = evlist->cpus;
|
||||
struct cpu_map *online_cpus = cpu_map__new(NULL);
|
||||
struct perf_cpu_map *event_cpus = evlist->core.cpus;
|
||||
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
|
||||
|
||||
/* cpu map is not empty, we have specific CPUs to work with */
|
||||
if (!cpu_map__empty(event_cpus)) {
|
||||
@@ -517,7 +517,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
}
|
||||
}
|
||||
|
||||
cpu_map__put(online_cpus);
|
||||
perf_cpu_map__put(online_cpus);
|
||||
|
||||
return (CS_ETM_HEADER_SIZE +
|
||||
(etmv4 * CS_ETMV4_PRIV_SIZE) +
|
||||
@@ -635,9 +635,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
|
||||
int i;
|
||||
u32 offset;
|
||||
u64 nr_cpu, type;
|
||||
struct cpu_map *cpu_map;
|
||||
struct cpu_map *event_cpus = session->evlist->cpus;
|
||||
struct cpu_map *online_cpus = cpu_map__new(NULL);
|
||||
struct perf_cpu_map *cpu_map;
|
||||
struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
|
||||
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
|
||||
@@ -679,7 +679,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
|
||||
if (cpu_map__has(cpu_map, i))
|
||||
cs_etm_get_metadata(i, &offset, itr, info);
|
||||
|
||||
cpu_map__put(online_cpus);
|
||||
perf_cpu_map__put(online_cpus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -817,11 +817,11 @@ static int cs_etm_snapshot_start(struct auxtrace_record *itr)
|
||||
{
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->cs_etm_pmu->type)
|
||||
return perf_evsel__disable(evsel);
|
||||
if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
|
||||
return evsel__disable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -830,11 +830,11 @@ static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
|
||||
{
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->cs_etm_pmu->type)
|
||||
return perf_evsel__enable(evsel);
|
||||
if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
|
||||
return evsel__enable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -858,10 +858,10 @@ static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
|
||||
{
|
||||
struct cs_etm_recording *ptr =
|
||||
container_of(itr, struct cs_etm_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->cs_etm_pmu->type)
|
||||
if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
|
||||
return perf_evlist__enable_event_idx(ptr->evlist,
|
||||
evsel, idx);
|
||||
}
|
||||
|
||||
@@ -27,12 +27,12 @@
|
||||
struct arm_spe_recording {
|
||||
struct auxtrace_record itr;
|
||||
struct perf_pmu *arm_spe_pmu;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
};
|
||||
|
||||
static size_t
|
||||
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
struct perf_evlist *evlist __maybe_unused)
|
||||
struct evlist *evlist __maybe_unused)
|
||||
{
|
||||
return ARM_SPE_AUXTRACE_PRIV_SIZE;
|
||||
}
|
||||
@@ -59,27 +59,27 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
static int arm_spe_recording_options(struct auxtrace_record *itr,
|
||||
struct perf_evlist *evlist,
|
||||
struct evlist *evlist,
|
||||
struct record_opts *opts)
|
||||
{
|
||||
struct arm_spe_recording *sper =
|
||||
container_of(itr, struct arm_spe_recording, itr);
|
||||
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
|
||||
struct perf_evsel *evsel, *arm_spe_evsel = NULL;
|
||||
struct evsel *evsel, *arm_spe_evsel = NULL;
|
||||
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
|
||||
struct perf_evsel *tracking_evsel;
|
||||
struct evsel *tracking_evsel;
|
||||
int err;
|
||||
|
||||
sper->evlist = evlist;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == arm_spe_pmu->type) {
|
||||
if (evsel->core.attr.type == arm_spe_pmu->type) {
|
||||
if (arm_spe_evsel) {
|
||||
pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
evsel->attr.freq = 0;
|
||||
evsel->attr.sample_period = 1;
|
||||
evsel->core.attr.freq = 0;
|
||||
evsel->core.attr.sample_period = 1;
|
||||
arm_spe_evsel = evsel;
|
||||
opts->full_auxtrace = true;
|
||||
}
|
||||
@@ -130,8 +130,8 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->attr.freq = 0;
|
||||
tracking_evsel->attr.sample_period = 1;
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
tracking_evsel->core.attr.sample_period = 1;
|
||||
perf_evsel__set_sample_bit(tracking_evsel, TIME);
|
||||
perf_evsel__set_sample_bit(tracking_evsel, CPU);
|
||||
perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
|
||||
@@ -160,10 +160,10 @@ static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
|
||||
{
|
||||
struct arm_spe_recording *sper =
|
||||
container_of(itr, struct arm_spe_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(sper->evlist, evsel) {
|
||||
if (evsel->attr.type == sper->arm_spe_pmu->type)
|
||||
if (evsel->core.attr.type == sper->arm_spe_pmu->type)
|
||||
return perf_evlist__enable_event_idx(sper->evlist,
|
||||
evsel, idx);
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ char *get_cpuid_str(struct perf_pmu *pmu)
|
||||
const char *sysfs = sysfs__mountpoint();
|
||||
int cpu;
|
||||
u64 midr = 0;
|
||||
struct cpu_map *cpus;
|
||||
struct perf_cpu_map *cpus;
|
||||
FILE *file;
|
||||
|
||||
if (!sysfs || !pmu || !pmu->cpus)
|
||||
@@ -27,7 +27,7 @@ char *get_cpuid_str(struct perf_pmu *pmu)
|
||||
return NULL;
|
||||
|
||||
/* read midr from list of cpus mapped to this pmu */
|
||||
cpus = cpu_map__get(pmu->cpus);
|
||||
cpus = perf_cpu_map__get(pmu->cpus);
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
|
||||
sysfs, cpus->map[cpu]);
|
||||
@@ -60,6 +60,6 @@ char *get_cpuid_str(struct perf_pmu *pmu)
|
||||
buf = NULL;
|
||||
}
|
||||
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
return buf;
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ const char *ppc_book3s_hv_kvm_tp[] = {
|
||||
const char *kvm_events_tp[NR_TPS + 1];
|
||||
const char *kvm_exit_reason;
|
||||
|
||||
static void hcall_event_get_key(struct perf_evsel *evsel,
|
||||
static void hcall_event_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -55,14 +55,14 @@ static const char *get_hcall_exit_reason(u64 exit_code)
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
static bool hcall_event_end(struct perf_evsel *evsel,
|
||||
static bool hcall_event_end(struct evsel *evsel,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct event_key *key __maybe_unused)
|
||||
{
|
||||
return (!strcmp(evsel->name, kvm_events_tp[3]));
|
||||
}
|
||||
|
||||
static bool hcall_event_begin(struct perf_evsel *evsel,
|
||||
static bool hcall_event_begin(struct evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key)
|
||||
{
|
||||
if (!strcmp(evsel->name, kvm_events_tp[2])) {
|
||||
@@ -106,7 +106,7 @@ const char * const kvm_skip_events[] = {
|
||||
};
|
||||
|
||||
|
||||
static int is_tracepoint_available(const char *str, struct perf_evlist *evlist)
|
||||
static int is_tracepoint_available(const char *str, struct evlist *evlist)
|
||||
{
|
||||
struct parse_events_error err;
|
||||
int ret;
|
||||
@@ -119,7 +119,7 @@ static int is_tracepoint_available(const char *str, struct perf_evlist *evlist)
|
||||
}
|
||||
|
||||
static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
|
||||
struct perf_evlist *evlist)
|
||||
struct evlist *evlist)
|
||||
{
|
||||
const char **events_ptr;
|
||||
int i, nr_tp = 0, err = -1;
|
||||
@@ -146,7 +146,7 @@ static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
|
||||
/* Wrapper to setup kvm tracepoints */
|
||||
static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
|
||||
{
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evlist *evlist = evlist__new();
|
||||
|
||||
if (evlist == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -20,7 +20,7 @@ static void cpumsf_free(struct auxtrace_record *itr)
|
||||
}
|
||||
|
||||
static size_t cpumsf_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
struct perf_evlist *evlist __maybe_unused)
|
||||
struct evlist *evlist __maybe_unused)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -43,7 +43,7 @@ cpumsf_reference(struct auxtrace_record *itr __maybe_unused)
|
||||
|
||||
static int
|
||||
cpumsf_recording_options(struct auxtrace_record *ar __maybe_unused,
|
||||
struct perf_evlist *evlist __maybe_unused,
|
||||
struct evlist *evlist __maybe_unused,
|
||||
struct record_opts *opts)
|
||||
{
|
||||
unsigned int factor = 1;
|
||||
@@ -82,19 +82,19 @@ cpumsf_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
|
||||
* auxtrace_record__init is called when perf record
|
||||
* check if the event really need auxtrace
|
||||
*/
|
||||
struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
|
||||
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
|
||||
int *err)
|
||||
{
|
||||
struct auxtrace_record *aux;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
int diagnose = 0;
|
||||
|
||||
*err = 0;
|
||||
if (evlist->nr_entries == 0)
|
||||
if (evlist->core.nr_entries == 0)
|
||||
return NULL;
|
||||
|
||||
evlist__for_each_entry(evlist, pos) {
|
||||
if (pos->attr.config == PERF_EVENT_CPUM_SF_DIAG) {
|
||||
if (pos->core.attr.config == PERF_EVENT_CPUM_SF_DIAG) {
|
||||
diagnose = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ const char *kvm_exit_reason = "icptcode";
|
||||
const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
|
||||
const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
|
||||
|
||||
static void event_icpt_insn_get_key(struct perf_evsel *evsel,
|
||||
static void event_icpt_insn_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -34,7 +34,7 @@ static void event_icpt_insn_get_key(struct perf_evsel *evsel,
|
||||
key->exit_reasons = sie_icpt_insn_codes;
|
||||
}
|
||||
|
||||
static void event_sigp_get_key(struct perf_evsel *evsel,
|
||||
static void event_sigp_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -42,7 +42,7 @@ static void event_sigp_get_key(struct perf_evsel *evsel,
|
||||
key->exit_reasons = sie_sigp_order_codes;
|
||||
}
|
||||
|
||||
static void event_diag_get_key(struct perf_evsel *evsel,
|
||||
static void event_diag_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -50,7 +50,7 @@ static void event_diag_get_key(struct perf_evsel *evsel,
|
||||
key->exit_reasons = sie_diagnose_codes;
|
||||
}
|
||||
|
||||
static void event_icpt_prog_get_key(struct perf_evsel *evsel,
|
||||
static void event_icpt_prog_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
|
||||
@@ -40,8 +40,8 @@ static pid_t spawn(void)
|
||||
*/
|
||||
int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct perf_evsel *evsel = NULL;
|
||||
struct evlist *evlist = NULL;
|
||||
struct evsel *evsel = NULL;
|
||||
struct perf_event_attr pe;
|
||||
int i, fd[2], flag, ret;
|
||||
size_t mmap_len;
|
||||
@@ -51,7 +51,7 @@ int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subt
|
||||
|
||||
flag = perf_event_open_cloexec_flag();
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (!evlist) {
|
||||
pr_debug("perf_evlist__new failed\n");
|
||||
return TEST_FAIL;
|
||||
@@ -124,6 +124,6 @@ int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subt
|
||||
kill(pid, SIGKILL);
|
||||
wait(NULL);
|
||||
out:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
#include <unistd.h>
|
||||
#include <linux/types.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
#include "parse-events.h"
|
||||
#include "evlist.h"
|
||||
@@ -49,10 +51,10 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
},
|
||||
.sample_time = true,
|
||||
};
|
||||
struct thread_map *threads = NULL;
|
||||
struct cpu_map *cpus = NULL;
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct perf_evsel *evsel = NULL;
|
||||
struct perf_thread_map *threads = NULL;
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
struct evlist *evlist = NULL;
|
||||
struct evsel *evsel = NULL;
|
||||
int err = -1, ret, i;
|
||||
const char *comm1, *comm2;
|
||||
struct perf_tsc_conversion tc;
|
||||
@@ -65,13 +67,13 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
CHECK_NOT_NULL__(threads);
|
||||
|
||||
cpus = cpu_map__new(NULL);
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
CHECK_NOT_NULL__(cpus);
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
CHECK_NOT_NULL__(evlist);
|
||||
|
||||
perf_evlist__set_maps(evlist, cpus, threads);
|
||||
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
||||
|
||||
CHECK__(parse_events(evlist, "cycles:u", NULL));
|
||||
|
||||
@@ -79,11 +81,11 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
|
||||
evsel = perf_evlist__first(evlist);
|
||||
|
||||
evsel->attr.comm = 1;
|
||||
evsel->attr.disabled = 1;
|
||||
evsel->attr.enable_on_exec = 0;
|
||||
evsel->core.attr.comm = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
evsel->core.attr.enable_on_exec = 0;
|
||||
|
||||
CHECK__(perf_evlist__open(evlist));
|
||||
CHECK__(evlist__open(evlist));
|
||||
|
||||
CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
|
||||
|
||||
@@ -97,7 +99,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
comm1 = "Test COMM 1";
|
||||
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
|
||||
@@ -107,7 +109,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
comm2 = "Test COMM 2";
|
||||
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));
|
||||
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
md = &evlist->mmap[i];
|
||||
@@ -163,6 +165,6 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
||||
err = 0;
|
||||
|
||||
out_err:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -16,12 +16,12 @@
|
||||
#include "../../util/evlist.h"
|
||||
|
||||
static
|
||||
struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
|
||||
struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
|
||||
int *err)
|
||||
{
|
||||
struct perf_pmu *intel_pt_pmu;
|
||||
struct perf_pmu *intel_bts_pmu;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
bool found_pt = false;
|
||||
bool found_bts = false;
|
||||
|
||||
@@ -29,9 +29,9 @@ struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
|
||||
intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (intel_pt_pmu && evsel->attr.type == intel_pt_pmu->type)
|
||||
if (intel_pt_pmu && evsel->core.attr.type == intel_pt_pmu->type)
|
||||
found_pt = true;
|
||||
if (intel_bts_pmu && evsel->attr.type == intel_bts_pmu->type)
|
||||
if (intel_bts_pmu && evsel->core.attr.type == intel_bts_pmu->type)
|
||||
found_bts = true;
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
|
||||
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
|
||||
int *err)
|
||||
{
|
||||
char buffer[64];
|
||||
|
||||
@@ -35,7 +35,7 @@ struct intel_bts_snapshot_ref {
|
||||
struct intel_bts_recording {
|
||||
struct auxtrace_record itr;
|
||||
struct perf_pmu *intel_bts_pmu;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
bool snapshot_mode;
|
||||
size_t snapshot_size;
|
||||
int snapshot_ref_cnt;
|
||||
@@ -50,7 +50,7 @@ struct branch {
|
||||
|
||||
static size_t
|
||||
intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
|
||||
struct perf_evlist *evlist __maybe_unused)
|
||||
struct evlist *evlist __maybe_unused)
|
||||
{
|
||||
return INTEL_BTS_AUXTRACE_PRIV_SIZE;
|
||||
}
|
||||
@@ -99,27 +99,27 @@ static int intel_bts_info_fill(struct auxtrace_record *itr,
|
||||
}
|
||||
|
||||
static int intel_bts_recording_options(struct auxtrace_record *itr,
|
||||
struct perf_evlist *evlist,
|
||||
struct evlist *evlist,
|
||||
struct record_opts *opts)
|
||||
{
|
||||
struct intel_bts_recording *btsr =
|
||||
container_of(itr, struct intel_bts_recording, itr);
|
||||
struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
|
||||
struct perf_evsel *evsel, *intel_bts_evsel = NULL;
|
||||
const struct cpu_map *cpus = evlist->cpus;
|
||||
struct evsel *evsel, *intel_bts_evsel = NULL;
|
||||
const struct perf_cpu_map *cpus = evlist->core.cpus;
|
||||
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
|
||||
|
||||
btsr->evlist = evlist;
|
||||
btsr->snapshot_mode = opts->auxtrace_snapshot_mode;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == intel_bts_pmu->type) {
|
||||
if (evsel->core.attr.type == intel_bts_pmu->type) {
|
||||
if (intel_bts_evsel) {
|
||||
pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
evsel->attr.freq = 0;
|
||||
evsel->attr.sample_period = 1;
|
||||
evsel->core.attr.freq = 0;
|
||||
evsel->core.attr.sample_period = 1;
|
||||
intel_bts_evsel = evsel;
|
||||
opts->full_auxtrace = true;
|
||||
}
|
||||
@@ -220,7 +220,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
/* Add dummy event to keep tracking */
|
||||
if (opts->full_auxtrace) {
|
||||
struct perf_evsel *tracking_evsel;
|
||||
struct evsel *tracking_evsel;
|
||||
int err;
|
||||
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
@@ -231,8 +231,8 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->attr.freq = 0;
|
||||
tracking_evsel->attr.sample_period = 1;
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
tracking_evsel->core.attr.sample_period = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -313,11 +313,11 @@ static int intel_bts_snapshot_start(struct auxtrace_record *itr)
|
||||
{
|
||||
struct intel_bts_recording *btsr =
|
||||
container_of(itr, struct intel_bts_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(btsr->evlist, evsel) {
|
||||
if (evsel->attr.type == btsr->intel_bts_pmu->type)
|
||||
return perf_evsel__disable(evsel);
|
||||
if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
|
||||
return evsel__disable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -326,11 +326,11 @@ static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
|
||||
{
|
||||
struct intel_bts_recording *btsr =
|
||||
container_of(itr, struct intel_bts_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(btsr->evlist, evsel) {
|
||||
if (evsel->attr.type == btsr->intel_bts_pmu->type)
|
||||
return perf_evsel__enable(evsel);
|
||||
if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
|
||||
return evsel__enable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -408,10 +408,10 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
|
||||
{
|
||||
struct intel_bts_recording *btsr =
|
||||
container_of(itr, struct intel_bts_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(btsr->evlist, evsel) {
|
||||
if (evsel->attr.type == btsr->intel_bts_pmu->type)
|
||||
if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
|
||||
return perf_evlist__enable_event_idx(btsr->evlist,
|
||||
evsel, idx);
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ struct intel_pt_recording {
|
||||
struct auxtrace_record itr;
|
||||
struct perf_pmu *intel_pt_pmu;
|
||||
int have_sched_switch;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
bool snapshot_mode;
|
||||
bool snapshot_init_done;
|
||||
size_t snapshot_size;
|
||||
@@ -110,9 +110,9 @@ static u64 intel_pt_masked_bits(u64 mask, u64 bits)
|
||||
}
|
||||
|
||||
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
|
||||
struct perf_evlist *evlist, u64 *res)
|
||||
struct evlist *evlist, u64 *res)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
u64 mask;
|
||||
|
||||
*res = 0;
|
||||
@@ -122,8 +122,8 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
|
||||
return -EINVAL;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == intel_pt_pmu->type) {
|
||||
*res = intel_pt_masked_bits(mask, evsel->attr.config);
|
||||
if (evsel->core.attr.type == intel_pt_pmu->type) {
|
||||
*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -132,7 +132,7 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
|
||||
}
|
||||
|
||||
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
|
||||
struct perf_evlist *evlist)
|
||||
struct evlist *evlist)
|
||||
{
|
||||
u64 val;
|
||||
int err, topa_multiple_entries;
|
||||
@@ -268,13 +268,13 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
|
||||
return attr;
|
||||
}
|
||||
|
||||
static const char *intel_pt_find_filter(struct perf_evlist *evlist,
|
||||
static const char *intel_pt_find_filter(struct evlist *evlist,
|
||||
struct perf_pmu *intel_pt_pmu)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == intel_pt_pmu->type)
|
||||
if (evsel->core.attr.type == intel_pt_pmu->type)
|
||||
return evsel->filter;
|
||||
}
|
||||
|
||||
@@ -289,7 +289,7 @@ static size_t intel_pt_filter_bytes(const char *filter)
|
||||
}
|
||||
|
||||
static size_t
|
||||
intel_pt_info_priv_size(struct auxtrace_record *itr, struct perf_evlist *evlist)
|
||||
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
|
||||
{
|
||||
struct intel_pt_recording *ptr =
|
||||
container_of(itr, struct intel_pt_recording, itr);
|
||||
@@ -365,7 +365,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
|
||||
ui__warning("Intel Processor Trace: TSC not available\n");
|
||||
}
|
||||
|
||||
per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);
|
||||
per_cpu_mmaps = !cpu_map__empty(session->evlist->core.cpus);
|
||||
|
||||
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
|
||||
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
|
||||
@@ -398,10 +398,10 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_pt_track_switches(struct perf_evlist *evlist)
|
||||
static int intel_pt_track_switches(struct evlist *evlist)
|
||||
{
|
||||
const char *sched_switch = "sched:sched_switch";
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
int err;
|
||||
|
||||
if (!perf_evlist__can_select_event(evlist, sched_switch))
|
||||
@@ -513,7 +513,7 @@ static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
|
||||
}
|
||||
|
||||
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
int err;
|
||||
char c;
|
||||
@@ -526,38 +526,38 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
|
||||
* sets pt=0, which avoids senseless kernel errors.
|
||||
*/
|
||||
if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
|
||||
!(evsel->attr.config & 1)) {
|
||||
!(evsel->core.attr.config & 1)) {
|
||||
pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
|
||||
evsel->attr.config |= 1;
|
||||
evsel->core.attr.config |= 1;
|
||||
}
|
||||
|
||||
err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
|
||||
"cyc_thresh", "caps/psb_cyc",
|
||||
evsel->attr.config);
|
||||
evsel->core.attr.config);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
|
||||
"mtc_period", "caps/mtc",
|
||||
evsel->attr.config);
|
||||
evsel->core.attr.config);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
|
||||
"psb_period", "caps/psb_cyc",
|
||||
evsel->attr.config);
|
||||
evsel->core.attr.config);
|
||||
}
|
||||
|
||||
static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
struct perf_evlist *evlist,
|
||||
struct evlist *evlist,
|
||||
struct record_opts *opts)
|
||||
{
|
||||
struct intel_pt_recording *ptr =
|
||||
container_of(itr, struct intel_pt_recording, itr);
|
||||
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
|
||||
bool have_timing_info, need_immediate = false;
|
||||
struct perf_evsel *evsel, *intel_pt_evsel = NULL;
|
||||
const struct cpu_map *cpus = evlist->cpus;
|
||||
struct evsel *evsel, *intel_pt_evsel = NULL;
|
||||
const struct perf_cpu_map *cpus = evlist->core.cpus;
|
||||
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
|
||||
u64 tsc_bit;
|
||||
int err;
|
||||
@@ -566,13 +566,13 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->attr.type == intel_pt_pmu->type) {
|
||||
if (evsel->core.attr.type == intel_pt_pmu->type) {
|
||||
if (intel_pt_evsel) {
|
||||
pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
evsel->attr.freq = 0;
|
||||
evsel->attr.sample_period = 1;
|
||||
evsel->core.attr.freq = 0;
|
||||
evsel->core.attr.sample_period = 1;
|
||||
intel_pt_evsel = evsel;
|
||||
opts->full_auxtrace = true;
|
||||
}
|
||||
@@ -670,7 +670,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
|
||||
|
||||
if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
|
||||
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
|
||||
have_timing_info = true;
|
||||
else
|
||||
have_timing_info = false;
|
||||
@@ -685,7 +685,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
!target__has_task(&opts->target);
|
||||
|
||||
if (!cpu_wide && perf_can_record_cpu_wide()) {
|
||||
struct perf_evsel *switch_evsel;
|
||||
struct evsel *switch_evsel;
|
||||
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
if (err)
|
||||
@@ -693,9 +693,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
switch_evsel = perf_evlist__last(evlist);
|
||||
|
||||
switch_evsel->attr.freq = 0;
|
||||
switch_evsel->attr.sample_period = 1;
|
||||
switch_evsel->attr.context_switch = 1;
|
||||
switch_evsel->core.attr.freq = 0;
|
||||
switch_evsel->core.attr.sample_period = 1;
|
||||
switch_evsel->core.attr.context_switch = 1;
|
||||
|
||||
switch_evsel->system_wide = true;
|
||||
switch_evsel->no_aux_samples = true;
|
||||
@@ -743,7 +743,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
/* Add dummy event to keep tracking */
|
||||
if (opts->full_auxtrace) {
|
||||
struct perf_evsel *tracking_evsel;
|
||||
struct evsel *tracking_evsel;
|
||||
|
||||
err = parse_events(evlist, "dummy:u", NULL);
|
||||
if (err)
|
||||
@@ -753,8 +753,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
||||
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->attr.freq = 0;
|
||||
tracking_evsel->attr.sample_period = 1;
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
tracking_evsel->core.attr.sample_period = 1;
|
||||
|
||||
tracking_evsel->no_aux_samples = true;
|
||||
if (need_immediate)
|
||||
@@ -784,11 +784,11 @@ static int intel_pt_snapshot_start(struct auxtrace_record *itr)
|
||||
{
|
||||
struct intel_pt_recording *ptr =
|
||||
container_of(itr, struct intel_pt_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->intel_pt_pmu->type)
|
||||
return perf_evsel__disable(evsel);
|
||||
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
|
||||
return evsel__disable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -797,11 +797,11 @@ static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
|
||||
{
|
||||
struct intel_pt_recording *ptr =
|
||||
container_of(itr, struct intel_pt_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->intel_pt_pmu->type)
|
||||
return perf_evsel__enable(evsel);
|
||||
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
|
||||
return evsel__enable(evsel);
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -1070,10 +1070,10 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
|
||||
{
|
||||
struct intel_pt_recording *ptr =
|
||||
container_of(itr, struct intel_pt_recording, itr);
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(ptr->evlist, evsel) {
|
||||
if (evsel->attr.type == ptr->intel_pt_pmu->type)
|
||||
if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
|
||||
return perf_evlist__enable_event_idx(ptr->evlist, evsel,
|
||||
idx);
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ const char *kvm_exit_trace = "kvm:kvm_exit";
|
||||
* the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
|
||||
* the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
|
||||
*/
|
||||
static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
key->key = perf_evsel__intval(evsel, sample, "gpa");
|
||||
@@ -38,7 +38,7 @@ static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sam
|
||||
#define KVM_TRACE_MMIO_READ 1
|
||||
#define KVM_TRACE_MMIO_WRITE 2
|
||||
|
||||
static bool mmio_event_begin(struct perf_evsel *evsel,
|
||||
static bool mmio_event_begin(struct evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key)
|
||||
{
|
||||
/* MMIO read begin event in kernel. */
|
||||
@@ -55,7 +55,7 @@ static bool mmio_event_begin(struct perf_evsel *evsel,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
/* MMIO write end event in kernel. */
|
||||
@@ -89,7 +89,7 @@ static struct kvm_events_ops mmio_events = {
|
||||
};
|
||||
|
||||
/* The time of emulation pio access is from kvm_pio to kvm_entry. */
|
||||
static void ioport_event_get_key(struct perf_evsel *evsel,
|
||||
static void ioport_event_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -97,7 +97,7 @@ static void ioport_event_get_key(struct perf_evsel *evsel,
|
||||
key->info = perf_evsel__intval(evsel, sample, "rw");
|
||||
}
|
||||
|
||||
static bool ioport_event_begin(struct perf_evsel *evsel,
|
||||
static bool ioport_event_begin(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -109,7 +109,7 @@ static bool ioport_event_begin(struct perf_evsel *evsel,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool ioport_event_end(struct perf_evsel *evsel,
|
||||
static bool ioport_event_end(struct evsel *evsel,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct event_key *key __maybe_unused)
|
||||
{
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <sys/resource.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/eventfd.h>
|
||||
#include <perf/cpumap.h>
|
||||
|
||||
#include "../util/stat.h"
|
||||
#include <subcmd/parse-options.h>
|
||||
@@ -219,7 +220,7 @@ static void init_fdmaps(struct worker *w, int pct)
|
||||
}
|
||||
}
|
||||
|
||||
static int do_threads(struct worker *worker, struct cpu_map *cpu)
|
||||
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
|
||||
{
|
||||
pthread_attr_t thread_attr, *attrp = NULL;
|
||||
cpu_set_t cpuset;
|
||||
@@ -301,7 +302,7 @@ int bench_epoll_ctl(int argc, const char **argv)
|
||||
int j, ret = 0;
|
||||
struct sigaction act;
|
||||
struct worker *worker = NULL;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
struct rlimit rl, prevrl;
|
||||
unsigned int i;
|
||||
|
||||
@@ -315,7 +316,7 @@ int bench_epoll_ctl(int argc, const char **argv)
|
||||
act.sa_sigaction = toggle_done;
|
||||
sigaction(SIGINT, &act, NULL);
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
goto errmem;
|
||||
|
||||
|
||||
@@ -75,6 +75,7 @@
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/eventfd.h>
|
||||
#include <sys/types.h>
|
||||
#include <perf/cpumap.h>
|
||||
|
||||
#include "../util/stat.h"
|
||||
#include <subcmd/parse-options.h>
|
||||
@@ -288,7 +289,7 @@ static void print_summary(void)
|
||||
(int) runtime.tv_sec);
|
||||
}
|
||||
|
||||
static int do_threads(struct worker *worker, struct cpu_map *cpu)
|
||||
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
|
||||
{
|
||||
pthread_attr_t thread_attr, *attrp = NULL;
|
||||
cpu_set_t cpuset;
|
||||
@@ -415,7 +416,7 @@ int bench_epoll_wait(int argc, const char **argv)
|
||||
struct sigaction act;
|
||||
unsigned int i;
|
||||
struct worker *worker = NULL;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
pthread_t wthread;
|
||||
struct rlimit rl, prevrl;
|
||||
|
||||
@@ -429,7 +430,7 @@ int bench_epoll_wait(int argc, const char **argv)
|
||||
act.sa_sigaction = toggle_done;
|
||||
sigaction(SIGINT, &act, NULL);
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
goto errmem;
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <sys/time.h>
|
||||
#include <perf/cpumap.h>
|
||||
|
||||
#include "../util/stat.h"
|
||||
#include <subcmd/parse-options.h>
|
||||
@@ -124,7 +125,7 @@ int bench_futex_hash(int argc, const char **argv)
|
||||
unsigned int i;
|
||||
pthread_attr_t thread_attr;
|
||||
struct worker *worker = NULL;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
|
||||
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
|
||||
if (argc) {
|
||||
@@ -132,7 +133,7 @@ int bench_futex_hash(int argc, const char **argv)
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
goto errmem;
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <errno.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "bench.h"
|
||||
#include "futex.h"
|
||||
#include "cpumap.h"
|
||||
@@ -116,7 +117,7 @@ static void *workerfn(void *arg)
|
||||
}
|
||||
|
||||
static void create_threads(struct worker *w, pthread_attr_t thread_attr,
|
||||
struct cpu_map *cpu)
|
||||
struct perf_cpu_map *cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
unsigned int i;
|
||||
@@ -150,13 +151,13 @@ int bench_futex_lock_pi(int argc, const char **argv)
|
||||
unsigned int i;
|
||||
struct sigaction act;
|
||||
pthread_attr_t thread_attr;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
|
||||
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
|
||||
if (argc)
|
||||
goto err;
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
err(EXIT_FAILURE, "calloc");
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/time64.h>
|
||||
#include <errno.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "bench.h"
|
||||
#include "futex.h"
|
||||
#include "cpumap.h"
|
||||
@@ -84,7 +85,7 @@ static void *workerfn(void *arg __maybe_unused)
|
||||
}
|
||||
|
||||
static void block_threads(pthread_t *w,
|
||||
pthread_attr_t thread_attr, struct cpu_map *cpu)
|
||||
pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
unsigned int i;
|
||||
@@ -117,13 +118,13 @@ int bench_futex_requeue(int argc, const char **argv)
|
||||
unsigned int i, j;
|
||||
struct sigaction act;
|
||||
pthread_attr_t thread_attr;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
|
||||
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
|
||||
if (argc)
|
||||
goto err;
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
err(EXIT_FAILURE, "cpu_map__new");
|
||||
|
||||
|
||||
@@ -138,7 +138,7 @@ static void *blocked_workerfn(void *arg __maybe_unused)
|
||||
}
|
||||
|
||||
static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
|
||||
struct cpu_map *cpu)
|
||||
struct perf_cpu_map *cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
unsigned int i;
|
||||
@@ -224,7 +224,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
|
||||
struct sigaction act;
|
||||
pthread_attr_t thread_attr;
|
||||
struct thread_data *waking_worker;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
|
||||
argc = parse_options(argc, argv, options,
|
||||
bench_futex_wake_parallel_usage, 0);
|
||||
@@ -237,7 +237,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
|
||||
act.sa_sigaction = toggle_done;
|
||||
sigaction(SIGINT, &act, NULL);
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
err(EXIT_FAILURE, "calloc");
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/time64.h>
|
||||
#include <errno.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "bench.h"
|
||||
#include "futex.h"
|
||||
#include "cpumap.h"
|
||||
@@ -90,7 +91,7 @@ static void print_summary(void)
|
||||
}
|
||||
|
||||
static void block_threads(pthread_t *w,
|
||||
pthread_attr_t thread_attr, struct cpu_map *cpu)
|
||||
pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
unsigned int i;
|
||||
@@ -123,7 +124,7 @@ int bench_futex_wake(int argc, const char **argv)
|
||||
unsigned int i, j;
|
||||
struct sigaction act;
|
||||
pthread_attr_t thread_attr;
|
||||
struct cpu_map *cpu;
|
||||
struct perf_cpu_map *cpu;
|
||||
|
||||
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
|
||||
if (argc) {
|
||||
@@ -131,7 +132,7 @@ int bench_futex_wake(int argc, const char **argv)
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
cpu = cpu_map__new(NULL);
|
||||
cpu = perf_cpu_map__new(NULL);
|
||||
if (!cpu)
|
||||
err(EXIT_FAILURE, "calloc");
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
|
||||
struct hist_entry *he = iter->he;
|
||||
struct branch_info *bi;
|
||||
struct perf_sample *sample = iter->sample;
|
||||
struct perf_evsel *evsel = iter->evsel;
|
||||
struct evsel *evsel = iter->evsel;
|
||||
int err;
|
||||
|
||||
bi = he->branch_info;
|
||||
@@ -171,7 +171,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int process_branch_callback(struct perf_evsel *evsel,
|
||||
static int process_branch_callback(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct addr_location *al __maybe_unused,
|
||||
struct perf_annotate *ann,
|
||||
@@ -208,7 +208,7 @@ static bool has_annotation(struct perf_annotate *ann)
|
||||
return ui__has_annotation() || ann->use_stdio2;
|
||||
}
|
||||
|
||||
static int perf_evsel__add_sample(struct perf_evsel *evsel,
|
||||
static int perf_evsel__add_sample(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct addr_location *al,
|
||||
struct perf_annotate *ann,
|
||||
@@ -257,7 +257,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
|
||||
@@ -293,7 +293,7 @@ static int process_feature_event(struct perf_session *session,
|
||||
}
|
||||
|
||||
static int hist_entry__tty_annotate(struct hist_entry *he,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_annotate *ann)
|
||||
{
|
||||
if (!ann->use_stdio2)
|
||||
@@ -303,7 +303,7 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
|
||||
}
|
||||
|
||||
static void hists__find_annotations(struct hists *hists,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_annotate *ann)
|
||||
{
|
||||
struct rb_node *nd = rb_first_cached(&hists->entries), *next;
|
||||
@@ -333,7 +333,7 @@ static void hists__find_annotations(struct hists *hists,
|
||||
if (use_browser == 2) {
|
||||
int ret;
|
||||
int (*annotate)(struct hist_entry *he,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct hist_browser_timer *hbt);
|
||||
|
||||
annotate = dlsym(perf_gtk_handle,
|
||||
@@ -387,7 +387,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
|
||||
{
|
||||
int ret;
|
||||
struct perf_session *session = ann->session;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
u64 total_nr_samples;
|
||||
|
||||
if (ann->cpu_list) {
|
||||
|
||||
@@ -248,7 +248,7 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct c2c_hists *c2c_hists = &c2c.hists;
|
||||
@@ -2049,7 +2049,7 @@ static int setup_nodes(struct perf_session *session)
|
||||
c2c.cpu2node = cpu2node;
|
||||
|
||||
for (node = 0; node < c2c.nodes_cnt; node++) {
|
||||
struct cpu_map *map = n[node].map;
|
||||
struct perf_cpu_map *map = n[node].map;
|
||||
unsigned long *set;
|
||||
|
||||
set = bitmap_alloc(c2c.cpus_cnt);
|
||||
@@ -2236,8 +2236,8 @@ static void print_pareto(FILE *out)
|
||||
|
||||
static void print_c2c_info(FILE *out, struct perf_session *session)
|
||||
{
|
||||
struct perf_evlist *evlist = session->evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = session->evlist;
|
||||
struct evsel *evsel;
|
||||
bool first = true;
|
||||
|
||||
fprintf(out, "=================================================\n");
|
||||
@@ -2567,7 +2567,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
|
||||
return parse_callchain_report_opt(arg);
|
||||
}
|
||||
|
||||
static int setup_callchain(struct perf_evlist *evlist)
|
||||
static int setup_callchain(struct evlist *evlist)
|
||||
{
|
||||
u64 sample_type = perf_evlist__combined_sample_type(evlist);
|
||||
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
|
||||
|
||||
@@ -376,7 +376,7 @@ struct hist_entry_ops block_hist_ops = {
|
||||
static int diff__process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
|
||||
@@ -448,10 +448,10 @@ static struct perf_diff pdiff = {
|
||||
},
|
||||
};
|
||||
|
||||
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
|
||||
struct perf_evlist *evlist)
|
||||
static struct evsel *evsel_match(struct evsel *evsel,
|
||||
struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *e;
|
||||
struct evsel *e;
|
||||
|
||||
evlist__for_each_entry(evlist, e) {
|
||||
if (perf_evsel__match2(evsel, e))
|
||||
@@ -461,9 +461,9 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
|
||||
static void perf_evlist__collapse_resort(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -1009,8 +1009,8 @@ static void data__fprintf(void)
|
||||
|
||||
static void data_process(void)
|
||||
{
|
||||
struct perf_evlist *evlist_base = data__files[0].session->evlist;
|
||||
struct perf_evsel *evsel_base;
|
||||
struct evlist *evlist_base = data__files[0].session->evlist;
|
||||
struct evsel *evsel_base;
|
||||
bool first = true;
|
||||
|
||||
evlist__for_each_entry(evlist_base, evsel_base) {
|
||||
@@ -1019,8 +1019,8 @@ static void data_process(void)
|
||||
int i;
|
||||
|
||||
data__for_each_file_new(i, d) {
|
||||
struct perf_evlist *evlist = d->session->evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = d->session->evlist;
|
||||
struct evsel *evsel;
|
||||
struct hists *hists;
|
||||
|
||||
evsel = evsel_match(evsel_base, evlist);
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
|
||||
{
|
||||
struct perf_session *session;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
struct perf_data data = {
|
||||
.path = file_name,
|
||||
.mode = PERF_DATA_MODE_READ,
|
||||
@@ -36,7 +36,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
|
||||
evlist__for_each_entry(session->evlist, pos) {
|
||||
perf_evsel__fprintf(pos, details, stdout);
|
||||
|
||||
if (pos->attr.type == PERF_TYPE_TRACEPOINT)
|
||||
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
|
||||
has_tracepoint = true;
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
#define DEFAULT_TRACER "function_graph"
|
||||
|
||||
struct perf_ftrace {
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
struct target target;
|
||||
const char *tracer;
|
||||
struct list_head filters;
|
||||
@@ -156,16 +156,16 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
|
||||
if (target__has_cpu(&ftrace->target))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
|
||||
for (i = 0; i < thread_map__nr(ftrace->evlist->core.threads); i++) {
|
||||
scnprintf(buf, sizeof(buf), "%d",
|
||||
ftrace->evlist->threads->map[i]);
|
||||
ftrace->evlist->core.threads->map[i]);
|
||||
if (append_tracing_file("set_ftrace_pid", buf) < 0)
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_tracing_cpumask(struct cpu_map *cpumap)
|
||||
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
|
||||
{
|
||||
char *cpumask;
|
||||
size_t mask_size;
|
||||
@@ -192,7 +192,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
|
||||
|
||||
static int set_tracing_cpu(struct perf_ftrace *ftrace)
|
||||
{
|
||||
struct cpu_map *cpumap = ftrace->evlist->cpus;
|
||||
struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
|
||||
|
||||
if (!target__has_cpu(&ftrace->target))
|
||||
return 0;
|
||||
@@ -202,11 +202,11 @@ static int set_tracing_cpu(struct perf_ftrace *ftrace)
|
||||
|
||||
static int reset_tracing_cpu(void)
|
||||
{
|
||||
struct cpu_map *cpumap = cpu_map__new(NULL);
|
||||
struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
|
||||
int ret;
|
||||
|
||||
ret = set_tracing_cpumask(cpumap);
|
||||
cpu_map__put(cpumap);
|
||||
perf_cpu_map__put(cpumap);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -495,7 +495,7 @@ int cmd_ftrace(int argc, const char **argv)
|
||||
goto out_delete_filters;
|
||||
}
|
||||
|
||||
ftrace.evlist = perf_evlist__new();
|
||||
ftrace.evlist = evlist__new();
|
||||
if (ftrace.evlist == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_delete_filters;
|
||||
@@ -508,7 +508,7 @@ int cmd_ftrace(int argc, const char **argv)
|
||||
ret = __cmd_ftrace(&ftrace, argc, argv);
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(ftrace.evlist);
|
||||
evlist__delete(ftrace.evlist);
|
||||
|
||||
out_delete_filters:
|
||||
delete_filter_func(&ftrace.filters);
|
||||
|
||||
@@ -96,7 +96,7 @@ static int perf_event__repipe_op2_synth(struct perf_session *session,
|
||||
|
||||
static int perf_event__repipe_attr(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_evlist **pevlist)
|
||||
struct evlist **pevlist)
|
||||
{
|
||||
struct perf_inject *inject = container_of(tool, struct perf_inject,
|
||||
tool);
|
||||
@@ -215,13 +215,13 @@ static int perf_event__drop_aux(struct perf_tool *tool,
|
||||
typedef int (*inject_handler)(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine);
|
||||
|
||||
static int perf_event__repipe_sample(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
if (evsel && evsel->handler) {
|
||||
@@ -424,7 +424,7 @@ static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
|
||||
static int perf_event__inject_buildid(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel __maybe_unused,
|
||||
struct evsel *evsel __maybe_unused,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct addr_location al;
|
||||
@@ -465,7 +465,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
|
||||
static int perf_inject__sched_process_exit(struct perf_tool *tool,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel __maybe_unused,
|
||||
struct evsel *evsel __maybe_unused,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
|
||||
@@ -485,7 +485,7 @@ static int perf_inject__sched_process_exit(struct perf_tool *tool,
|
||||
static int perf_inject__sched_switch(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
|
||||
@@ -509,7 +509,7 @@ static int perf_inject__sched_switch(struct perf_tool *tool,
|
||||
static int perf_inject__sched_stat(struct perf_tool *tool,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct event_entry *ent;
|
||||
@@ -530,8 +530,8 @@ static int perf_inject__sched_stat(struct perf_tool *tool,
|
||||
|
||||
sample_sw.period = sample->period;
|
||||
sample_sw.time = sample->time;
|
||||
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
|
||||
evsel->attr.read_format, &sample_sw);
|
||||
perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
|
||||
evsel->core.attr.read_format, &sample_sw);
|
||||
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
|
||||
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
|
||||
}
|
||||
@@ -541,10 +541,10 @@ static void sig_handler(int sig __maybe_unused)
|
||||
session_done = 1;
|
||||
}
|
||||
|
||||
static int perf_evsel__check_stype(struct perf_evsel *evsel,
|
||||
static int perf_evsel__check_stype(struct evsel *evsel,
|
||||
u64 sample_type, const char *sample_msg)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
const char *name = perf_evsel__name(evsel);
|
||||
|
||||
if (!(attr->sample_type & sample_type)) {
|
||||
@@ -559,7 +559,7 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
|
||||
static int drop_sample(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct perf_evsel *evsel __maybe_unused,
|
||||
struct evsel *evsel __maybe_unused,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
return 0;
|
||||
@@ -567,8 +567,8 @@ static int drop_sample(struct perf_tool *tool __maybe_unused,
|
||||
|
||||
static void strip_init(struct perf_inject *inject)
|
||||
{
|
||||
struct perf_evlist *evlist = inject->session->evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = inject->session->evlist;
|
||||
struct evsel *evsel;
|
||||
|
||||
inject->tool.context_switch = perf_event__drop;
|
||||
|
||||
@@ -576,10 +576,10 @@ static void strip_init(struct perf_inject *inject)
|
||||
evsel->handler = drop_sample;
|
||||
}
|
||||
|
||||
static bool has_tracking(struct perf_evsel *evsel)
|
||||
static bool has_tracking(struct evsel *evsel)
|
||||
{
|
||||
return evsel->attr.mmap || evsel->attr.mmap2 || evsel->attr.comm ||
|
||||
evsel->attr.task;
|
||||
return evsel->core.attr.mmap || evsel->core.attr.mmap2 || evsel->core.attr.comm ||
|
||||
evsel->core.attr.task;
|
||||
}
|
||||
|
||||
#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
|
||||
@@ -590,10 +590,10 @@ static bool has_tracking(struct perf_evsel *evsel)
|
||||
* their selected event to exist, except if there is only 1 selected event left
|
||||
* and it has a compatible sample type.
|
||||
*/
|
||||
static bool ok_to_remove(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel_to_remove)
|
||||
static bool ok_to_remove(struct evlist *evlist,
|
||||
struct evsel *evsel_to_remove)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
int cnt = 0;
|
||||
bool ok = false;
|
||||
|
||||
@@ -603,8 +603,8 @@ static bool ok_to_remove(struct perf_evlist *evlist,
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->handler != drop_sample) {
|
||||
cnt += 1;
|
||||
if ((evsel->attr.sample_type & COMPAT_MASK) ==
|
||||
(evsel_to_remove->attr.sample_type & COMPAT_MASK))
|
||||
if ((evsel->core.attr.sample_type & COMPAT_MASK) ==
|
||||
(evsel_to_remove->core.attr.sample_type & COMPAT_MASK))
|
||||
ok = true;
|
||||
}
|
||||
}
|
||||
@@ -614,16 +614,16 @@ static bool ok_to_remove(struct perf_evlist *evlist,
|
||||
|
||||
static void strip_fini(struct perf_inject *inject)
|
||||
{
|
||||
struct perf_evlist *evlist = inject->session->evlist;
|
||||
struct perf_evsel *evsel, *tmp;
|
||||
struct evlist *evlist = inject->session->evlist;
|
||||
struct evsel *evsel, *tmp;
|
||||
|
||||
/* Remove non-synthesized evsels if possible */
|
||||
evlist__for_each_entry_safe(evlist, tmp, evsel) {
|
||||
if (evsel->handler == drop_sample &&
|
||||
ok_to_remove(evlist, evsel)) {
|
||||
pr_debug("Deleting %s\n", perf_evsel__name(evsel));
|
||||
perf_evlist__remove(evlist, evsel);
|
||||
perf_evsel__delete(evsel);
|
||||
evlist__remove(evlist, evsel);
|
||||
evsel__delete(evsel);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -651,7 +651,7 @@ static int __cmd_inject(struct perf_inject *inject)
|
||||
if (inject->build_ids) {
|
||||
inject->tool.sample = perf_event__inject_buildid;
|
||||
} else if (inject->sched_stat) {
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(session->evlist, evsel) {
|
||||
const char *name = perf_evsel__name(evsel);
|
||||
@@ -712,7 +712,7 @@ static int __cmd_inject(struct perf_inject *inject)
|
||||
* remove the evsel.
|
||||
*/
|
||||
if (inject->itrace_synth_opts.set) {
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
perf_header__clear_feat(&session->header,
|
||||
HEADER_AUXTRACE);
|
||||
@@ -724,8 +724,8 @@ static int __cmd_inject(struct perf_inject *inject)
|
||||
if (evsel) {
|
||||
pr_debug("Deleting %s\n",
|
||||
perf_evsel__name(evsel));
|
||||
perf_evlist__remove(session->evlist, evsel);
|
||||
perf_evsel__delete(evsel);
|
||||
evlist__remove(session->evlist, evsel);
|
||||
evsel__delete(evsel);
|
||||
}
|
||||
if (inject->strip)
|
||||
strip_fini(inject);
|
||||
|
||||
@@ -166,7 +166,7 @@ static int insert_caller_stat(unsigned long call_site,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_alloc_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
|
||||
@@ -185,7 +185,7 @@ static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
int ret = perf_evsel__process_alloc_event(evsel, sample);
|
||||
@@ -229,7 +229,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_free_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
|
||||
@@ -381,7 +381,7 @@ static int build_alloc_func_list(void)
|
||||
* Find first non-memory allocation function from callchain.
|
||||
* The allocation functions are in the 'alloc_func_list'.
|
||||
*/
|
||||
static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
|
||||
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct machine *machine = &kmem_session->machines.host;
|
||||
@@ -728,7 +728,7 @@ static char *compact_gfp_string(unsigned long gfp_flags)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
|
||||
unsigned int gfp_flags)
|
||||
{
|
||||
struct tep_record record = {
|
||||
@@ -779,7 +779,7 @@ static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
u64 page;
|
||||
@@ -852,7 +852,7 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_page_free_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
u64 page;
|
||||
@@ -930,13 +930,13 @@ static bool perf_kmem__skip_sample(struct perf_sample *sample)
|
||||
return false;
|
||||
}
|
||||
|
||||
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
|
||||
typedef int (*tracepoint_handler)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
int err = 0;
|
||||
@@ -1363,8 +1363,8 @@ static void sort_result(void)
|
||||
static int __cmd_kmem(struct perf_session *session)
|
||||
{
|
||||
int err = -EINVAL;
|
||||
struct perf_evsel *evsel;
|
||||
const struct perf_evsel_str_handler kmem_tracepoints[] = {
|
||||
struct evsel *evsel;
|
||||
const struct evsel_str_handler kmem_tracepoints[] = {
|
||||
/* slab allocator */
|
||||
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
|
||||
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
|
||||
@@ -1967,7 +1967,7 @@ int cmd_kmem(int argc, const char **argv)
|
||||
}
|
||||
|
||||
if (kmem_page) {
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
|
||||
"kmem:mm_page_alloc");
|
||||
|
||||
@@ -57,7 +57,7 @@ static const char *get_filename_for_perf_kvm(void)
|
||||
#ifdef HAVE_KVM_STAT_SUPPORT
|
||||
#include "util/kvm-stat.h"
|
||||
|
||||
void exit_event_get_key(struct perf_evsel *evsel,
|
||||
void exit_event_get_key(struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -65,12 +65,12 @@ void exit_event_get_key(struct perf_evsel *evsel,
|
||||
key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);
|
||||
}
|
||||
|
||||
bool kvm_exit_event(struct perf_evsel *evsel)
|
||||
bool kvm_exit_event(struct evsel *evsel)
|
||||
{
|
||||
return !strcmp(evsel->name, kvm_exit_trace);
|
||||
}
|
||||
|
||||
bool exit_event_begin(struct perf_evsel *evsel,
|
||||
bool exit_event_begin(struct evsel *evsel,
|
||||
struct perf_sample *sample, struct event_key *key)
|
||||
{
|
||||
if (kvm_exit_event(evsel)) {
|
||||
@@ -81,12 +81,12 @@ bool exit_event_begin(struct perf_evsel *evsel,
|
||||
return false;
|
||||
}
|
||||
|
||||
bool kvm_entry_event(struct perf_evsel *evsel)
|
||||
bool kvm_entry_event(struct evsel *evsel)
|
||||
{
|
||||
return !strcmp(evsel->name, kvm_entry_trace);
|
||||
}
|
||||
|
||||
bool exit_event_end(struct perf_evsel *evsel,
|
||||
bool exit_event_end(struct evsel *evsel,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct event_key *key __maybe_unused)
|
||||
{
|
||||
@@ -286,7 +286,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
|
||||
}
|
||||
|
||||
static bool is_child_event(struct perf_kvm_stat *kvm,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct event_key *key)
|
||||
{
|
||||
@@ -396,7 +396,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
|
||||
|
||||
static
|
||||
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
/* Only kvm_entry records vcpu id. */
|
||||
@@ -419,7 +419,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
|
||||
|
||||
static bool handle_kvm_event(struct perf_kvm_stat *kvm,
|
||||
struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
struct vcpu_event_record *vcpu_record;
|
||||
@@ -672,7 +672,7 @@ static bool skip_sample(struct perf_kvm_stat *kvm,
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
int err = 0;
|
||||
@@ -743,7 +743,7 @@ static bool verify_vcpu(int vcpu)
|
||||
static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
|
||||
u64 *mmap_time)
|
||||
{
|
||||
struct perf_evlist *evlist = kvm->evlist;
|
||||
struct evlist *evlist = kvm->evlist;
|
||||
union perf_event *event;
|
||||
struct perf_mmap *md;
|
||||
u64 timestamp;
|
||||
@@ -972,7 +972,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
|
||||
goto out;
|
||||
|
||||
/* everything is good - enable the events and process */
|
||||
perf_evlist__enable(kvm->evlist);
|
||||
evlist__enable(kvm->evlist);
|
||||
|
||||
while (!done) {
|
||||
struct fdarray *fda = &kvm->evlist->pollfd;
|
||||
@@ -993,7 +993,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
|
||||
err = fdarray__poll(fda, 100);
|
||||
}
|
||||
|
||||
perf_evlist__disable(kvm->evlist);
|
||||
evlist__disable(kvm->evlist);
|
||||
|
||||
if (err == 0) {
|
||||
sort_result(kvm);
|
||||
@@ -1011,8 +1011,8 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
|
||||
static int kvm_live_open_events(struct perf_kvm_stat *kvm)
|
||||
{
|
||||
int err, rc = -1;
|
||||
struct perf_evsel *pos;
|
||||
struct perf_evlist *evlist = kvm->evlist;
|
||||
struct evsel *pos;
|
||||
struct evlist *evlist = kvm->evlist;
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
|
||||
perf_evlist__config(evlist, &kvm->opts, NULL);
|
||||
@@ -1022,7 +1022,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
|
||||
* This command processes KVM tracepoints from host only
|
||||
*/
|
||||
evlist__for_each_entry(evlist, pos) {
|
||||
struct perf_event_attr *attr = &pos->attr;
|
||||
struct perf_event_attr *attr = &pos->core.attr;
|
||||
|
||||
/* make sure these *are* set */
|
||||
perf_evsel__set_sample_bit(pos, TID);
|
||||
@@ -1048,7 +1048,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
|
||||
attr->disabled = 1;
|
||||
}
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
printf("Couldn't create the events: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -1058,7 +1058,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
|
||||
if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
|
||||
ui__error("Failed to mmap the events: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
perf_evlist__close(evlist);
|
||||
evlist__close(evlist);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@@ -1283,14 +1283,14 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
|
||||
}
|
||||
|
||||
#ifdef HAVE_TIMERFD_SUPPORT
|
||||
static struct perf_evlist *kvm_live_event_list(void)
|
||||
static struct evlist *kvm_live_event_list(void)
|
||||
{
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
char *tp, *name, *sys;
|
||||
int err = -1;
|
||||
const char * const *events_tp;
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (evlist == NULL)
|
||||
return NULL;
|
||||
|
||||
@@ -1325,7 +1325,7 @@ static struct perf_evlist *kvm_live_event_list(void)
|
||||
|
||||
out:
|
||||
if (err) {
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
evlist = NULL;
|
||||
}
|
||||
|
||||
@@ -1450,7 +1450,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
|
||||
perf_session__set_id_hdr_size(kvm->session);
|
||||
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
|
||||
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
|
||||
kvm->evlist->threads, false, 1);
|
||||
kvm->evlist->core.threads, false, 1);
|
||||
err = kvm_live_open_events(kvm);
|
||||
if (err)
|
||||
goto out;
|
||||
@@ -1460,7 +1460,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
|
||||
out:
|
||||
perf_session__delete(kvm->session);
|
||||
kvm->session = NULL;
|
||||
perf_evlist__delete(kvm->evlist);
|
||||
evlist__delete(kvm->evlist);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -347,16 +347,16 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
|
||||
}
|
||||
|
||||
struct trace_lock_handler {
|
||||
int (*acquire_event)(struct perf_evsel *evsel,
|
||||
int (*acquire_event)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*acquired_event)(struct perf_evsel *evsel,
|
||||
int (*acquired_event)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*contended_event)(struct perf_evsel *evsel,
|
||||
int (*contended_event)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
int (*release_event)(struct perf_evsel *evsel,
|
||||
int (*release_event)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
};
|
||||
|
||||
@@ -396,7 +396,7 @@ enum acquire_flags {
|
||||
READ_LOCK = 2,
|
||||
};
|
||||
|
||||
static int report_lock_acquire_event(struct perf_evsel *evsel,
|
||||
static int report_lock_acquire_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
@@ -468,7 +468,7 @@ static int report_lock_acquire_event(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int report_lock_acquired_event(struct perf_evsel *evsel,
|
||||
static int report_lock_acquired_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
@@ -531,7 +531,7 @@ static int report_lock_acquired_event(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int report_lock_contended_event(struct perf_evsel *evsel,
|
||||
static int report_lock_contended_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
@@ -586,7 +586,7 @@ static int report_lock_contended_event(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int report_lock_release_event(struct perf_evsel *evsel,
|
||||
static int report_lock_release_event(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
void *addr;
|
||||
@@ -656,7 +656,7 @@ static struct trace_lock_handler report_lock_ops = {
|
||||
|
||||
static struct trace_lock_handler *trace_handler;
|
||||
|
||||
static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_lock_acquire(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (trace_handler->acquire_event)
|
||||
@@ -664,7 +664,7 @@ static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_lock_acquired(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (trace_handler->acquired_event)
|
||||
@@ -672,7 +672,7 @@ static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_lock_contended(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (trace_handler->contended_event)
|
||||
@@ -680,7 +680,7 @@ static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
|
||||
static int perf_evsel__process_lock_release(struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (trace_handler->release_event)
|
||||
@@ -806,13 +806,13 @@ static int dump_info(void)
|
||||
return rc;
|
||||
}
|
||||
|
||||
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
|
||||
typedef int (*tracepoint_handler)(struct evsel *evsel,
|
||||
struct perf_sample *sample);
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
int err = 0;
|
||||
@@ -847,7 +847,7 @@ static void sort_result(void)
|
||||
}
|
||||
}
|
||||
|
||||
static const struct perf_evsel_str_handler lock_tracepoints[] = {
|
||||
static const struct evsel_str_handler lock_tracepoints[] = {
|
||||
{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
|
||||
{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
|
||||
|
||||
@@ -230,7 +230,7 @@ dump_raw_samples(struct perf_tool *tool,
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel __maybe_unused,
|
||||
struct evsel *evsel __maybe_unused,
|
||||
struct machine *machine)
|
||||
{
|
||||
return dump_raw_samples(tool, event, sample, machine);
|
||||
|
||||
@@ -73,7 +73,7 @@ struct record {
|
||||
u64 bytes_written;
|
||||
struct perf_data data;
|
||||
struct auxtrace_record *itr;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
struct perf_session *session;
|
||||
int realtime_prio;
|
||||
bool no_buildid;
|
||||
@@ -346,7 +346,7 @@ static void record__aio_set_pos(int trace_fd, off_t pos)
|
||||
static void record__aio_mmap_read_sync(struct record *rec)
|
||||
{
|
||||
int i;
|
||||
struct perf_evlist *evlist = rec->evlist;
|
||||
struct evlist *evlist = rec->evlist;
|
||||
struct perf_mmap *maps = evlist->mmap;
|
||||
|
||||
if (!record__aio_enabled(rec))
|
||||
@@ -672,7 +672,7 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
|
||||
#endif
|
||||
|
||||
static int record__mmap_evlist(struct record *rec,
|
||||
struct perf_evlist *evlist)
|
||||
struct evlist *evlist)
|
||||
{
|
||||
struct record_opts *opts = &rec->opts;
|
||||
char msg[512];
|
||||
@@ -713,8 +713,8 @@ static int record__mmap(struct record *rec)
|
||||
static int record__open(struct record *rec)
|
||||
{
|
||||
char msg[BUFSIZ];
|
||||
struct perf_evsel *pos;
|
||||
struct perf_evlist *evlist = rec->evlist;
|
||||
struct evsel *pos;
|
||||
struct evlist *evlist = rec->evlist;
|
||||
struct perf_session *session = rec->session;
|
||||
struct record_opts *opts = &rec->opts;
|
||||
int rc = 0;
|
||||
@@ -732,14 +732,14 @@ static int record__open(struct record *rec)
|
||||
pos->tracking = 0;
|
||||
pos = perf_evlist__last(evlist);
|
||||
pos->tracking = 1;
|
||||
pos->attr.enable_on_exec = 1;
|
||||
pos->core.attr.enable_on_exec = 1;
|
||||
}
|
||||
|
||||
perf_evlist__config(evlist, opts, &callchain_param);
|
||||
|
||||
evlist__for_each_entry(evlist, pos) {
|
||||
try_again:
|
||||
if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
|
||||
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
|
||||
if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
|
||||
if (verbose > 0)
|
||||
ui__warning("%s\n", msg);
|
||||
@@ -782,7 +782,7 @@ static int record__open(struct record *rec)
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct record *rec = container_of(tool, struct record, tool);
|
||||
@@ -904,7 +904,7 @@ static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_
|
||||
return compressed;
|
||||
}
|
||||
|
||||
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
|
||||
static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
|
||||
bool overwrite, bool synch)
|
||||
{
|
||||
u64 bytes_written = rec->bytes_written;
|
||||
@@ -1002,7 +1002,7 @@ static void record__init_features(struct record *rec)
|
||||
if (rec->no_buildid)
|
||||
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
|
||||
|
||||
if (!have_tracepoints(&rec->evlist->entries))
|
||||
if (!have_tracepoints(&rec->evlist->core.entries))
|
||||
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
|
||||
|
||||
if (!rec->opts.branch_stack)
|
||||
@@ -1047,7 +1047,7 @@ record__finish_output(struct record *rec)
|
||||
static int record__synthesize_workload(struct record *rec, bool tail)
|
||||
{
|
||||
int err;
|
||||
struct thread_map *thread_map;
|
||||
struct perf_thread_map *thread_map;
|
||||
|
||||
if (rec->opts.tail_synthesize != tail)
|
||||
return 0;
|
||||
@@ -1060,7 +1060,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
|
||||
process_synthesized_event,
|
||||
&rec->session->machines.host,
|
||||
rec->opts.sample_address);
|
||||
thread_map__put(thread_map);
|
||||
perf_thread_map__put(thread_map);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1165,7 +1165,7 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
|
||||
}
|
||||
|
||||
static const struct perf_event_mmap_page *
|
||||
perf_evlist__pick_pc(struct perf_evlist *evlist)
|
||||
perf_evlist__pick_pc(struct evlist *evlist)
|
||||
{
|
||||
if (evlist) {
|
||||
if (evlist->mmap && evlist->mmap[0].base)
|
||||
@@ -1218,7 +1218,7 @@ static int record__synthesize(struct record *rec, bool tail)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (have_tracepoints(&rec->evlist->entries)) {
|
||||
if (have_tracepoints(&rec->evlist->core.entries)) {
|
||||
/*
|
||||
* FIXME err <= 0 here actually means that
|
||||
* there were no tracepoints so its not really
|
||||
@@ -1275,7 +1275,7 @@ static int record__synthesize(struct record *rec, bool tail)
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
|
||||
err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
|
||||
process_synthesized_event,
|
||||
NULL);
|
||||
if (err < 0) {
|
||||
@@ -1283,7 +1283,7 @@ static int record__synthesize(struct record *rec, bool tail)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
|
||||
err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
|
||||
process_synthesized_event, NULL);
|
||||
if (err < 0) {
|
||||
pr_err("Couldn't synthesize cpu map.\n");
|
||||
@@ -1295,7 +1295,7 @@ static int record__synthesize(struct record *rec, bool tail)
|
||||
if (err < 0)
|
||||
pr_warning("Couldn't synthesize bpf events.\n");
|
||||
|
||||
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
|
||||
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
|
||||
process_synthesized_event, opts->sample_address,
|
||||
1);
|
||||
out:
|
||||
@@ -1313,7 +1313,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
|
||||
struct perf_data *data = &rec->data;
|
||||
struct perf_session *session;
|
||||
bool disabled = false, draining = false;
|
||||
struct perf_evlist *sb_evlist = NULL;
|
||||
struct evlist *sb_evlist = NULL;
|
||||
int fd;
|
||||
float ratio = 0;
|
||||
|
||||
@@ -1375,7 +1375,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
|
||||
* because we synthesize event name through the pipe
|
||||
* and need the id for that.
|
||||
*/
|
||||
if (data->is_pipe && rec->evlist->nr_entries == 1)
|
||||
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
|
||||
rec->opts.sample_id = true;
|
||||
|
||||
if (record__open(rec) != 0) {
|
||||
@@ -1453,7 +1453,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
|
||||
* so don't spoil it by prematurely enabling them.
|
||||
*/
|
||||
if (!target__none(&opts->target) && !opts->initial_delay)
|
||||
perf_evlist__enable(rec->evlist);
|
||||
evlist__enable(rec->evlist);
|
||||
|
||||
/*
|
||||
* Let the child rip
|
||||
@@ -1506,7 +1506,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
|
||||
|
||||
if (opts->initial_delay) {
|
||||
usleep(opts->initial_delay * USEC_PER_MSEC);
|
||||
perf_evlist__enable(rec->evlist);
|
||||
evlist__enable(rec->evlist);
|
||||
}
|
||||
|
||||
trigger_ready(&auxtrace_snapshot_trigger);
|
||||
@@ -1605,7 +1605,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
|
||||
*/
|
||||
if (done && !disabled && !target__none(&opts->target)) {
|
||||
trigger_off(&auxtrace_snapshot_trigger);
|
||||
perf_evlist__disable(rec->evlist);
|
||||
evlist__disable(rec->evlist);
|
||||
disabled = true;
|
||||
}
|
||||
}
|
||||
@@ -2265,7 +2265,7 @@ int cmd_record(int argc, const char **argv)
|
||||
CPU_ZERO(&rec->affinity_mask);
|
||||
rec->opts.affinity = PERF_AFFINITY_SYS;
|
||||
|
||||
rec->evlist = perf_evlist__new();
|
||||
rec->evlist = evlist__new();
|
||||
if (rec->evlist == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -2386,7 +2386,7 @@ int cmd_record(int argc, const char **argv)
|
||||
if (record.opts.overwrite)
|
||||
record.opts.tail_synthesize = true;
|
||||
|
||||
if (rec->evlist->nr_entries == 0 &&
|
||||
if (rec->evlist->core.nr_entries == 0 &&
|
||||
__perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
|
||||
pr_err("Not enough memory for event selector list\n");
|
||||
goto out;
|
||||
@@ -2449,7 +2449,7 @@ int cmd_record(int argc, const char **argv)
|
||||
|
||||
err = __cmd_record(&record, argc, argv);
|
||||
out:
|
||||
perf_evlist__delete(rec->evlist);
|
||||
evlist__delete(rec->evlist);
|
||||
symbol__exit();
|
||||
auxtrace_record__free(rec->itr);
|
||||
return err;
|
||||
|
||||
@@ -128,7 +128,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
|
||||
int err = 0;
|
||||
struct report *rep = arg;
|
||||
struct hist_entry *he = iter->he;
|
||||
struct perf_evsel *evsel = iter->evsel;
|
||||
struct evsel *evsel = iter->evsel;
|
||||
struct perf_sample *sample = iter->sample;
|
||||
struct mem_info *mi;
|
||||
struct branch_info *bi;
|
||||
@@ -172,7 +172,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
|
||||
struct report *rep = arg;
|
||||
struct branch_info *bi;
|
||||
struct perf_sample *sample = iter->sample;
|
||||
struct perf_evsel *evsel = iter->evsel;
|
||||
struct evsel *evsel = iter->evsel;
|
||||
int err;
|
||||
|
||||
if (!ui__has_annotation() && !rep->symbol_ipc)
|
||||
@@ -193,7 +193,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
|
||||
}
|
||||
|
||||
static void setup_forced_leader(struct report *report,
|
||||
struct perf_evlist *evlist)
|
||||
struct evlist *evlist)
|
||||
{
|
||||
if (report->group_set)
|
||||
perf_evlist__force_leader(evlist);
|
||||
@@ -225,7 +225,7 @@ static int process_feature_event(struct perf_session *session,
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct report *rep = container_of(tool, struct report, tool);
|
||||
@@ -292,7 +292,7 @@ static int process_sample_event(struct perf_tool *tool,
|
||||
static int process_read_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample __maybe_unused,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
struct report *rep = container_of(tool, struct report, tool);
|
||||
@@ -400,7 +400,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
|
||||
char unit;
|
||||
unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
|
||||
u64 nr_events = hists->stats.total_period;
|
||||
struct perf_evsel *evsel = hists_to_evsel(hists);
|
||||
struct evsel *evsel = hists_to_evsel(hists);
|
||||
char buf[512];
|
||||
size_t size = sizeof(buf);
|
||||
int socked_id = hists->socket_filter;
|
||||
@@ -414,7 +414,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
|
||||
}
|
||||
|
||||
if (perf_evsel__is_group_event(evsel)) {
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
|
||||
perf_evsel__group_desc(evsel, buf, size);
|
||||
evname = buf;
|
||||
@@ -436,7 +436,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
|
||||
ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
|
||||
if (evname != NULL) {
|
||||
ret += fprintf(fp, " of event%s '%s'",
|
||||
evsel->nr_members > 1 ? "s" : "", evname);
|
||||
evsel->core.nr_members > 1 ? "s" : "", evname);
|
||||
}
|
||||
|
||||
if (rep->time_str)
|
||||
@@ -459,11 +459,11 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
|
||||
return ret + fprintf(fp, "\n#\n");
|
||||
}
|
||||
|
||||
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
|
||||
static int perf_evlist__tty_browse_hists(struct evlist *evlist,
|
||||
struct report *rep,
|
||||
const char *help)
|
||||
{
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
|
||||
if (!quiet) {
|
||||
fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
|
||||
@@ -532,7 +532,7 @@ static void report__warn_kptr_restrict(const struct report *rep)
|
||||
|
||||
static int report__gtk_browse_hists(struct report *rep, const char *help)
|
||||
{
|
||||
int (*hist_browser)(struct perf_evlist *evlist, const char *help,
|
||||
int (*hist_browser)(struct evlist *evlist, const char *help,
|
||||
struct hist_browser_timer *timer, float min_pcnt);
|
||||
|
||||
hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");
|
||||
@@ -549,7 +549,7 @@ static int report__browse_hists(struct report *rep)
|
||||
{
|
||||
int ret;
|
||||
struct perf_session *session = rep->session;
|
||||
struct perf_evlist *evlist = session->evlist;
|
||||
struct evlist *evlist = session->evlist;
|
||||
const char *help = perf_tip(system_path(TIPDIR));
|
||||
|
||||
if (help == NULL) {
|
||||
@@ -586,7 +586,7 @@ static int report__browse_hists(struct report *rep)
|
||||
static int report__collapse_hists(struct report *rep)
|
||||
{
|
||||
struct ui_progress prog;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
int ret = 0;
|
||||
|
||||
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
|
||||
@@ -623,7 +623,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
|
||||
struct symbol *sym = he->ms.sym;
|
||||
|
||||
if (rep->symbol_ipc && sym && !sym->annotate2) {
|
||||
struct perf_evsel *evsel = hists_to_evsel(he->hists);
|
||||
struct evsel *evsel = hists_to_evsel(he->hists);
|
||||
|
||||
symbol__annotate2(sym, he->ms.map, evsel,
|
||||
&annotation__default_options, NULL);
|
||||
@@ -635,7 +635,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
|
||||
static void report__output_resort(struct report *rep)
|
||||
{
|
||||
struct ui_progress prog;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
|
||||
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
|
||||
|
||||
@@ -818,7 +818,7 @@ static int __cmd_report(struct report *rep)
|
||||
{
|
||||
int ret;
|
||||
struct perf_session *session = rep->session;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
struct perf_data *data = session->data;
|
||||
|
||||
signal(SIGINT, sig_handler);
|
||||
|
||||
@@ -133,13 +133,13 @@ typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
|
||||
struct perf_sched;
|
||||
|
||||
struct trace_sched_handler {
|
||||
int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
|
||||
int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
|
||||
struct perf_sample *sample, struct machine *machine);
|
||||
|
||||
int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
|
||||
int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
|
||||
struct perf_sample *sample, struct machine *machine);
|
||||
|
||||
int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
|
||||
int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
|
||||
struct perf_sample *sample, struct machine *machine);
|
||||
|
||||
/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
|
||||
@@ -147,7 +147,7 @@ struct trace_sched_handler {
|
||||
struct machine *machine);
|
||||
|
||||
int (*migrate_task_event)(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine);
|
||||
};
|
||||
@@ -159,11 +159,11 @@ struct perf_sched_map {
|
||||
DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
|
||||
int *comp_cpus;
|
||||
bool comp;
|
||||
struct thread_map *color_pids;
|
||||
struct perf_thread_map *color_pids;
|
||||
const char *color_pids_str;
|
||||
struct cpu_map *color_cpus;
|
||||
struct perf_cpu_map *color_cpus;
|
||||
const char *color_cpus_str;
|
||||
struct cpu_map *cpus;
|
||||
struct perf_cpu_map *cpus;
|
||||
const char *cpus_str;
|
||||
};
|
||||
|
||||
@@ -799,7 +799,7 @@ static void test_calibrations(struct perf_sched *sched)
|
||||
|
||||
static int
|
||||
replay_wakeup_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel, struct perf_sample *sample,
|
||||
struct evsel *evsel, struct perf_sample *sample,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
const char *comm = perf_evsel__strval(evsel, sample, "comm");
|
||||
@@ -820,7 +820,7 @@ replay_wakeup_event(struct perf_sched *sched,
|
||||
}
|
||||
|
||||
static int replay_switch_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
@@ -1093,7 +1093,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
|
||||
}
|
||||
|
||||
static int latency_switch_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1163,7 +1163,7 @@ static int latency_switch_event(struct perf_sched *sched,
|
||||
}
|
||||
|
||||
static int latency_runtime_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1198,7 +1198,7 @@ static int latency_runtime_event(struct perf_sched *sched,
|
||||
}
|
||||
|
||||
static int latency_wakeup_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1259,7 +1259,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
|
||||
}
|
||||
|
||||
static int latency_migrate_task_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1470,7 +1470,7 @@ static void perf_sched__sort_lat(struct perf_sched *sched)
|
||||
}
|
||||
|
||||
static int process_sched_wakeup_event(struct perf_tool *tool,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1514,7 +1514,7 @@ map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid
|
||||
return thread;
|
||||
}
|
||||
|
||||
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
|
||||
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
|
||||
struct perf_sample *sample, struct machine *machine)
|
||||
{
|
||||
const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
|
||||
@@ -1655,7 +1655,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
|
||||
}
|
||||
|
||||
static int process_sched_switch_event(struct perf_tool *tool,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1681,7 +1681,7 @@ static int process_sched_switch_event(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
static int process_sched_runtime_event(struct perf_tool *tool,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1711,7 +1711,7 @@ static int perf_sched__process_fork_event(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
static int process_sched_migrate_task_event(struct perf_tool *tool,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -1724,14 +1724,14 @@ static int process_sched_migrate_task_event(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
typedef int (*tracepoint_handler)(struct perf_tool *tool,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine);
|
||||
|
||||
static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
int err = 0;
|
||||
@@ -1777,7 +1777,7 @@ static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
|
||||
|
||||
static int perf_sched__read_events(struct perf_sched *sched)
|
||||
{
|
||||
const struct perf_evsel_str_handler handlers[] = {
|
||||
const struct evsel_str_handler handlers[] = {
|
||||
{ "sched:sched_switch", process_sched_switch_event, },
|
||||
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
|
||||
{ "sched:sched_wakeup", process_sched_wakeup_event, },
|
||||
@@ -1839,7 +1839,7 @@ static inline void print_sched_time(unsigned long long nsecs, int width)
|
||||
* returns runtime data for event, allocating memory for it the
|
||||
* first time it is used.
|
||||
*/
|
||||
static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
|
||||
static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
|
||||
{
|
||||
struct evsel_runtime *r = evsel->priv;
|
||||
|
||||
@@ -1854,7 +1854,7 @@ static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
|
||||
/*
|
||||
* save last time event was seen per cpu
|
||||
*/
|
||||
static void perf_evsel__save_time(struct perf_evsel *evsel,
|
||||
static void perf_evsel__save_time(struct evsel *evsel,
|
||||
u64 timestamp, u32 cpu)
|
||||
{
|
||||
struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
|
||||
@@ -1881,7 +1881,7 @@ static void perf_evsel__save_time(struct perf_evsel *evsel,
|
||||
}
|
||||
|
||||
/* returns last time this event was seen on the given cpu */
|
||||
static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
|
||||
static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
|
||||
{
|
||||
struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
|
||||
|
||||
@@ -1988,7 +1988,7 @@ static char task_state_char(struct thread *thread, int state)
|
||||
}
|
||||
|
||||
static void timehist_print_sample(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct addr_location *al,
|
||||
struct thread *thread,
|
||||
@@ -2121,7 +2121,7 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
|
||||
}
|
||||
|
||||
static bool is_idle_sample(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
/* pid 0 == swapper == idle task */
|
||||
if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
|
||||
@@ -2132,7 +2132,7 @@ static bool is_idle_sample(struct perf_sample *sample,
|
||||
|
||||
static void save_task_callchain(struct perf_sched *sched,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct callchain_cursor *cursor = &callchain_cursor;
|
||||
@@ -2286,7 +2286,7 @@ static void save_idle_callchain(struct perf_sched *sched,
|
||||
static struct thread *timehist_get_thread(struct perf_sched *sched,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
struct thread *thread;
|
||||
|
||||
@@ -2332,7 +2332,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
|
||||
|
||||
static bool timehist_skip_sample(struct perf_sched *sched,
|
||||
struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
bool rc = false;
|
||||
@@ -2354,7 +2354,7 @@ static bool timehist_skip_sample(struct perf_sched *sched,
|
||||
}
|
||||
|
||||
static void timehist_print_wakeup_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine,
|
||||
struct thread *awakened)
|
||||
@@ -2389,7 +2389,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
|
||||
|
||||
static int timehist_sched_wakeup_event(struct perf_tool *tool,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -2419,7 +2419,7 @@ static int timehist_sched_wakeup_event(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
static void timehist_print_migration_event(struct perf_sched *sched,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine,
|
||||
struct thread *migrated)
|
||||
@@ -2473,7 +2473,7 @@ static void timehist_print_migration_event(struct perf_sched *sched,
|
||||
|
||||
static int timehist_migrate_task_event(struct perf_tool *tool,
|
||||
union perf_event *event __maybe_unused,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -2501,7 +2501,7 @@ static int timehist_migrate_task_event(struct perf_tool *tool,
|
||||
|
||||
static int timehist_sched_change_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -2627,7 +2627,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
|
||||
|
||||
static int timehist_sched_switch_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine __maybe_unused)
|
||||
{
|
||||
@@ -2897,14 +2897,14 @@ static void timehist_print_summary(struct perf_sched *sched,
|
||||
|
||||
typedef int (*sched_handler)(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine);
|
||||
|
||||
static int perf_timehist__process_sample(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
|
||||
@@ -2924,12 +2924,12 @@ static int perf_timehist__process_sample(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
static int timehist_check_attr(struct perf_sched *sched,
|
||||
struct perf_evlist *evlist)
|
||||
struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct evsel_runtime *er;
|
||||
|
||||
list_for_each_entry(evsel, &evlist->entries, node) {
|
||||
list_for_each_entry(evsel, &evlist->core.entries, core.node) {
|
||||
er = perf_evsel__get_runtime(evsel);
|
||||
if (er == NULL) {
|
||||
pr_err("Failed to allocate memory for evsel runtime data\n");
|
||||
@@ -2948,12 +2948,12 @@ static int timehist_check_attr(struct perf_sched *sched,
|
||||
|
||||
static int perf_sched__timehist(struct perf_sched *sched)
|
||||
{
|
||||
const struct perf_evsel_str_handler handlers[] = {
|
||||
const struct evsel_str_handler handlers[] = {
|
||||
{ "sched:sched_switch", timehist_sched_switch_event, },
|
||||
{ "sched:sched_wakeup", timehist_sched_wakeup_event, },
|
||||
{ "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
|
||||
};
|
||||
const struct perf_evsel_str_handler migrate_handlers[] = {
|
||||
const struct evsel_str_handler migrate_handlers[] = {
|
||||
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
|
||||
};
|
||||
struct perf_data data = {
|
||||
@@ -2963,7 +2963,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
|
||||
};
|
||||
|
||||
struct perf_session *session;
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
int err = -1;
|
||||
|
||||
/*
|
||||
@@ -3170,7 +3170,7 @@ static int perf_sched__lat(struct perf_sched *sched)
|
||||
|
||||
static int setup_map_cpus(struct perf_sched *sched)
|
||||
{
|
||||
struct cpu_map *map;
|
||||
struct perf_cpu_map *map;
|
||||
|
||||
sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
|
||||
|
||||
@@ -3183,7 +3183,7 @@ static int setup_map_cpus(struct perf_sched *sched)
|
||||
if (!sched->map.cpus_str)
|
||||
return 0;
|
||||
|
||||
map = cpu_map__new(sched->map.cpus_str);
|
||||
map = perf_cpu_map__new(sched->map.cpus_str);
|
||||
if (!map) {
|
||||
pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
|
||||
return -1;
|
||||
@@ -3195,7 +3195,7 @@ static int setup_map_cpus(struct perf_sched *sched)
|
||||
|
||||
static int setup_color_pids(struct perf_sched *sched)
|
||||
{
|
||||
struct thread_map *map;
|
||||
struct perf_thread_map *map;
|
||||
|
||||
if (!sched->map.color_pids_str)
|
||||
return 0;
|
||||
@@ -3212,12 +3212,12 @@ static int setup_color_pids(struct perf_sched *sched)
|
||||
|
||||
static int setup_color_cpus(struct perf_sched *sched)
|
||||
{
|
||||
struct cpu_map *map;
|
||||
struct perf_cpu_map *map;
|
||||
|
||||
if (!sched->map.color_cpus_str)
|
||||
return 0;
|
||||
|
||||
map = cpu_map__new(sched->map.color_cpus_str);
|
||||
map = perf_cpu_map__new(sched->map.color_cpus_str);
|
||||
if (!map) {
|
||||
pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
|
||||
return -1;
|
||||
|
||||
@@ -48,6 +48,7 @@
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <subcmd/pager.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
#include <linux/ctype.h>
|
||||
|
||||
@@ -242,7 +243,7 @@ static struct {
|
||||
},
|
||||
};
|
||||
|
||||
struct perf_evsel_script {
|
||||
struct evsel_script {
|
||||
char *filename;
|
||||
FILE *fp;
|
||||
u64 samples;
|
||||
@@ -251,15 +252,15 @@ struct perf_evsel_script {
|
||||
int gnum;
|
||||
};
|
||||
|
||||
static inline struct perf_evsel_script *evsel_script(struct perf_evsel *evsel)
|
||||
static inline struct evsel_script *evsel_script(struct evsel *evsel)
|
||||
{
|
||||
return (struct perf_evsel_script *)evsel->priv;
|
||||
return (struct evsel_script *)evsel->priv;
|
||||
}
|
||||
|
||||
static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
|
||||
static struct evsel_script *perf_evsel_script__new(struct evsel *evsel,
|
||||
struct perf_data *data)
|
||||
{
|
||||
struct perf_evsel_script *es = zalloc(sizeof(*es));
|
||||
struct evsel_script *es = zalloc(sizeof(*es));
|
||||
|
||||
if (es != NULL) {
|
||||
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
|
||||
@@ -277,7 +278,7 @@ static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void perf_evsel_script__delete(struct perf_evsel_script *es)
|
||||
static void perf_evsel_script__delete(struct evsel_script *es)
|
||||
{
|
||||
zfree(&es->filename);
|
||||
fclose(es->fp);
|
||||
@@ -285,7 +286,7 @@ static void perf_evsel_script__delete(struct perf_evsel_script *es)
|
||||
free(es);
|
||||
}
|
||||
|
||||
static int perf_evsel_script__fprintf(struct perf_evsel_script *es, FILE *fp)
|
||||
static int perf_evsel_script__fprintf(struct evsel_script *es, FILE *fp)
|
||||
{
|
||||
struct stat st;
|
||||
|
||||
@@ -340,12 +341,12 @@ static const char *output_field2str(enum perf_output_field field)
|
||||
|
||||
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
|
||||
|
||||
static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
|
||||
static int perf_evsel__do_check_stype(struct evsel *evsel,
|
||||
u64 sample_type, const char *sample_msg,
|
||||
enum perf_output_field field,
|
||||
bool allow_user_set)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
int type = output_type(attr->type);
|
||||
const char *evname;
|
||||
|
||||
@@ -372,7 +373,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__check_stype(struct perf_evsel *evsel,
|
||||
static int perf_evsel__check_stype(struct evsel *evsel,
|
||||
u64 sample_type, const char *sample_msg,
|
||||
enum perf_output_field field)
|
||||
{
|
||||
@@ -380,10 +381,10 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
|
||||
false);
|
||||
}
|
||||
|
||||
static int perf_evsel__check_attr(struct perf_evsel *evsel,
|
||||
static int perf_evsel__check_attr(struct evsel *evsel,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
bool allow_user_set;
|
||||
|
||||
if (perf_header__has_feat(&session->header, HEADER_STAT))
|
||||
@@ -418,7 +419,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
|
||||
return -EINVAL;
|
||||
|
||||
if (PRINT_FIELD(SYM) &&
|
||||
!(evsel->attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
|
||||
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
|
||||
pr_err("Display of symbols requested but neither sample IP nor "
|
||||
"sample address\navailable. Hence, no addresses to convert "
|
||||
"to symbols.\n");
|
||||
@@ -430,7 +431,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
|
||||
return -EINVAL;
|
||||
}
|
||||
if (PRINT_FIELD(DSO) &&
|
||||
!(evsel->attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
|
||||
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
|
||||
pr_err("Display of DSO requested but no address to convert.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -507,7 +508,7 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
|
||||
static int perf_session__check_output_opt(struct perf_session *session)
|
||||
{
|
||||
unsigned int j;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
|
||||
evsel = perf_session__find_first_evtype(session, attr_type(j));
|
||||
@@ -531,7 +532,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
|
||||
if (evsel == NULL)
|
||||
continue;
|
||||
|
||||
set_print_ip_opts(&evsel->attr);
|
||||
set_print_ip_opts(&evsel->core.attr);
|
||||
}
|
||||
|
||||
if (!no_callchain) {
|
||||
@@ -558,7 +559,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
|
||||
j = PERF_TYPE_TRACEPOINT;
|
||||
|
||||
evlist__for_each_entry(session->evlist, evsel) {
|
||||
if (evsel->attr.type != j)
|
||||
if (evsel->core.attr.type != j)
|
||||
continue;
|
||||
|
||||
if (evsel__has_callchain(evsel)) {
|
||||
@@ -566,7 +567,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
|
||||
output[j].fields |= PERF_OUTPUT_SYM;
|
||||
output[j].fields |= PERF_OUTPUT_SYMOFFSET;
|
||||
output[j].fields |= PERF_OUTPUT_DSO;
|
||||
set_print_ip_opts(&evsel->attr);
|
||||
set_print_ip_opts(&evsel->core.attr);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@@ -614,10 +615,10 @@ static int perf_sample__fprintf_uregs(struct perf_sample *sample,
|
||||
|
||||
static int perf_sample__fprintf_start(struct perf_sample *sample,
|
||||
struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
u32 type, FILE *fp)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
unsigned long secs;
|
||||
unsigned long long nsecs;
|
||||
int printed = 0;
|
||||
@@ -1162,13 +1163,13 @@ static int perf_sample__fprintf_addr(struct perf_sample *sample,
|
||||
}
|
||||
|
||||
static const char *resolve_branch_sym(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct thread *thread,
|
||||
struct addr_location *al,
|
||||
u64 *ip)
|
||||
{
|
||||
struct addr_location addr_al;
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
const char *name = NULL;
|
||||
|
||||
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
|
||||
@@ -1191,11 +1192,11 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
|
||||
}
|
||||
|
||||
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct thread *thread,
|
||||
struct addr_location *al, FILE *fp)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
size_t depth = thread_stack__depth(thread, sample->cpu);
|
||||
const char *name = NULL;
|
||||
static int spacing;
|
||||
@@ -1285,12 +1286,12 @@ static int perf_sample__fprintf_ipc(struct perf_sample *sample,
|
||||
}
|
||||
|
||||
static int perf_sample__fprintf_bts(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct thread *thread,
|
||||
struct addr_location *al,
|
||||
struct machine *machine, FILE *fp)
|
||||
{
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
unsigned int type = output_type(attr->type);
|
||||
bool print_srcline_last = false;
|
||||
int printed = 0;
|
||||
@@ -1322,7 +1323,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
|
||||
|
||||
/* print branch_to information */
|
||||
if (PRINT_FIELD(ADDR) ||
|
||||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
|
||||
((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
|
||||
!output[type].user_set)) {
|
||||
printed += fprintf(fp, " => ");
|
||||
printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
|
||||
@@ -1593,9 +1594,9 @@ static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
|
||||
}
|
||||
|
||||
static int perf_sample__fprintf_synth(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel, FILE *fp)
|
||||
struct evsel *evsel, FILE *fp)
|
||||
{
|
||||
switch (evsel->attr.config) {
|
||||
switch (evsel->core.attr.config) {
|
||||
case PERF_SYNTH_INTEL_PTWRITE:
|
||||
return perf_sample__fprintf_synth_ptwrite(sample, fp);
|
||||
case PERF_SYNTH_INTEL_MWAIT:
|
||||
@@ -1627,8 +1628,8 @@ struct perf_script {
|
||||
bool show_bpf_events;
|
||||
bool allocated;
|
||||
bool per_event_dump;
|
||||
struct cpu_map *cpus;
|
||||
struct thread_map *threads;
|
||||
struct perf_cpu_map *cpus;
|
||||
struct perf_thread_map *threads;
|
||||
int name_width;
|
||||
const char *time_str;
|
||||
struct perf_time_interval *ptime_range;
|
||||
@@ -1636,9 +1637,9 @@ struct perf_script {
|
||||
int range_num;
|
||||
};
|
||||
|
||||
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
|
||||
static int perf_evlist__max_name_len(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
int max = 0;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
@@ -1670,7 +1671,7 @@ static int data_src__fprintf(u64 data_src, FILE *fp)
|
||||
struct metric_ctx {
|
||||
struct perf_sample *sample;
|
||||
struct thread *thread;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
FILE *fp;
|
||||
};
|
||||
|
||||
@@ -1705,7 +1706,7 @@ static void script_new_line(struct perf_stat_config *config __maybe_unused,
|
||||
|
||||
static void perf_sample__fprint_metric(struct perf_script *script,
|
||||
struct thread *thread,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
FILE *fp)
|
||||
{
|
||||
@@ -1720,7 +1721,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
|
||||
},
|
||||
.force_header = false,
|
||||
};
|
||||
struct perf_evsel *ev2;
|
||||
struct evsel *ev2;
|
||||
u64 val;
|
||||
|
||||
if (!evsel->stats)
|
||||
@@ -1733,7 +1734,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
|
||||
sample->cpu,
|
||||
&rt_stat);
|
||||
evsel_script(evsel)->val = val;
|
||||
if (evsel_script(evsel->leader)->gnum == evsel->leader->nr_members) {
|
||||
if (evsel_script(evsel->leader)->gnum == evsel->leader->core.nr_members) {
|
||||
for_each_group_member (ev2, evsel->leader) {
|
||||
perf_stat__print_shadow_stats(&stat_config, ev2,
|
||||
evsel_script(ev2)->val,
|
||||
@@ -1747,7 +1748,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
|
||||
}
|
||||
|
||||
static bool show_event(struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct thread *thread,
|
||||
struct addr_location *al)
|
||||
{
|
||||
@@ -1788,14 +1789,14 @@ static bool show_event(struct perf_sample *sample,
|
||||
}
|
||||
|
||||
static void process_event(struct perf_script *script,
|
||||
struct perf_sample *sample, struct perf_evsel *evsel,
|
||||
struct perf_sample *sample, struct evsel *evsel,
|
||||
struct addr_location *al,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct thread *thread = al->thread;
|
||||
struct perf_event_attr *attr = &evsel->attr;
|
||||
struct perf_event_attr *attr = &evsel->core.attr;
|
||||
unsigned int type = output_type(attr->type);
|
||||
struct perf_evsel_script *es = evsel->priv;
|
||||
struct evsel_script *es = evsel->priv;
|
||||
FILE *fp = es->fp;
|
||||
|
||||
if (output[type].fields == 0)
|
||||
@@ -1897,9 +1898,9 @@ static void process_event(struct perf_script *script,
|
||||
|
||||
static struct scripting_ops *scripting_ops;
|
||||
|
||||
static void __process_stat(struct perf_evsel *counter, u64 tstamp)
|
||||
static void __process_stat(struct evsel *counter, u64 tstamp)
|
||||
{
|
||||
int nthreads = thread_map__nr(counter->threads);
|
||||
int nthreads = thread_map__nr(counter->core.threads);
|
||||
int ncpus = perf_evsel__nr_cpus(counter);
|
||||
int cpu, thread;
|
||||
static int header_printed;
|
||||
@@ -1920,8 +1921,8 @@ static void __process_stat(struct perf_evsel *counter, u64 tstamp)
|
||||
counts = perf_counts(counter->counts, cpu, thread);
|
||||
|
||||
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
|
||||
counter->cpus->map[cpu],
|
||||
thread_map__pid(counter->threads, thread),
|
||||
counter->core.cpus->map[cpu],
|
||||
thread_map__pid(counter->core.threads, thread),
|
||||
counts->val,
|
||||
counts->ena,
|
||||
counts->run,
|
||||
@@ -1931,7 +1932,7 @@ static void __process_stat(struct perf_evsel *counter, u64 tstamp)
|
||||
}
|
||||
}
|
||||
|
||||
static void process_stat(struct perf_evsel *counter, u64 tstamp)
|
||||
static void process_stat(struct evsel *counter, u64 tstamp)
|
||||
{
|
||||
if (scripting_ops && scripting_ops->process_stat)
|
||||
scripting_ops->process_stat(&stat_config, counter, tstamp);
|
||||
@@ -1973,7 +1974,7 @@ static bool filter_cpu(struct perf_sample *sample)
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_script *scr = container_of(tool, struct perf_script, tool);
|
||||
@@ -2018,13 +2019,13 @@ static int process_sample_event(struct perf_tool *tool,
|
||||
}
|
||||
|
||||
static int process_attr(struct perf_tool *tool, union perf_event *event,
|
||||
struct perf_evlist **pevlist)
|
||||
struct evlist **pevlist)
|
||||
{
|
||||
struct perf_script *scr = container_of(tool, struct perf_script, tool);
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel, *pos;
|
||||
struct evlist *evlist;
|
||||
struct evsel *evsel, *pos;
|
||||
int err;
|
||||
static struct perf_evsel_script *es;
|
||||
static struct evsel_script *es;
|
||||
|
||||
err = perf_event__process_attr(tool, event, pevlist);
|
||||
if (err)
|
||||
@@ -2046,18 +2047,18 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
|
||||
}
|
||||
}
|
||||
|
||||
if (evsel->attr.type >= PERF_TYPE_MAX &&
|
||||
evsel->attr.type != PERF_TYPE_SYNTH)
|
||||
if (evsel->core.attr.type >= PERF_TYPE_MAX &&
|
||||
evsel->core.attr.type != PERF_TYPE_SYNTH)
|
||||
return 0;
|
||||
|
||||
evlist__for_each_entry(evlist, pos) {
|
||||
if (pos->attr.type == evsel->attr.type && pos != evsel)
|
||||
if (pos->core.attr.type == evsel->core.attr.type && pos != evsel)
|
||||
return 0;
|
||||
}
|
||||
|
||||
set_print_ip_opts(&evsel->attr);
|
||||
set_print_ip_opts(&evsel->core.attr);
|
||||
|
||||
if (evsel->attr.sample_type)
|
||||
if (evsel->core.attr.sample_type)
|
||||
err = perf_evsel__check_attr(evsel, scr->session);
|
||||
|
||||
return err;
|
||||
@@ -2071,7 +2072,7 @@ static int process_comm_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
int ret = -1;
|
||||
|
||||
thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid);
|
||||
@@ -2083,7 +2084,7 @@ static int process_comm_event(struct perf_tool *tool,
|
||||
if (perf_event__process_comm(tool, event, sample, machine) < 0)
|
||||
goto out;
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = 0;
|
||||
sample->tid = event->comm.tid;
|
||||
@@ -2108,7 +2109,7 @@ static int process_namespaces_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
int ret = -1;
|
||||
|
||||
thread = machine__findnew_thread(machine, event->namespaces.pid,
|
||||
@@ -2121,7 +2122,7 @@ static int process_namespaces_event(struct perf_tool *tool,
|
||||
if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
|
||||
goto out;
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = 0;
|
||||
sample->tid = event->namespaces.tid;
|
||||
@@ -2146,7 +2147,7 @@ static int process_fork_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (perf_event__process_fork(tool, event, sample, machine) < 0)
|
||||
return -1;
|
||||
@@ -2157,7 +2158,7 @@ static int process_fork_event(struct perf_tool *tool,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = event->fork.time;
|
||||
sample->tid = event->fork.tid;
|
||||
@@ -2181,7 +2182,7 @@ static int process_exit_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
|
||||
if (thread == NULL) {
|
||||
@@ -2189,7 +2190,7 @@ static int process_exit_event(struct perf_tool *tool,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = 0;
|
||||
sample->tid = event->fork.tid;
|
||||
@@ -2216,7 +2217,7 @@ static int process_mmap_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (perf_event__process_mmap(tool, event, sample, machine) < 0)
|
||||
return -1;
|
||||
@@ -2227,7 +2228,7 @@ static int process_mmap_event(struct perf_tool *tool,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = 0;
|
||||
sample->tid = event->mmap.tid;
|
||||
@@ -2250,7 +2251,7 @@ static int process_mmap2_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
|
||||
return -1;
|
||||
@@ -2261,7 +2262,7 @@ static int process_mmap2_event(struct perf_tool *tool,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
sample->cpu = 0;
|
||||
sample->time = 0;
|
||||
sample->tid = event->mmap2.tid;
|
||||
@@ -2284,7 +2285,7 @@ static int process_switch_event(struct perf_tool *tool,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (perf_event__process_switch(tool, event, sample, machine) < 0)
|
||||
return -1;
|
||||
@@ -2319,7 +2320,7 @@ process_lost_event(struct perf_tool *tool,
|
||||
{
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct thread *thread;
|
||||
|
||||
thread = machine__findnew_thread(machine, sample->pid,
|
||||
@@ -2355,12 +2356,12 @@ process_bpf_events(struct perf_tool *tool __maybe_unused,
|
||||
struct thread *thread;
|
||||
struct perf_script *script = container_of(tool, struct perf_script, tool);
|
||||
struct perf_session *session = script->session;
|
||||
struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
|
||||
|
||||
if (machine__process_ksymbol(machine, event, sample) < 0)
|
||||
return -1;
|
||||
|
||||
if (!evsel->attr.sample_id_all) {
|
||||
if (!evsel->core.attr.sample_id_all) {
|
||||
perf_event__fprintf(event, stdout);
|
||||
return 0;
|
||||
}
|
||||
@@ -2388,8 +2389,8 @@ static void sig_handler(int sig __maybe_unused)
|
||||
|
||||
static void perf_script__fclose_per_event_dump(struct perf_script *script)
|
||||
{
|
||||
struct perf_evlist *evlist = script->session->evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = script->session->evlist;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (!evsel->priv)
|
||||
@@ -2401,7 +2402,7 @@ static void perf_script__fclose_per_event_dump(struct perf_script *script)
|
||||
|
||||
static int perf_script__fopen_per_event_dump(struct perf_script *script)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(script->session->evlist, evsel) {
|
||||
/*
|
||||
@@ -2428,8 +2429,8 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
|
||||
|
||||
static int perf_script__setup_per_event_dump(struct perf_script *script)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
static struct perf_evsel_script es_stdout;
|
||||
struct evsel *evsel;
|
||||
static struct evsel_script es_stdout;
|
||||
|
||||
if (script->per_event_dump)
|
||||
return perf_script__fopen_per_event_dump(script);
|
||||
@@ -2444,10 +2445,10 @@ static int perf_script__setup_per_event_dump(struct perf_script *script)
|
||||
|
||||
static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(script->session->evlist, evsel) {
|
||||
struct perf_evsel_script *es = evsel->priv;
|
||||
struct evsel_script *es = evsel->priv;
|
||||
|
||||
perf_evsel_script__fprintf(es, stdout);
|
||||
perf_evsel_script__delete(es);
|
||||
@@ -3003,7 +3004,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
|
||||
{
|
||||
char filename[MAXPATHLEN], evname[128];
|
||||
char line[BUFSIZ], *p;
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
int match, len;
|
||||
FILE *fp;
|
||||
|
||||
@@ -3236,7 +3237,7 @@ static int process_stat_round_event(struct perf_session *session,
|
||||
union perf_event *event)
|
||||
{
|
||||
struct stat_round_event *round = &event->stat_round;
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
|
||||
evlist__for_each_entry(session->evlist, counter) {
|
||||
perf_stat_process_counter(&stat_config, counter);
|
||||
@@ -3256,7 +3257,7 @@ static int process_stat_config_event(struct perf_session *session __maybe_unused
|
||||
|
||||
static int set_maps(struct perf_script *script)
|
||||
{
|
||||
struct perf_evlist *evlist = script->session->evlist;
|
||||
struct evlist *evlist = script->session->evlist;
|
||||
|
||||
if (!script->cpus || !script->threads)
|
||||
return 0;
|
||||
@@ -3264,7 +3265,7 @@ static int set_maps(struct perf_script *script)
|
||||
if (WARN_ONCE(script->allocated, "stats double allocation\n"))
|
||||
return -EINVAL;
|
||||
|
||||
perf_evlist__set_maps(evlist, script->cpus, script->threads);
|
||||
perf_evlist__set_maps(&evlist->core, script->cpus, script->threads);
|
||||
|
||||
if (perf_evlist__alloc_stats(evlist, true))
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -83,6 +83,7 @@
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include <linux/ctype.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
#define DEFAULT_SEPARATOR " "
|
||||
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
|
||||
@@ -130,7 +131,7 @@ static const char *smi_cost_attrs = {
|
||||
"}"
|
||||
};
|
||||
|
||||
static struct perf_evlist *evsel_list;
|
||||
static struct evlist *evsel_list;
|
||||
|
||||
static struct target target = {
|
||||
.uid = UINT_MAX,
|
||||
@@ -164,8 +165,8 @@ struct perf_stat {
|
||||
u64 bytes_written;
|
||||
struct perf_tool tool;
|
||||
bool maps_allocated;
|
||||
struct cpu_map *cpus;
|
||||
struct thread_map *threads;
|
||||
struct perf_cpu_map *cpus;
|
||||
struct perf_thread_map *threads;
|
||||
enum aggr_mode aggr_mode;
|
||||
};
|
||||
|
||||
@@ -234,7 +235,7 @@ static int write_stat_round_event(u64 tm, u64 type)
|
||||
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
|
||||
|
||||
static int
|
||||
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
|
||||
perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
|
||||
struct perf_counts_values *count)
|
||||
{
|
||||
struct perf_sample_id *sid = SID(counter, cpu, thread);
|
||||
@@ -243,7 +244,7 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
|
||||
process_synthesized_event, NULL);
|
||||
}
|
||||
|
||||
static int read_single_counter(struct perf_evsel *counter, int cpu,
|
||||
static int read_single_counter(struct evsel *counter, int cpu,
|
||||
int thread, struct timespec *rs)
|
||||
{
|
||||
if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
|
||||
@@ -261,9 +262,9 @@ static int read_single_counter(struct perf_evsel *counter, int cpu,
|
||||
* Read out the results of a single counter:
|
||||
* do not aggregate counts across CPUs in system-wide mode
|
||||
*/
|
||||
static int read_counter(struct perf_evsel *counter, struct timespec *rs)
|
||||
static int read_counter(struct evsel *counter, struct timespec *rs)
|
||||
{
|
||||
int nthreads = thread_map__nr(evsel_list->threads);
|
||||
int nthreads = thread_map__nr(evsel_list->core.threads);
|
||||
int ncpus, cpu, thread;
|
||||
|
||||
if (target__has_cpu(&target) && !target__has_per_thread(&target))
|
||||
@@ -287,7 +288,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
|
||||
* The leader's group read loads data into its group members
|
||||
* (via perf_evsel__read_counter) and sets threir count->loaded.
|
||||
*/
|
||||
if (!count->loaded &&
|
||||
if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
|
||||
read_single_counter(counter, cpu, thread, rs)) {
|
||||
counter->counts->scaled = -1;
|
||||
perf_counts(counter->counts, cpu, thread)->ena = 0;
|
||||
@@ -295,7 +296,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
|
||||
return -1;
|
||||
}
|
||||
|
||||
count->loaded = false;
|
||||
perf_counts__set_loaded(counter->counts, cpu, thread, false);
|
||||
|
||||
if (STAT_RECORD) {
|
||||
if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
|
||||
@@ -319,7 +320,7 @@ static int read_counter(struct perf_evsel *counter, struct timespec *rs)
|
||||
|
||||
static void read_counters(struct timespec *rs)
|
||||
{
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
int ret;
|
||||
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
@@ -362,7 +363,7 @@ static void enable_counters(void)
|
||||
* - we have initial delay configured
|
||||
*/
|
||||
if (!target__none(&target) || stat_config.initial_delay)
|
||||
perf_evlist__enable(evsel_list);
|
||||
evlist__enable(evsel_list);
|
||||
}
|
||||
|
||||
static void disable_counters(void)
|
||||
@@ -373,7 +374,7 @@ static void disable_counters(void)
|
||||
* from counting before reading their constituent counters.
|
||||
*/
|
||||
if (!target__none(&target))
|
||||
perf_evlist__disable(evsel_list);
|
||||
evlist__disable(evsel_list);
|
||||
}
|
||||
|
||||
static volatile int workload_exec_errno;
|
||||
@@ -389,13 +390,13 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
|
||||
workload_exec_errno = info->si_value.sival_int;
|
||||
}
|
||||
|
||||
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
|
||||
static bool perf_evsel__should_store_id(struct evsel *counter)
|
||||
{
|
||||
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
|
||||
return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
|
||||
}
|
||||
|
||||
static bool is_target_alive(struct target *_target,
|
||||
struct thread_map *threads)
|
||||
struct perf_thread_map *threads)
|
||||
{
|
||||
struct stat st;
|
||||
int i;
|
||||
@@ -423,7 +424,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
int timeout = stat_config.timeout;
|
||||
char msg[BUFSIZ];
|
||||
unsigned long long t0, t1;
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
struct timespec ts;
|
||||
size_t l;
|
||||
int status = 0;
|
||||
@@ -478,22 +479,22 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
counter->supported = false;
|
||||
|
||||
if ((counter->leader != counter) ||
|
||||
!(counter->leader->nr_members > 1))
|
||||
!(counter->leader->core.nr_members > 1))
|
||||
continue;
|
||||
} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
|
||||
if (verbose > 0)
|
||||
ui__warning("%s\n", msg);
|
||||
goto try_again;
|
||||
} else if (target__has_per_thread(&target) &&
|
||||
evsel_list->threads &&
|
||||
evsel_list->threads->err_thread != -1) {
|
||||
evsel_list->core.threads &&
|
||||
evsel_list->core.threads->err_thread != -1) {
|
||||
/*
|
||||
* For global --per-thread case, skip current
|
||||
* error thread.
|
||||
*/
|
||||
if (!thread_map__remove(evsel_list->threads,
|
||||
evsel_list->threads->err_thread)) {
|
||||
evsel_list->threads->err_thread = -1;
|
||||
if (!thread_map__remove(evsel_list->core.threads,
|
||||
evsel_list->core.threads->err_thread)) {
|
||||
evsel_list->core.threads->err_thread = -1;
|
||||
goto try_again;
|
||||
}
|
||||
}
|
||||
@@ -579,7 +580,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
enable_counters();
|
||||
while (!done) {
|
||||
nanosleep(&ts, NULL);
|
||||
if (!is_target_alive(&target, evsel_list->threads))
|
||||
if (!is_target_alive(&target, evsel_list->core.threads))
|
||||
break;
|
||||
if (timeout)
|
||||
break;
|
||||
@@ -613,7 +614,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
|
||||
* later the evsel_list will be closed after.
|
||||
*/
|
||||
if (!STAT_RECORD)
|
||||
perf_evlist__close(evsel_list);
|
||||
evlist__close(evsel_list);
|
||||
|
||||
return WEXITSTATUS(status);
|
||||
}
|
||||
@@ -803,24 +804,24 @@ static struct option stat_options[] = {
|
||||
};
|
||||
|
||||
static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int cpu)
|
||||
struct perf_cpu_map *map, int cpu)
|
||||
{
|
||||
return cpu_map__get_socket(map, cpu, NULL);
|
||||
}
|
||||
|
||||
static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int cpu)
|
||||
struct perf_cpu_map *map, int cpu)
|
||||
{
|
||||
return cpu_map__get_die(map, cpu, NULL);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int cpu)
|
||||
struct perf_cpu_map *map, int cpu)
|
||||
{
|
||||
return cpu_map__get_core(map, cpu, NULL);
|
||||
}
|
||||
|
||||
static int cpu_map__get_max(struct cpu_map *map)
|
||||
static int cpu_map__get_max(struct perf_cpu_map *map)
|
||||
{
|
||||
int i, max = -1;
|
||||
|
||||
@@ -833,7 +834,7 @@ static int cpu_map__get_max(struct cpu_map *map)
|
||||
}
|
||||
|
||||
static int perf_stat__get_aggr(struct perf_stat_config *config,
|
||||
aggr_get_id_t get_id, struct cpu_map *map, int idx)
|
||||
aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
@@ -849,26 +850,26 @@ static int perf_stat__get_aggr(struct perf_stat_config *config,
|
||||
}
|
||||
|
||||
static int perf_stat__get_socket_cached(struct perf_stat_config *config,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
|
||||
}
|
||||
|
||||
static int perf_stat__get_die_cached(struct perf_stat_config *config,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core_cached(struct perf_stat_config *config,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
|
||||
}
|
||||
|
||||
static bool term_percore_set(void)
|
||||
{
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (counter->percore)
|
||||
@@ -884,21 +885,21 @@ static int perf_stat_init_aggr_mode(void)
|
||||
|
||||
switch (stat_config.aggr_mode) {
|
||||
case AGGR_SOCKET:
|
||||
if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build socket map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_socket_cached;
|
||||
break;
|
||||
case AGGR_DIE:
|
||||
if (cpu_map__build_die_map(evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build die map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_die_cached;
|
||||
break;
|
||||
case AGGR_CORE:
|
||||
if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build core map");
|
||||
return -1;
|
||||
}
|
||||
@@ -906,7 +907,7 @@ static int perf_stat_init_aggr_mode(void)
|
||||
break;
|
||||
case AGGR_NONE:
|
||||
if (term_percore_set()) {
|
||||
if (cpu_map__build_core_map(evsel_list->cpus,
|
||||
if (cpu_map__build_core_map(evsel_list->core.cpus,
|
||||
&stat_config.aggr_map)) {
|
||||
perror("cannot build core map");
|
||||
return -1;
|
||||
@@ -926,20 +927,20 @@ static int perf_stat_init_aggr_mode(void)
|
||||
* taking the highest cpu number to be the size of
|
||||
* the aggregation translate cpumap.
|
||||
*/
|
||||
nr = cpu_map__get_max(evsel_list->cpus);
|
||||
nr = cpu_map__get_max(evsel_list->core.cpus);
|
||||
stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
|
||||
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static void perf_stat__exit_aggr_mode(void)
|
||||
{
|
||||
cpu_map__put(stat_config.aggr_map);
|
||||
cpu_map__put(stat_config.cpus_aggr_map);
|
||||
perf_cpu_map__put(stat_config.aggr_map);
|
||||
perf_cpu_map__put(stat_config.cpus_aggr_map);
|
||||
stat_config.aggr_map = NULL;
|
||||
stat_config.cpus_aggr_map = NULL;
|
||||
}
|
||||
|
||||
static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
|
||||
static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
@@ -954,7 +955,7 @@ static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, i
|
||||
return cpu;
|
||||
}
|
||||
|
||||
static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
|
||||
static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
|
||||
{
|
||||
struct perf_env *env = data;
|
||||
int cpu = perf_env__get_cpu(env, map, idx);
|
||||
@@ -962,7 +963,7 @@ static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
|
||||
return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
|
||||
}
|
||||
|
||||
static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
|
||||
static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
|
||||
{
|
||||
struct perf_env *env = data;
|
||||
int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
|
||||
@@ -986,7 +987,7 @@ static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
|
||||
return die_id;
|
||||
}
|
||||
|
||||
static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
|
||||
static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
|
||||
{
|
||||
struct perf_env *env = data;
|
||||
int core = -1, cpu = perf_env__get_cpu(env, map, idx);
|
||||
@@ -1016,37 +1017,37 @@ static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
|
||||
return core;
|
||||
}
|
||||
|
||||
static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
|
||||
struct cpu_map **sockp)
|
||||
static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
|
||||
struct perf_cpu_map **sockp)
|
||||
{
|
||||
return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
|
||||
}
|
||||
|
||||
static int perf_env__build_die_map(struct perf_env *env, struct cpu_map *cpus,
|
||||
struct cpu_map **diep)
|
||||
static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
|
||||
struct perf_cpu_map **diep)
|
||||
{
|
||||
return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
|
||||
}
|
||||
|
||||
static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
|
||||
struct cpu_map **corep)
|
||||
static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
|
||||
struct perf_cpu_map **corep)
|
||||
{
|
||||
return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
|
||||
}
|
||||
|
||||
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
|
||||
}
|
||||
static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_env__get_die(map, idx, &perf_stat.session->header.env);
|
||||
}
|
||||
|
||||
static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
|
||||
struct cpu_map *map, int idx)
|
||||
struct perf_cpu_map *map, int idx)
|
||||
{
|
||||
return perf_env__get_core(map, idx, &perf_stat.session->header.env);
|
||||
}
|
||||
@@ -1057,21 +1058,21 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
|
||||
|
||||
switch (stat_config.aggr_mode) {
|
||||
case AGGR_SOCKET:
|
||||
if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build socket map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_socket_file;
|
||||
break;
|
||||
case AGGR_DIE:
|
||||
if (perf_env__build_die_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build die map");
|
||||
return -1;
|
||||
}
|
||||
stat_config.aggr_get_id = perf_stat__get_die_file;
|
||||
break;
|
||||
case AGGR_CORE:
|
||||
if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
|
||||
if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
|
||||
perror("cannot build core map");
|
||||
return -1;
|
||||
}
|
||||
@@ -1366,7 +1367,7 @@ static int add_default_attributes(void)
|
||||
free(str);
|
||||
}
|
||||
|
||||
if (!evsel_list->nr_entries) {
|
||||
if (!evsel_list->core.nr_entries) {
|
||||
if (target__has_cpu(&target))
|
||||
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
|
||||
|
||||
@@ -1462,7 +1463,7 @@ static int process_stat_round_event(struct perf_session *session,
|
||||
union perf_event *event)
|
||||
{
|
||||
struct stat_round_event *stat_round = &event->stat_round;
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
struct timespec tsh, *ts = NULL;
|
||||
const char **argv = session->header.env.cmdline_argv;
|
||||
int argc = session->header.env.nr_cmdline;
|
||||
@@ -1517,7 +1518,7 @@ static int set_maps(struct perf_stat *st)
|
||||
if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
|
||||
return -EINVAL;
|
||||
|
||||
perf_evlist__set_maps(evsel_list, st->cpus, st->threads);
|
||||
perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
|
||||
|
||||
if (perf_evlist__alloc_stats(evsel_list, true))
|
||||
return -ENOMEM;
|
||||
@@ -1551,7 +1552,7 @@ int process_cpu_map_event(struct perf_session *session,
|
||||
{
|
||||
struct perf_tool *tool = session->tool;
|
||||
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
|
||||
struct cpu_map *cpus;
|
||||
struct perf_cpu_map *cpus;
|
||||
|
||||
if (st->cpus) {
|
||||
pr_warning("Extra cpu map event, ignoring.\n");
|
||||
@@ -1676,14 +1677,14 @@ static void setup_system_wide(int forks)
|
||||
if (!forks)
|
||||
target.system_wide = true;
|
||||
else {
|
||||
struct perf_evsel *counter;
|
||||
struct evsel *counter;
|
||||
|
||||
evlist__for_each_entry(evsel_list, counter) {
|
||||
if (!counter->system_wide)
|
||||
return;
|
||||
}
|
||||
|
||||
if (evsel_list->nr_entries)
|
||||
if (evsel_list->core.nr_entries)
|
||||
target.system_wide = true;
|
||||
}
|
||||
}
|
||||
@@ -1702,7 +1703,7 @@ int cmd_stat(int argc, const char **argv)
|
||||
|
||||
setlocale(LC_ALL, "");
|
||||
|
||||
evsel_list = perf_evlist__new();
|
||||
evsel_list = evlist__new();
|
||||
if (evsel_list == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1889,10 +1890,10 @@ int cmd_stat(int argc, const char **argv)
|
||||
* so we could print it out on output.
|
||||
*/
|
||||
if (stat_config.aggr_mode == AGGR_THREAD) {
|
||||
thread_map__read_comms(evsel_list->threads);
|
||||
thread_map__read_comms(evsel_list->core.threads);
|
||||
if (target.system_wide) {
|
||||
if (runtime_stat_new(&stat_config,
|
||||
thread_map__nr(evsel_list->threads))) {
|
||||
thread_map__nr(evsel_list->core.threads))) {
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@@ -2003,7 +2004,7 @@ int cmd_stat(int argc, const char **argv)
|
||||
perf_session__write_header(perf_stat.session, evsel_list, fd, true);
|
||||
}
|
||||
|
||||
perf_evlist__close(evsel_list);
|
||||
evlist__close(evsel_list);
|
||||
perf_session__delete(perf_stat.session);
|
||||
}
|
||||
|
||||
@@ -2015,7 +2016,7 @@ int cmd_stat(int argc, const char **argv)
|
||||
if (smi_cost && smi_reset)
|
||||
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
|
||||
|
||||
perf_evlist__delete(evsel_list);
|
||||
evlist__delete(evsel_list);
|
||||
|
||||
runtime_stat_delete(&stat_config);
|
||||
|
||||
|
||||
@@ -545,19 +545,19 @@ static const char *cat_backtrace(union perf_event *event,
|
||||
}
|
||||
|
||||
typedef int (*tracepoint_handler)(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace);
|
||||
|
||||
static int process_sample_event(struct perf_tool *tool,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct timechart *tchart = container_of(tool, struct timechart, tool);
|
||||
|
||||
if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
|
||||
if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
|
||||
if (!tchart->first_time || tchart->first_time > sample->time)
|
||||
tchart->first_time = sample->time;
|
||||
if (tchart->last_time < sample->time)
|
||||
@@ -575,7 +575,7 @@ static int process_sample_event(struct perf_tool *tool,
|
||||
|
||||
static int
|
||||
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace __maybe_unused)
|
||||
{
|
||||
@@ -591,7 +591,7 @@ process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
|
||||
|
||||
static int
|
||||
process_sample_cpu_frequency(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace __maybe_unused)
|
||||
{
|
||||
@@ -604,7 +604,7 @@ process_sample_cpu_frequency(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_sample_sched_wakeup(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace)
|
||||
{
|
||||
@@ -618,7 +618,7 @@ process_sample_sched_wakeup(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_sample_sched_switch(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace)
|
||||
{
|
||||
@@ -634,7 +634,7 @@ process_sample_sched_switch(struct timechart *tchart,
|
||||
#ifdef SUPPORT_OLD_POWER_EVENTS
|
||||
static int
|
||||
process_sample_power_start(struct timechart *tchart __maybe_unused,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace __maybe_unused)
|
||||
{
|
||||
@@ -647,7 +647,7 @@ process_sample_power_start(struct timechart *tchart __maybe_unused,
|
||||
|
||||
static int
|
||||
process_sample_power_end(struct timechart *tchart,
|
||||
struct perf_evsel *evsel __maybe_unused,
|
||||
struct evsel *evsel __maybe_unused,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace __maybe_unused)
|
||||
{
|
||||
@@ -657,7 +657,7 @@ process_sample_power_end(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_sample_power_frequency(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
const char *backtrace __maybe_unused)
|
||||
{
|
||||
@@ -840,7 +840,7 @@ static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
|
||||
|
||||
static int
|
||||
process_enter_read(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -850,7 +850,7 @@ process_enter_read(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_read(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -860,7 +860,7 @@ process_exit_read(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_enter_write(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -870,7 +870,7 @@ process_enter_write(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_write(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -880,7 +880,7 @@ process_exit_write(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_enter_sync(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -890,7 +890,7 @@ process_enter_sync(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_sync(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -900,7 +900,7 @@ process_exit_sync(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_enter_tx(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -910,7 +910,7 @@ process_enter_tx(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_tx(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -920,7 +920,7 @@ process_exit_tx(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_enter_rx(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -930,7 +930,7 @@ process_enter_rx(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_rx(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -940,7 +940,7 @@ process_exit_rx(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_enter_poll(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long fd = perf_evsel__intval(evsel, sample, "fd");
|
||||
@@ -950,7 +950,7 @@ process_enter_poll(struct timechart *tchart,
|
||||
|
||||
static int
|
||||
process_exit_poll(struct timechart *tchart,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
long ret = perf_evsel__intval(evsel, sample, "ret");
|
||||
@@ -1534,7 +1534,7 @@ static int process_header(struct perf_file_section *section __maybe_unused,
|
||||
|
||||
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
|
||||
{
|
||||
const struct perf_evsel_str_handler power_tracepoints[] = {
|
||||
const struct evsel_str_handler power_tracepoints[] = {
|
||||
{ "power:cpu_idle", process_sample_cpu_idle },
|
||||
{ "power:cpu_frequency", process_sample_cpu_frequency },
|
||||
{ "sched:sched_wakeup", process_sample_sched_wakeup },
|
||||
|
||||
@@ -38,7 +38,6 @@
|
||||
#include <subcmd/parse-options.h>
|
||||
#include "util/parse-events.h"
|
||||
#include "util/cpumap.h"
|
||||
#include "util/xyarray.h"
|
||||
#include "util/sort.h"
|
||||
#include "util/string2.h"
|
||||
#include "util/term.h"
|
||||
@@ -101,7 +100,7 @@ static void perf_top__resize(struct perf_top *top)
|
||||
|
||||
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct symbol *sym;
|
||||
struct annotation *notes;
|
||||
struct map *map;
|
||||
@@ -129,7 +128,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
|
||||
notes = symbol__annotation(sym);
|
||||
pthread_mutex_lock(¬es->lock);
|
||||
|
||||
if (!symbol__hists(sym, top->evlist->nr_entries)) {
|
||||
if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
|
||||
pthread_mutex_unlock(¬es->lock);
|
||||
pr_err("Not enough memory for annotating '%s' symbol!\n",
|
||||
sym->name);
|
||||
@@ -186,7 +185,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
|
||||
static void perf_top__record_precise_ip(struct perf_top *top,
|
||||
struct hist_entry *he,
|
||||
struct perf_sample *sample,
|
||||
struct perf_evsel *evsel, u64 ip)
|
||||
struct evsel *evsel, u64 ip)
|
||||
{
|
||||
struct annotation *notes;
|
||||
struct symbol *sym = he->ms.sym;
|
||||
@@ -228,7 +227,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
|
||||
static void perf_top__show_details(struct perf_top *top)
|
||||
{
|
||||
struct hist_entry *he = top->sym_filter_entry;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct annotation *notes;
|
||||
struct symbol *symbol;
|
||||
int more;
|
||||
@@ -270,7 +269,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
|
||||
char bf[160];
|
||||
int printed = 0;
|
||||
const int win_width = top->winsize.ws_col - 1;
|
||||
struct perf_evsel *evsel = top->sym_evsel;
|
||||
struct evsel *evsel = top->sym_evsel;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
|
||||
puts(CONSOLE_CLEAR);
|
||||
@@ -404,7 +403,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
|
||||
fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
|
||||
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
|
||||
|
||||
if (top->evlist->nr_entries > 1)
|
||||
if (top->evlist->core.nr_entries > 1)
|
||||
fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
|
||||
|
||||
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
|
||||
@@ -439,7 +438,7 @@ static int perf_top__key_mapped(struct perf_top *top, int c)
|
||||
case 'S':
|
||||
return 1;
|
||||
case 'E':
|
||||
return top->evlist->nr_entries > 1 ? 1 : 0;
|
||||
return top->evlist->core.nr_entries > 1 ? 1 : 0;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -485,7 +484,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
|
||||
}
|
||||
break;
|
||||
case 'E':
|
||||
if (top->evlist->nr_entries > 1) {
|
||||
if (top->evlist->core.nr_entries > 1) {
|
||||
/* Select 0 as the default event: */
|
||||
int counter = 0;
|
||||
|
||||
@@ -496,7 +495,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
|
||||
|
||||
prompt_integer(&counter, "Enter details event counter");
|
||||
|
||||
if (counter >= top->evlist->nr_entries) {
|
||||
if (counter >= top->evlist->core.nr_entries) {
|
||||
top->sym_evsel = perf_evlist__first(top->evlist);
|
||||
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
|
||||
sleep(1);
|
||||
@@ -554,7 +553,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
|
||||
static void perf_top__sort_new_samples(void *arg)
|
||||
{
|
||||
struct perf_top *t = arg;
|
||||
struct perf_evsel *evsel = t->sym_evsel;
|
||||
struct evsel *evsel = t->sym_evsel;
|
||||
struct hists *hists;
|
||||
|
||||
if (t->evlist->selected != NULL)
|
||||
@@ -586,7 +585,7 @@ static void stop_top(void)
|
||||
|
||||
static void *display_thread_tui(void *arg)
|
||||
{
|
||||
struct perf_evsel *pos;
|
||||
struct evsel *pos;
|
||||
struct perf_top *top = arg;
|
||||
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
|
||||
struct hist_browser_timer hbt = {
|
||||
@@ -693,7 +692,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
|
||||
{
|
||||
struct perf_top *top = arg;
|
||||
struct hist_entry *he = iter->he;
|
||||
struct perf_evsel *evsel = iter->evsel;
|
||||
struct evsel *evsel = iter->evsel;
|
||||
|
||||
if (perf_hpp_list.sym && single)
|
||||
perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
|
||||
@@ -705,7 +704,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
|
||||
|
||||
static void perf_event__process_sample(struct perf_tool *tool,
|
||||
const union perf_event *event,
|
||||
struct perf_evsel *evsel,
|
||||
struct evsel *evsel,
|
||||
struct perf_sample *sample,
|
||||
struct machine *machine)
|
||||
{
|
||||
@@ -813,7 +812,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
|
||||
|
||||
static void
|
||||
perf_top__process_lost(struct perf_top *top, union perf_event *event,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
|
||||
@@ -825,7 +824,7 @@ perf_top__process_lost(struct perf_top *top, union perf_event *event,
|
||||
static void
|
||||
perf_top__process_lost_samples(struct perf_top *top,
|
||||
union perf_event *event,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
|
||||
@@ -839,7 +838,7 @@ static u64 last_timestamp;
|
||||
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
|
||||
{
|
||||
struct record_opts *opts = &top->record_opts;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct evlist *evlist = top->evlist;
|
||||
struct perf_mmap *md;
|
||||
union perf_event *event;
|
||||
|
||||
@@ -874,7 +873,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
|
||||
static void perf_top__mmap_read(struct perf_top *top)
|
||||
{
|
||||
bool overwrite = top->record_opts.overwrite;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct evlist *evlist = top->evlist;
|
||||
int i;
|
||||
|
||||
if (overwrite)
|
||||
@@ -909,10 +908,10 @@ static void perf_top__mmap_read(struct perf_top *top)
|
||||
static int perf_top__overwrite_check(struct perf_top *top)
|
||||
{
|
||||
struct record_opts *opts = &top->record_opts;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct evlist *evlist = top->evlist;
|
||||
struct perf_evsel_config_term *term;
|
||||
struct list_head *config_terms;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
int set, overwrite = -1;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
@@ -952,11 +951,11 @@ static int perf_top__overwrite_check(struct perf_top *top)
|
||||
}
|
||||
|
||||
static int perf_top_overwrite_fallback(struct perf_top *top,
|
||||
struct perf_evsel *evsel)
|
||||
struct evsel *evsel)
|
||||
{
|
||||
struct record_opts *opts = &top->record_opts;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct perf_evsel *counter;
|
||||
struct evlist *evlist = top->evlist;
|
||||
struct evsel *counter;
|
||||
|
||||
if (!opts->overwrite)
|
||||
return 0;
|
||||
@@ -966,7 +965,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
|
||||
return 0;
|
||||
|
||||
evlist__for_each_entry(evlist, counter)
|
||||
counter->attr.write_backward = false;
|
||||
counter->core.attr.write_backward = false;
|
||||
opts->overwrite = false;
|
||||
pr_debug2("fall back to non-overwrite mode\n");
|
||||
return 1;
|
||||
@@ -975,8 +974,8 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
|
||||
static int perf_top__start_counters(struct perf_top *top)
|
||||
{
|
||||
char msg[BUFSIZ];
|
||||
struct perf_evsel *counter;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct evsel *counter;
|
||||
struct evlist *evlist = top->evlist;
|
||||
struct record_opts *opts = &top->record_opts;
|
||||
|
||||
if (perf_top__overwrite_check(top)) {
|
||||
@@ -989,8 +988,8 @@ static int perf_top__start_counters(struct perf_top *top)
|
||||
|
||||
evlist__for_each_entry(evlist, counter) {
|
||||
try_again:
|
||||
if (perf_evsel__open(counter, top->evlist->cpus,
|
||||
top->evlist->threads) < 0) {
|
||||
if (evsel__open(counter, top->evlist->core.cpus,
|
||||
top->evlist->core.threads) < 0) {
|
||||
|
||||
/*
|
||||
* Specially handle overwrite fall back.
|
||||
@@ -1100,11 +1099,11 @@ static int deliver_event(struct ordered_events *qe,
|
||||
struct ordered_event *qevent)
|
||||
{
|
||||
struct perf_top *top = qe->data;
|
||||
struct perf_evlist *evlist = top->evlist;
|
||||
struct evlist *evlist = top->evlist;
|
||||
struct perf_session *session = top->session;
|
||||
union perf_event *event = qevent->event;
|
||||
struct perf_sample sample;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct machine *machine;
|
||||
int ret = -1;
|
||||
|
||||
@@ -1222,7 +1221,7 @@ static int __cmd_top(struct perf_top *top)
|
||||
pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
|
||||
|
||||
machine__synthesize_threads(&top->session->machines.host, &opts->target,
|
||||
top->evlist->threads, false,
|
||||
top->evlist->core.threads, false,
|
||||
top->nr_threads_synthesize);
|
||||
|
||||
if (top->nr_threads_synthesize > 1)
|
||||
@@ -1255,7 +1254,7 @@ static int __cmd_top(struct perf_top *top)
|
||||
* so leave the check here.
|
||||
*/
|
||||
if (!target__none(&opts->target))
|
||||
perf_evlist__enable(top->evlist);
|
||||
evlist__enable(top->evlist);
|
||||
|
||||
ret = -1;
|
||||
if (pthread_create(&thread_process, NULL, process_thread, top)) {
|
||||
@@ -1511,7 +1510,7 @@ int cmd_top(int argc, const char **argv)
|
||||
"Record namespaces events"),
|
||||
OPT_END()
|
||||
};
|
||||
struct perf_evlist *sb_evlist = NULL;
|
||||
struct evlist *sb_evlist = NULL;
|
||||
const char * const top_usage[] = {
|
||||
"perf top [<options>]",
|
||||
NULL
|
||||
@@ -1524,7 +1523,7 @@ int cmd_top(int argc, const char **argv)
|
||||
top.annotation_opts.min_pcnt = 5;
|
||||
top.annotation_opts.context = 4;
|
||||
|
||||
top.evlist = perf_evlist__new();
|
||||
top.evlist = evlist__new();
|
||||
if (top.evlist == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1536,7 +1535,7 @@ int cmd_top(int argc, const char **argv)
|
||||
if (argc)
|
||||
usage_with_options(top_usage, options);
|
||||
|
||||
if (!top.evlist->nr_entries &&
|
||||
if (!top.evlist->core.nr_entries &&
|
||||
perf_evlist__add_default(top.evlist) < 0) {
|
||||
pr_err("Not enough memory for event selector list\n");
|
||||
goto out_delete_evlist;
|
||||
@@ -1661,7 +1660,7 @@ int cmd_top(int argc, const char **argv)
|
||||
perf_evlist__stop_sb_thread(sb_evlist);
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(top.evlist);
|
||||
evlist__delete(top.evlist);
|
||||
perf_session__delete(top.session);
|
||||
|
||||
return status;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -16,6 +16,7 @@
|
||||
|
||||
#include <unistd.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/socket.h>
|
||||
#include <pid_filter.h>
|
||||
|
||||
/* bpf-output associated map */
|
||||
@@ -33,6 +34,20 @@ struct syscall {
|
||||
|
||||
bpf_map(syscalls, ARRAY, int, struct syscall, 512);
|
||||
|
||||
/*
|
||||
* What to augment at entry?
|
||||
*
|
||||
* Pointer arg payloads (filenames, etc) passed from userspace to the kernel
|
||||
*/
|
||||
bpf_map(syscalls_sys_enter, PROG_ARRAY, u32, u32, 512);
|
||||
|
||||
/*
|
||||
* What to augment at exit?
|
||||
*
|
||||
* Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
|
||||
*/
|
||||
bpf_map(syscalls_sys_exit, PROG_ARRAY, u32, u32, 512);
|
||||
|
||||
struct syscall_enter_args {
|
||||
unsigned long long common_tp_fields;
|
||||
long syscall_nr;
|
||||
@@ -53,12 +68,18 @@ struct augmented_filename {
|
||||
|
||||
pid_filter(pids_filtered);
|
||||
|
||||
struct augmented_args_filename {
|
||||
struct augmented_args_payload {
|
||||
struct syscall_enter_args args;
|
||||
struct augmented_filename filename;
|
||||
union {
|
||||
struct {
|
||||
struct augmented_filename filename,
|
||||
filename2;
|
||||
};
|
||||
struct sockaddr_storage saddr;
|
||||
};
|
||||
};
|
||||
|
||||
bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
|
||||
bpf_map(augmented_args_tmp, PERCPU_ARRAY, int, struct augmented_args_payload, 1);
|
||||
|
||||
static inline
|
||||
unsigned int augmented_filename__read(struct augmented_filename *augmented_filename,
|
||||
@@ -88,10 +109,136 @@ unsigned int augmented_filename__read(struct augmented_filename *augmented_filen
|
||||
return len;
|
||||
}
|
||||
|
||||
SEC("!raw_syscalls:unaugmented")
|
||||
int syscall_unaugmented(struct syscall_enter_args *args)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* These will be tail_called from SEC("raw_syscalls:sys_enter"), so will find in
|
||||
* augmented_args_tmp what was read by that raw_syscalls:sys_enter and go
|
||||
* on from there, reading the first syscall arg as a string, i.e. open's
|
||||
* filename.
|
||||
*/
|
||||
SEC("!syscalls:sys_enter_connect")
|
||||
int sys_enter_connect(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *sockaddr_arg = (const void *)args->args[1];
|
||||
unsigned int socklen = args->args[2];
|
||||
unsigned int len = sizeof(augmented_args->args);
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
if (socklen > sizeof(augmented_args->saddr))
|
||||
socklen = sizeof(augmented_args->saddr);
|
||||
|
||||
probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len + socklen);
|
||||
}
|
||||
|
||||
SEC("!syscalls:sys_enter_sendto")
|
||||
int sys_enter_sendto(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *sockaddr_arg = (const void *)args->args[4];
|
||||
unsigned int socklen = args->args[5];
|
||||
unsigned int len = sizeof(augmented_args->args);
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
if (socklen > sizeof(augmented_args->saddr))
|
||||
socklen = sizeof(augmented_args->saddr);
|
||||
|
||||
probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len + socklen);
|
||||
}
|
||||
|
||||
SEC("!syscalls:sys_enter_open")
|
||||
int sys_enter_open(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *filename_arg = (const void *)args->args[0];
|
||||
unsigned int len = sizeof(augmented_args->args);
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
len += augmented_filename__read(&augmented_args->filename, filename_arg, sizeof(augmented_args->filename.value));
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
}
|
||||
|
||||
SEC("!syscalls:sys_enter_openat")
|
||||
int sys_enter_openat(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *filename_arg = (const void *)args->args[1];
|
||||
unsigned int len = sizeof(augmented_args->args);
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
len += augmented_filename__read(&augmented_args->filename, filename_arg, sizeof(augmented_args->filename.value));
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
}
|
||||
|
||||
SEC("!syscalls:sys_enter_rename")
|
||||
int sys_enter_rename(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *oldpath_arg = (const void *)args->args[0],
|
||||
*newpath_arg = (const void *)args->args[1];
|
||||
unsigned int len = sizeof(augmented_args->args), oldpath_len;
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
oldpath_len = augmented_filename__read(&augmented_args->filename, oldpath_arg, sizeof(augmented_args->filename.value));
|
||||
len += oldpath_len + augmented_filename__read((void *)(&augmented_args->filename) + oldpath_len, newpath_arg, sizeof(augmented_args->filename.value));
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
}
|
||||
|
||||
SEC("!syscalls:sys_enter_renameat")
|
||||
int sys_enter_renameat(struct syscall_enter_args *args)
|
||||
{
|
||||
int key = 0;
|
||||
struct augmented_args_payload *augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
const void *oldpath_arg = (const void *)args->args[1],
|
||||
*newpath_arg = (const void *)args->args[3];
|
||||
unsigned int len = sizeof(augmented_args->args), oldpath_len;
|
||||
|
||||
if (augmented_args == NULL)
|
||||
return 1; /* Failure: don't filter */
|
||||
|
||||
oldpath_len = augmented_filename__read(&augmented_args->filename, oldpath_arg, sizeof(augmented_args->filename.value));
|
||||
len += oldpath_len + augmented_filename__read((void *)(&augmented_args->filename) + oldpath_len, newpath_arg, sizeof(augmented_args->filename.value));
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
}
|
||||
|
||||
SEC("raw_syscalls:sys_enter")
|
||||
int sys_enter(struct syscall_enter_args *args)
|
||||
{
|
||||
struct augmented_args_filename *augmented_args;
|
||||
struct augmented_args_payload *augmented_args;
|
||||
/*
|
||||
* We start len, the amount of data that will be in the perf ring
|
||||
* buffer, if this is not filtered out by one of pid_filter__has(),
|
||||
@@ -105,7 +252,7 @@ int sys_enter(struct syscall_enter_args *args)
|
||||
struct syscall *syscall;
|
||||
int key = 0;
|
||||
|
||||
augmented_args = bpf_map_lookup_elem(&augmented_filename_map, &key);
|
||||
augmented_args = bpf_map_lookup_elem(&augmented_args_tmp, &key);
|
||||
if (augmented_args == NULL)
|
||||
return 1;
|
||||
|
||||
@@ -114,131 +261,36 @@ int sys_enter(struct syscall_enter_args *args)
|
||||
|
||||
probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
|
||||
|
||||
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args->args.syscall_nr);
|
||||
if (syscall == NULL || !syscall->enabled)
|
||||
return 0;
|
||||
/*
|
||||
* Yonghong and Edward Cree sayz:
|
||||
*
|
||||
* https://www.spinics.net/lists/netdev/msg531645.html
|
||||
*
|
||||
* >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
|
||||
* >> 10: (bf) r1 = r6
|
||||
* >> 11: (07) r1 += 16
|
||||
* >> 12: (05) goto pc+2
|
||||
* >> 15: (79) r3 = *(u64 *)(r1 +0)
|
||||
* >> dereference of modified ctx ptr R1 off=16 disallowed
|
||||
* > Aha, we at least got a different error message this time.
|
||||
* > And indeed llvm has done that optimisation, rather than the more obvious
|
||||
* > 11: r3 = *(u64 *)(r1 +16)
|
||||
* > because it wants to have lots of reads share a single insn. You may be able
|
||||
* > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
|
||||
* > with llvm knowledge can figure out how to stop it (ideally, llvm would know
|
||||
* > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
|
||||
*
|
||||
* The optimization mostly likes below:
|
||||
*
|
||||
* br1:
|
||||
* ...
|
||||
* r1 += 16
|
||||
* goto merge
|
||||
* br2:
|
||||
* ...
|
||||
* r1 += 20
|
||||
* goto merge
|
||||
* merge:
|
||||
* *(u64 *)(r1 + 0)
|
||||
*
|
||||
* The compiler tries to merge common loads. There is no easy way to
|
||||
* stop this compiler optimization without turning off a lot of other
|
||||
* optimizations. The easiest way is to add barriers:
|
||||
*
|
||||
* __asm__ __volatile__("": : :"memory")
|
||||
*
|
||||
* after the ctx memory access to prevent their down stream merging.
|
||||
* Jump to syscall specific augmenter, even if the default one,
|
||||
* "!raw_syscalls:unaugmented" that will just return 1 to return the
|
||||
* unagmented tracepoint payload.
|
||||
*/
|
||||
/*
|
||||
* For now copy just the first string arg, we need to improve the protocol
|
||||
* and have more than one.
|
||||
*
|
||||
* Using the unrolled loop is not working, only when we do it manually,
|
||||
* check this out later...
|
||||
bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
|
||||
|
||||
u8 arg;
|
||||
#pragma clang loop unroll(full)
|
||||
for (arg = 0; arg < 6; ++arg) {
|
||||
if (syscall->string_args_len[arg] != 0) {
|
||||
filename_len = syscall->string_args_len[arg];
|
||||
filename_arg = (const void *)args->args[arg];
|
||||
__asm__ __volatile__("": : :"memory");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
verifier log:
|
||||
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
37: (69) r3 = *(u16 *)(r0 +2)
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv0 R2_w=map_value(id=0,off=2,ks=4,vs=14,imm=0) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
38: (55) if r3 != 0x0 goto pc+5
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv0 R2=map_value(id=0,off=2,ks=4,vs=14,imm=0) R3=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
39: (b7) r1 = 1
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
40: (bf) r2 = r0
|
||||
41: (07) r2 += 4
|
||||
42: (69) r3 = *(u16 *)(r0 +4)
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv1 R2_w=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3_w=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; if (syscall->string_args_len[arg] != 0) {
|
||||
43: (15) if r3 == 0x0 goto pc+32
|
||||
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv1 R2=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
|
||||
; filename_arg = (const void *)args->args[arg];
|
||||
44: (67) r1 <<= 3
|
||||
45: (bf) r3 = r6
|
||||
46: (0f) r3 += r1
|
||||
47: (b7) r5 = 64
|
||||
48: (79) r3 = *(u64 *)(r3 +16)
|
||||
dereference of modified ctx ptr R3 off=8 disallowed
|
||||
processed 46 insns (limit 1000000) max_states_per_insn 0 total_states 12 peak_states 12 mark_read 7
|
||||
*/
|
||||
|
||||
#define __loop_iter(arg) \
|
||||
if (syscall->string_args_len[arg] != 0) { \
|
||||
unsigned int filename_len = syscall->string_args_len[arg]; \
|
||||
const void *filename_arg = (const void *)args->args[arg]; \
|
||||
if (filename_len <= sizeof(augmented_args->filename.value)) \
|
||||
len += augmented_filename__read(&augmented_args->filename, filename_arg, filename_len);
|
||||
#define loop_iter_first() __loop_iter(0); }
|
||||
#define loop_iter(arg) else __loop_iter(arg); }
|
||||
#define loop_iter_last(arg) else __loop_iter(arg); __asm__ __volatile__("": : :"memory"); }
|
||||
|
||||
loop_iter_first()
|
||||
loop_iter(1)
|
||||
loop_iter(2)
|
||||
loop_iter(3)
|
||||
loop_iter(4)
|
||||
loop_iter_last(5)
|
||||
|
||||
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
|
||||
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
|
||||
// If not found on the PROG_ARRAY syscalls map, then we're filtering it:
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("raw_syscalls:sys_exit")
|
||||
int sys_exit(struct syscall_exit_args *args)
|
||||
{
|
||||
struct syscall_exit_args exit_args;
|
||||
struct syscall *syscall;
|
||||
|
||||
if (pid_filter__has(&pids_filtered, getpid()))
|
||||
return 0;
|
||||
|
||||
probe_read(&exit_args, sizeof(exit_args), args);
|
||||
|
||||
syscall = bpf_map_lookup_elem(&syscalls, &exit_args.syscall_nr);
|
||||
if (syscall == NULL || !syscall->enabled)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
/*
|
||||
* Jump to syscall specific return augmenter, even if the default one,
|
||||
* "!raw_syscalls:unaugmented" that will just return 1 to return the
|
||||
* unagmented tracepoint payload.
|
||||
*/
|
||||
bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
|
||||
/*
|
||||
* If not found on the PROG_ARRAY syscalls map, then we're filtering it:
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
license(GPL);
|
||||
|
||||
@@ -45,6 +45,8 @@ struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
|
||||
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
|
||||
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
|
||||
|
||||
static void (*bpf_tail_call)(void *ctx, void *map, int index) = (void *)BPF_FUNC_tail_call;
|
||||
|
||||
#define SEC(NAME) __attribute__((section(NAME), used))
|
||||
|
||||
#define probe(function, vars) \
|
||||
|
||||
12
tools/perf/lib/Build
Normal file
12
tools/perf/lib/Build
Normal file
@@ -0,0 +1,12 @@
|
||||
libperf-y += core.o
|
||||
libperf-y += cpumap.o
|
||||
libperf-y += threadmap.o
|
||||
libperf-y += evsel.o
|
||||
libperf-y += evlist.o
|
||||
libperf-y += zalloc.o
|
||||
libperf-y += xyarray.o
|
||||
libperf-y += lib.o
|
||||
|
||||
$(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE
|
||||
$(call rule_mkdir)
|
||||
$(call if_changed_dep,cc_o_c)
|
||||
7
tools/perf/lib/Documentation/Makefile
Normal file
7
tools/perf/lib/Documentation/Makefile
Normal file
@@ -0,0 +1,7 @@
|
||||
all:
|
||||
rst2man man/libperf.rst > man/libperf.7
|
||||
rst2pdf tutorial/tutorial.rst
|
||||
|
||||
clean:
|
||||
rm -f man/libperf.7
|
||||
rm -f tutorial/tutorial.pdf
|
||||
100
tools/perf/lib/Documentation/man/libperf.rst
Normal file
100
tools/perf/lib/Documentation/man/libperf.rst
Normal file
@@ -0,0 +1,100 @@
|
||||
.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
|
||||
libperf
|
||||
|
||||
The libperf library provides an API to access the linux kernel perf
|
||||
events subsystem. It provides the following high level objects:
|
||||
|
||||
- struct perf_cpu_map
|
||||
- struct perf_thread_map
|
||||
- struct perf_evlist
|
||||
- struct perf_evsel
|
||||
|
||||
reference
|
||||
=========
|
||||
Function reference by header files:
|
||||
|
||||
perf/core.h
|
||||
-----------
|
||||
.. code-block:: c
|
||||
|
||||
typedef int (\*libperf_print_fn_t)(enum libperf_print_level level,
|
||||
const char \*, va_list ap);
|
||||
|
||||
void libperf_set_print(libperf_print_fn_t fn);
|
||||
|
||||
perf/cpumap.h
|
||||
-------------
|
||||
.. code-block:: c
|
||||
|
||||
struct perf_cpu_map \*perf_cpu_map__dummy_new(void);
|
||||
struct perf_cpu_map \*perf_cpu_map__new(const char \*cpu_list);
|
||||
struct perf_cpu_map \*perf_cpu_map__read(FILE \*file);
|
||||
struct perf_cpu_map \*perf_cpu_map__get(struct perf_cpu_map \*map);
|
||||
void perf_cpu_map__put(struct perf_cpu_map \*map);
|
||||
int perf_cpu_map__cpu(const struct perf_cpu_map \*cpus, int idx);
|
||||
int perf_cpu_map__nr(const struct perf_cpu_map \*cpus);
|
||||
perf_cpu_map__for_each_cpu(cpu, idx, cpus)
|
||||
|
||||
perf/threadmap.h
|
||||
----------------
|
||||
.. code-block:: c
|
||||
|
||||
struct perf_thread_map \*perf_thread_map__new_dummy(void);
|
||||
void perf_thread_map__set_pid(struct perf_thread_map \*map, int thread, pid_t pid);
|
||||
char \*perf_thread_map__comm(struct perf_thread_map \*map, int thread);
|
||||
struct perf_thread_map \*perf_thread_map__get(struct perf_thread_map \*map);
|
||||
void perf_thread_map__put(struct perf_thread_map \*map);
|
||||
|
||||
perf/evlist.h
|
||||
-------------
|
||||
.. code-block::
|
||||
|
||||
void perf_evlist__init(struct perf_evlist \*evlist);
|
||||
void perf_evlist__add(struct perf_evlist \*evlist,
|
||||
struct perf_evsel \*evsel);
|
||||
void perf_evlist__remove(struct perf_evlist \*evlist,
|
||||
struct perf_evsel \*evsel);
|
||||
struct perf_evlist \*perf_evlist__new(void);
|
||||
void perf_evlist__delete(struct perf_evlist \*evlist);
|
||||
struct perf_evsel\* perf_evlist__next(struct perf_evlist \*evlist,
|
||||
struct perf_evsel \*evsel);
|
||||
int perf_evlist__open(struct perf_evlist \*evlist);
|
||||
void perf_evlist__close(struct perf_evlist \*evlist);
|
||||
void perf_evlist__enable(struct perf_evlist \*evlist);
|
||||
void perf_evlist__disable(struct perf_evlist \*evlist);
|
||||
perf_evlist__for_each_evsel(evlist, pos)
|
||||
void perf_evlist__set_maps(struct perf_evlist \*evlist,
|
||||
struct perf_cpu_map \*cpus,
|
||||
struct perf_thread_map \*threads);
|
||||
|
||||
perf/evsel.h
|
||||
------------
|
||||
.. code-block:: c
|
||||
|
||||
struct perf_counts_values {
|
||||
union {
|
||||
struct {
|
||||
uint64_t val;
|
||||
uint64_t ena;
|
||||
uint64_t run;
|
||||
};
|
||||
uint64_t values[3];
|
||||
};
|
||||
};
|
||||
|
||||
void perf_evsel__init(struct perf_evsel \*evsel,
|
||||
struct perf_event_attr \*attr);
|
||||
struct perf_evsel \*perf_evsel__new(struct perf_event_attr \*attr);
|
||||
void perf_evsel__delete(struct perf_evsel \*evsel);
|
||||
int perf_evsel__open(struct perf_evsel \*evsel, struct perf_cpu_map \*cpus,
|
||||
struct perf_thread_map \*threads);
|
||||
void perf_evsel__close(struct perf_evsel \*evsel);
|
||||
int perf_evsel__read(struct perf_evsel \*evsel, int cpu, int thread,
|
||||
struct perf_counts_values \*count);
|
||||
int perf_evsel__enable(struct perf_evsel \*evsel);
|
||||
int perf_evsel__disable(struct perf_evsel \*evsel);
|
||||
int perf_evsel__apply_filter(struct perf_evsel \*evsel, const char \*filter);
|
||||
struct perf_cpu_map \*perf_evsel__cpus(struct perf_evsel \*evsel);
|
||||
struct perf_thread_map \*perf_evsel__threads(struct perf_evsel \*evsel);
|
||||
struct perf_event_attr \*perf_evsel__attr(struct perf_evsel \*evsel);
|
||||
123
tools/perf/lib/Documentation/tutorial/tutorial.rst
Normal file
123
tools/perf/lib/Documentation/tutorial/tutorial.rst
Normal file
@@ -0,0 +1,123 @@
|
||||
.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
|
||||
libperf tutorial
|
||||
================
|
||||
|
||||
Compile and install libperf from kernel sources
|
||||
===============================================
|
||||
.. code-block:: bash
|
||||
|
||||
git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
|
||||
cd linux/tools/perf/lib
|
||||
make
|
||||
sudo make install prefix=/usr
|
||||
|
||||
Libperf object
|
||||
==============
|
||||
The libperf library provides several high level objects:
|
||||
|
||||
struct perf_cpu_map
|
||||
Provides a cpu list abstraction.
|
||||
|
||||
struct perf_thread_map
|
||||
Provides a thread list abstraction.
|
||||
|
||||
struct perf_evsel
|
||||
Provides an abstraction for single a perf event.
|
||||
|
||||
struct perf_evlist
|
||||
Gathers several struct perf_evsel object and performs functions on all of them.
|
||||
|
||||
The exported API binds these objects together,
|
||||
for full reference see the libperf.7 man page.
|
||||
|
||||
Examples
|
||||
========
|
||||
Examples aim to explain libperf functionality on simple use cases.
|
||||
They are based in on a checked out linux kernel git tree:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ cd tools/perf/lib/Documentation/tutorial/
|
||||
$ ls -d ex-*
|
||||
ex-1-compile ex-2-evsel-stat ex-3-evlist-stat
|
||||
|
||||
ex-1-compile example
|
||||
====================
|
||||
This example shows the basic usage of *struct perf_cpu_map*,
|
||||
how to create it and display its cpus:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ cd ex-1-compile/
|
||||
$ make
|
||||
gcc -o test test.c -lperf
|
||||
$ ./test
|
||||
0 1 2 3 4 5 6 7
|
||||
|
||||
|
||||
The full code listing is here:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
1 #include <perf/cpumap.h>
|
||||
2
|
||||
3 int main(int argc, char **Argv)
|
||||
4 {
|
||||
5 struct perf_cpu_map *cpus;
|
||||
6 int cpu, tmp;
|
||||
7
|
||||
8 cpus = perf_cpu_map__new(NULL);
|
||||
9
|
||||
10 perf_cpu_map__for_each_cpu(cpu, tmp, cpus)
|
||||
11 fprintf(stdout, "%d ", cpu);
|
||||
12
|
||||
13 fprintf(stdout, "\n");
|
||||
14
|
||||
15 perf_cpu_map__put(cpus);
|
||||
16 return 0;
|
||||
17 }
|
||||
|
||||
|
||||
First you need to include the proper header to have *struct perf_cpumap*
|
||||
declaration and functions:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
1 #include <perf/cpumap.h>
|
||||
|
||||
|
||||
The *struct perf_cpumap* object is created by *perf_cpu_map__new* call.
|
||||
The *NULL* argument asks it to populate the object with the current online CPUs list:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
8 cpus = perf_cpu_map__new(NULL);
|
||||
|
||||
This is paired with a *perf_cpu_map__put*, that drops its reference at the end, possibly deleting it.
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
15 perf_cpu_map__put(cpus);
|
||||
|
||||
The iteration through the *struct perf_cpumap* CPUs is done using the *perf_cpu_map__for_each_cpu*
|
||||
macro which requires 3 arguments:
|
||||
|
||||
- cpu - the cpu numer
|
||||
- tmp - iteration helper variable
|
||||
- cpus - the *struct perf_cpumap* object
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
10 perf_cpu_map__for_each_cpu(cpu, tmp, cpus)
|
||||
11 fprintf(stdout, "%d ", cpu);
|
||||
|
||||
ex-2-evsel-stat example
|
||||
=======================
|
||||
|
||||
TBD
|
||||
|
||||
ex-3-evlist-stat example
|
||||
========================
|
||||
|
||||
TBD
|
||||
158
tools/perf/lib/Makefile
Normal file
158
tools/perf/lib/Makefile
Normal file
@@ -0,0 +1,158 @@
|
||||
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
# Most of this file is copied from tools/lib/bpf/Makefile
|
||||
|
||||
LIBPERF_VERSION = 0
|
||||
LIBPERF_PATCHLEVEL = 0
|
||||
LIBPERF_EXTRAVERSION = 1
|
||||
|
||||
MAKEFLAGS += --no-print-directory
|
||||
|
||||
ifeq ($(srctree),)
|
||||
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
|
||||
srctree := $(patsubst %/,%,$(dir $(srctree)))
|
||||
srctree := $(patsubst %/,%,$(dir $(srctree)))
|
||||
#$(info Determined 'srctree' to be $(srctree))
|
||||
endif
|
||||
|
||||
INSTALL = install
|
||||
|
||||
# Use DESTDIR for installing into a different root directory.
|
||||
# This is useful for building a package. The program will be
|
||||
# installed in this directory as if it was the root directory.
|
||||
# Then the build tool can move it later.
|
||||
DESTDIR ?=
|
||||
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
|
||||
|
||||
include $(srctree)/tools/scripts/Makefile.include
|
||||
include $(srctree)/tools/scripts/Makefile.arch
|
||||
|
||||
ifeq ($(LP64), 1)
|
||||
libdir_relative = lib64
|
||||
else
|
||||
libdir_relative = lib
|
||||
endif
|
||||
|
||||
prefix ?=
|
||||
libdir = $(prefix)/$(libdir_relative)
|
||||
|
||||
# Shell quotes
|
||||
libdir_SQ = $(subst ','\'',$(libdir))
|
||||
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
|
||||
|
||||
ifeq ("$(origin V)", "command line")
|
||||
VERBOSE = $(V)
|
||||
endif
|
||||
ifndef VERBOSE
|
||||
VERBOSE = 0
|
||||
endif
|
||||
|
||||
ifeq ($(VERBOSE),1)
|
||||
Q =
|
||||
else
|
||||
Q = @
|
||||
endif
|
||||
|
||||
# Set compile option CFLAGS
|
||||
ifdef EXTRA_CFLAGS
|
||||
CFLAGS := $(EXTRA_CFLAGS)
|
||||
else
|
||||
CFLAGS := -g -Wall
|
||||
endif
|
||||
|
||||
INCLUDES = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/ -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
|
||||
|
||||
# Append required CFLAGS
|
||||
override CFLAGS += $(EXTRA_WARNINGS)
|
||||
override CFLAGS += -Werror -Wall
|
||||
override CFLAGS += -fPIC
|
||||
override CFLAGS += $(INCLUDES)
|
||||
override CFLAGS += -fvisibility=hidden
|
||||
|
||||
all:
|
||||
|
||||
export srctree OUTPUT CC LD CFLAGS V
|
||||
export DESTDIR DESTDIR_SQ
|
||||
|
||||
include $(srctree)/tools/build/Makefile.include
|
||||
|
||||
VERSION_SCRIPT := libperf.map
|
||||
|
||||
PATCHLEVEL = $(LIBPERF_PATCHLEVEL)
|
||||
EXTRAVERSION = $(LIBPERF_EXTRAVERSION)
|
||||
VERSION = $(LIBPERF_VERSION).$(LIBPERF_PATCHLEVEL).$(LIBPERF_EXTRAVERSION)
|
||||
|
||||
LIBPERF_SO := $(OUTPUT)libperf.so.$(VERSION)
|
||||
LIBPERF_A := $(OUTPUT)libperf.a
|
||||
LIBPERF_IN := $(OUTPUT)libperf-in.o
|
||||
LIBPERF_PC := $(OUTPUT)libperf.pc
|
||||
|
||||
LIBPERF_ALL := $(LIBPERF_A) $(OUTPUT)libperf.so*
|
||||
|
||||
$(LIBPERF_IN): FORCE
|
||||
$(Q)$(MAKE) $(build)=libperf
|
||||
|
||||
$(LIBPERF_A): $(LIBPERF_IN)
|
||||
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN)
|
||||
|
||||
$(LIBPERF_SO): $(LIBPERF_IN)
|
||||
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libperf.so \
|
||||
-Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
|
||||
@ln -sf $(@F) $(OUTPUT)libperf.so
|
||||
@ln -sf $(@F) $(OUTPUT)libperf.so.$(LIBPERF_VERSION)
|
||||
|
||||
|
||||
libs: $(LIBPERF_A) $(LIBPERF_SO) $(LIBPERF_PC)
|
||||
|
||||
all: fixdep
|
||||
$(Q)$(MAKE) libs
|
||||
|
||||
clean:
|
||||
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
|
||||
*.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
|
||||
$(Q)$(MAKE) -C tests clean
|
||||
|
||||
tests:
|
||||
$(Q)$(MAKE) -C tests
|
||||
$(Q)$(MAKE) -C tests run
|
||||
|
||||
$(LIBPERF_PC):
|
||||
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
|
||||
-e "s|@LIBDIR@|$(libdir_SQ)|" \
|
||||
-e "s|@VERSION@|$(VERSION)|" \
|
||||
< libperf.pc.template > $@
|
||||
|
||||
define do_install_mkdir
|
||||
if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
|
||||
fi
|
||||
endef
|
||||
|
||||
define do_install
|
||||
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
|
||||
fi; \
|
||||
$(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
|
||||
endef
|
||||
|
||||
install_lib: libs
|
||||
$(call QUIET_INSTALL, $(LIBPERF_ALL)) \
|
||||
$(call do_install_mkdir,$(libdir_SQ)); \
|
||||
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
|
||||
|
||||
install_headers:
|
||||
$(call QUIET_INSTALL, headers) \
|
||||
$(call do_install,include/perf/core.h,$(prefix)/include/perf,644); \
|
||||
$(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
|
||||
$(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
|
||||
$(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
|
||||
$(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
|
||||
|
||||
install_pkgconfig: $(LIBPERF_PC)
|
||||
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
|
||||
$(call do_install,$(LIBPERF_PC),$(libdir_SQ)/pkgconfig,644)
|
||||
|
||||
install: install_lib install_headers install_pkgconfig
|
||||
|
||||
FORCE:
|
||||
|
||||
.PHONY: all install clean tests FORCE
|
||||
34
tools/perf/lib/core.c
Normal file
34
tools/perf/lib/core.c
Normal file
@@ -0,0 +1,34 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#define __printf(a, b) __attribute__((format(printf, a, b)))
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <perf/core.h>
|
||||
#include "internal.h"
|
||||
|
||||
static int __base_pr(enum libperf_print_level level, const char *format,
|
||||
va_list args)
|
||||
{
|
||||
return vfprintf(stderr, format, args);
|
||||
}
|
||||
|
||||
static libperf_print_fn_t __libperf_pr = __base_pr;
|
||||
|
||||
void libperf_set_print(libperf_print_fn_t fn)
|
||||
{
|
||||
__libperf_pr = fn;
|
||||
}
|
||||
|
||||
__printf(2, 3)
|
||||
void libperf_print(enum libperf_print_level level, const char *format, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
if (!__libperf_pr)
|
||||
return;
|
||||
|
||||
va_start(args, format);
|
||||
__libperf_pr(level, format, args);
|
||||
va_end(args);
|
||||
}
|
||||
239
tools/perf/lib/cpumap.c
Normal file
239
tools/perf/lib/cpumap.c
Normal file
@@ -0,0 +1,239 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
#include <perf/cpumap.h>
|
||||
#include <stdlib.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <internal/cpumap.h>
|
||||
#include <asm/bug.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <ctype.h>
|
||||
#include <limits.h>
|
||||
|
||||
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
|
||||
{
|
||||
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
|
||||
|
||||
if (cpus != NULL) {
|
||||
cpus->nr = 1;
|
||||
cpus->map[0] = -1;
|
||||
refcount_set(&cpus->refcnt, 1);
|
||||
}
|
||||
|
||||
return cpus;
|
||||
}
|
||||
|
||||
static void cpu_map__delete(struct perf_cpu_map *map)
|
||||
{
|
||||
if (map) {
|
||||
WARN_ONCE(refcount_read(&map->refcnt) != 0,
|
||||
"cpu_map refcnt unbalanced\n");
|
||||
free(map);
|
||||
}
|
||||
}
|
||||
|
||||
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
|
||||
{
|
||||
if (map)
|
||||
refcount_inc(&map->refcnt);
|
||||
return map;
|
||||
}
|
||||
|
||||
void perf_cpu_map__put(struct perf_cpu_map *map)
|
||||
{
|
||||
if (map && refcount_dec_and_test(&map->refcnt))
|
||||
cpu_map__delete(map);
|
||||
}
|
||||
|
||||
static struct perf_cpu_map *cpu_map__default_new(void)
|
||||
{
|
||||
struct perf_cpu_map *cpus;
|
||||
int nr_cpus;
|
||||
|
||||
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
|
||||
if (nr_cpus < 0)
|
||||
return NULL;
|
||||
|
||||
cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
|
||||
if (cpus != NULL) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_cpus; ++i)
|
||||
cpus->map[i] = i;
|
||||
|
||||
cpus->nr = nr_cpus;
|
||||
refcount_set(&cpus->refcnt, 1);
|
||||
}
|
||||
|
||||
return cpus;
|
||||
}
|
||||
|
||||
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
|
||||
{
|
||||
size_t payload_size = nr_cpus * sizeof(int);
|
||||
struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
|
||||
|
||||
if (cpus != NULL) {
|
||||
cpus->nr = nr_cpus;
|
||||
memcpy(cpus->map, tmp_cpus, payload_size);
|
||||
refcount_set(&cpus->refcnt, 1);
|
||||
}
|
||||
|
||||
return cpus;
|
||||
}
|
||||
|
||||
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
|
||||
{
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
int nr_cpus = 0;
|
||||
int *tmp_cpus = NULL, *tmp;
|
||||
int max_entries = 0;
|
||||
int n, cpu, prev;
|
||||
char sep;
|
||||
|
||||
sep = 0;
|
||||
prev = -1;
|
||||
for (;;) {
|
||||
n = fscanf(file, "%u%c", &cpu, &sep);
|
||||
if (n <= 0)
|
||||
break;
|
||||
if (prev >= 0) {
|
||||
int new_max = nr_cpus + cpu - prev - 1;
|
||||
|
||||
if (new_max >= max_entries) {
|
||||
max_entries = new_max + MAX_NR_CPUS / 2;
|
||||
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
|
||||
if (tmp == NULL)
|
||||
goto out_free_tmp;
|
||||
tmp_cpus = tmp;
|
||||
}
|
||||
|
||||
while (++prev < cpu)
|
||||
tmp_cpus[nr_cpus++] = prev;
|
||||
}
|
||||
if (nr_cpus == max_entries) {
|
||||
max_entries += MAX_NR_CPUS;
|
||||
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
|
||||
if (tmp == NULL)
|
||||
goto out_free_tmp;
|
||||
tmp_cpus = tmp;
|
||||
}
|
||||
|
||||
tmp_cpus[nr_cpus++] = cpu;
|
||||
if (n == 2 && sep == '-')
|
||||
prev = cpu;
|
||||
else
|
||||
prev = -1;
|
||||
if (n == 1 || sep == '\n')
|
||||
break;
|
||||
}
|
||||
|
||||
if (nr_cpus > 0)
|
||||
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
|
||||
else
|
||||
cpus = cpu_map__default_new();
|
||||
out_free_tmp:
|
||||
free(tmp_cpus);
|
||||
return cpus;
|
||||
}
|
||||
|
||||
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
|
||||
{
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
FILE *onlnf;
|
||||
|
||||
onlnf = fopen("/sys/devices/system/cpu/online", "r");
|
||||
if (!onlnf)
|
||||
return cpu_map__default_new();
|
||||
|
||||
cpus = perf_cpu_map__read(onlnf);
|
||||
fclose(onlnf);
|
||||
return cpus;
|
||||
}
|
||||
|
||||
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
|
||||
{
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
unsigned long start_cpu, end_cpu = 0;
|
||||
char *p = NULL;
|
||||
int i, nr_cpus = 0;
|
||||
int *tmp_cpus = NULL, *tmp;
|
||||
int max_entries = 0;
|
||||
|
||||
if (!cpu_list)
|
||||
return cpu_map__read_all_cpu_map();
|
||||
|
||||
/*
|
||||
* must handle the case of empty cpumap to cover
|
||||
* TOPOLOGY header for NUMA nodes with no CPU
|
||||
* ( e.g., because of CPU hotplug)
|
||||
*/
|
||||
if (!isdigit(*cpu_list) && *cpu_list != '\0')
|
||||
goto out;
|
||||
|
||||
while (isdigit(*cpu_list)) {
|
||||
p = NULL;
|
||||
start_cpu = strtoul(cpu_list, &p, 0);
|
||||
if (start_cpu >= INT_MAX
|
||||
|| (*p != '\0' && *p != ',' && *p != '-'))
|
||||
goto invalid;
|
||||
|
||||
if (*p == '-') {
|
||||
cpu_list = ++p;
|
||||
p = NULL;
|
||||
end_cpu = strtoul(cpu_list, &p, 0);
|
||||
|
||||
if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
|
||||
goto invalid;
|
||||
|
||||
if (end_cpu < start_cpu)
|
||||
goto invalid;
|
||||
} else {
|
||||
end_cpu = start_cpu;
|
||||
}
|
||||
|
||||
for (; start_cpu <= end_cpu; start_cpu++) {
|
||||
/* check for duplicates */
|
||||
for (i = 0; i < nr_cpus; i++)
|
||||
if (tmp_cpus[i] == (int)start_cpu)
|
||||
goto invalid;
|
||||
|
||||
if (nr_cpus == max_entries) {
|
||||
max_entries += MAX_NR_CPUS;
|
||||
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
|
||||
if (tmp == NULL)
|
||||
goto invalid;
|
||||
tmp_cpus = tmp;
|
||||
}
|
||||
tmp_cpus[nr_cpus++] = (int)start_cpu;
|
||||
}
|
||||
if (*p)
|
||||
++p;
|
||||
|
||||
cpu_list = p;
|
||||
}
|
||||
|
||||
if (nr_cpus > 0)
|
||||
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
|
||||
else if (*cpu_list != '\0')
|
||||
cpus = cpu_map__default_new();
|
||||
else
|
||||
cpus = perf_cpu_map__dummy_new();
|
||||
invalid:
|
||||
free(tmp_cpus);
|
||||
out:
|
||||
return cpus;
|
||||
}
|
||||
|
||||
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
|
||||
{
|
||||
if (idx < cpus->nr)
|
||||
return cpus->map[idx];
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
|
||||
{
|
||||
return cpus ? cpus->nr : 1;
|
||||
}
|
||||
159
tools/perf/lib/evlist.c
Normal file
159
tools/perf/lib/evlist.c
Normal file
@@ -0,0 +1,159 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <perf/evlist.h>
|
||||
#include <perf/evsel.h>
|
||||
#include <linux/list.h>
|
||||
#include <internal/evlist.h>
|
||||
#include <internal/evsel.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <stdlib.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/threadmap.h>
|
||||
|
||||
void perf_evlist__init(struct perf_evlist *evlist)
|
||||
{
|
||||
INIT_LIST_HEAD(&evlist->entries);
|
||||
evlist->nr_entries = 0;
|
||||
}
|
||||
|
||||
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel)
|
||||
{
|
||||
/*
|
||||
* We already have cpus for evsel (via PMU sysfs) so
|
||||
* keep it, if there's no target cpu list defined.
|
||||
*/
|
||||
if (!evsel->own_cpus || evlist->has_user_cpus) {
|
||||
perf_cpu_map__put(evsel->cpus);
|
||||
evsel->cpus = perf_cpu_map__get(evlist->cpus);
|
||||
} else if (evsel->cpus != evsel->own_cpus) {
|
||||
perf_cpu_map__put(evsel->cpus);
|
||||
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
|
||||
}
|
||||
|
||||
perf_thread_map__put(evsel->threads);
|
||||
evsel->threads = perf_thread_map__get(evlist->threads);
|
||||
}
|
||||
|
||||
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
perf_evlist__for_each_evsel(evlist, evsel)
|
||||
__perf_evlist__propagate_maps(evlist, evsel);
|
||||
}
|
||||
|
||||
void perf_evlist__add(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel)
|
||||
{
|
||||
list_add_tail(&evsel->node, &evlist->entries);
|
||||
evlist->nr_entries += 1;
|
||||
__perf_evlist__propagate_maps(evlist, evsel);
|
||||
}
|
||||
|
||||
void perf_evlist__remove(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel)
|
||||
{
|
||||
list_del_init(&evsel->node);
|
||||
evlist->nr_entries -= 1;
|
||||
}
|
||||
|
||||
struct perf_evlist *perf_evlist__new(void)
|
||||
{
|
||||
struct perf_evlist *evlist = zalloc(sizeof(*evlist));
|
||||
|
||||
if (evlist != NULL)
|
||||
perf_evlist__init(evlist);
|
||||
|
||||
return evlist;
|
||||
}
|
||||
|
||||
struct perf_evsel *
|
||||
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
|
||||
{
|
||||
struct perf_evsel *next;
|
||||
|
||||
if (!prev) {
|
||||
next = list_first_entry(&evlist->entries,
|
||||
struct perf_evsel,
|
||||
node);
|
||||
} else {
|
||||
next = list_next_entry(prev, node);
|
||||
}
|
||||
|
||||
/* Empty list is noticed here so don't need checking on entry. */
|
||||
if (&next->node == &evlist->entries)
|
||||
return NULL;
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
void perf_evlist__delete(struct perf_evlist *evlist)
|
||||
{
|
||||
free(evlist);
|
||||
}
|
||||
|
||||
void perf_evlist__set_maps(struct perf_evlist *evlist,
|
||||
struct perf_cpu_map *cpus,
|
||||
struct perf_thread_map *threads)
|
||||
{
|
||||
/*
|
||||
* Allow for the possibility that one or another of the maps isn't being
|
||||
* changed i.e. don't put it. Note we are assuming the maps that are
|
||||
* being applied are brand new and evlist is taking ownership of the
|
||||
* original reference count of 1. If that is not the case it is up to
|
||||
* the caller to increase the reference count.
|
||||
*/
|
||||
if (cpus != evlist->cpus) {
|
||||
perf_cpu_map__put(evlist->cpus);
|
||||
evlist->cpus = perf_cpu_map__get(cpus);
|
||||
}
|
||||
|
||||
if (threads != evlist->threads) {
|
||||
perf_thread_map__put(evlist->threads);
|
||||
evlist->threads = perf_thread_map__get(threads);
|
||||
}
|
||||
|
||||
perf_evlist__propagate_maps(evlist);
|
||||
}
|
||||
|
||||
int perf_evlist__open(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
int err;
|
||||
|
||||
perf_evlist__for_each_entry(evlist, evsel) {
|
||||
err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
|
||||
if (err < 0)
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
perf_evlist__close(evlist);
|
||||
return err;
|
||||
}
|
||||
|
||||
void perf_evlist__close(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
perf_evlist__for_each_entry_reverse(evlist, evsel)
|
||||
perf_evsel__close(evsel);
|
||||
}
|
||||
|
||||
void perf_evlist__enable(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
perf_evlist__for_each_entry(evlist, evsel)
|
||||
perf_evsel__enable(evsel);
|
||||
}
|
||||
|
||||
void perf_evlist__disable(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
perf_evlist__for_each_entry(evlist, evsel)
|
||||
perf_evsel__disable(evsel);
|
||||
}
|
||||
232
tools/perf/lib/evsel.c
Normal file
232
tools/perf/lib/evsel.c
Normal file
@@ -0,0 +1,232 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <perf/evsel.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/threadmap.h>
|
||||
#include <linux/list.h>
|
||||
#include <internal/evsel.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <stdlib.h>
|
||||
#include <internal/xyarray.h>
|
||||
#include <internal/cpumap.h>
|
||||
#include <internal/threadmap.h>
|
||||
#include <internal/lib.h>
|
||||
#include <linux/string.h>
|
||||
#include <sys/ioctl.h>
|
||||
|
||||
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
|
||||
{
|
||||
INIT_LIST_HEAD(&evsel->node);
|
||||
evsel->attr = *attr;
|
||||
}
|
||||
|
||||
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
|
||||
{
|
||||
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
|
||||
|
||||
if (evsel != NULL)
|
||||
perf_evsel__init(evsel, attr);
|
||||
|
||||
return evsel;
|
||||
}
|
||||
|
||||
void perf_evsel__delete(struct perf_evsel *evsel)
|
||||
{
|
||||
free(evsel);
|
||||
}
|
||||
|
||||
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
|
||||
|
||||
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
|
||||
{
|
||||
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
|
||||
|
||||
if (evsel->fd) {
|
||||
int cpu, thread;
|
||||
for (cpu = 0; cpu < ncpus; cpu++) {
|
||||
for (thread = 0; thread < nthreads; thread++) {
|
||||
FD(evsel, cpu, thread) = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return evsel->fd != NULL ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
/*
 * Thin wrapper for the perf_event_open(2) syscall; glibc provides no
 * wrapper for it, hence the raw syscall(2) invocation.
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
|
||||
|
||||
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
|
||||
struct perf_thread_map *threads)
|
||||
{
|
||||
int cpu, thread, err = 0;
|
||||
|
||||
if (cpus == NULL) {
|
||||
static struct perf_cpu_map *empty_cpu_map;
|
||||
|
||||
if (empty_cpu_map == NULL) {
|
||||
empty_cpu_map = perf_cpu_map__dummy_new();
|
||||
if (empty_cpu_map == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cpus = empty_cpu_map;
|
||||
}
|
||||
|
||||
if (threads == NULL) {
|
||||
static struct perf_thread_map *empty_thread_map;
|
||||
|
||||
if (empty_thread_map == NULL) {
|
||||
empty_thread_map = perf_thread_map__new_dummy();
|
||||
if (empty_thread_map == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
threads = empty_thread_map;
|
||||
}
|
||||
|
||||
if (evsel->fd == NULL &&
|
||||
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
for (thread = 0; thread < threads->nr; thread++) {
|
||||
int fd;
|
||||
|
||||
fd = sys_perf_event_open(&evsel->attr,
|
||||
threads->map[thread].pid,
|
||||
cpus->map[cpu], -1, 0);
|
||||
|
||||
if (fd < 0)
|
||||
return -errno;
|
||||
|
||||
FD(evsel, cpu, thread) = fd;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void perf_evsel__close_fd(struct perf_evsel *evsel)
|
||||
{
|
||||
int cpu, thread;
|
||||
|
||||
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
|
||||
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
|
||||
close(FD(evsel, cpu, thread));
|
||||
FD(evsel, cpu, thread) = -1;
|
||||
}
|
||||
}
|
||||
|
||||
void perf_evsel__free_fd(struct perf_evsel *evsel)
|
||||
{
|
||||
xyarray__delete(evsel->fd);
|
||||
evsel->fd = NULL;
|
||||
}
|
||||
|
||||
void perf_evsel__close(struct perf_evsel *evsel)
|
||||
{
|
||||
if (evsel->fd == NULL)
|
||||
return;
|
||||
|
||||
perf_evsel__close_fd(evsel);
|
||||
perf_evsel__free_fd(evsel);
|
||||
}
|
||||
|
||||
int perf_evsel__read_size(struct perf_evsel *evsel)
|
||||
{
|
||||
u64 read_format = evsel->attr.read_format;
|
||||
int entry = sizeof(u64); /* value */
|
||||
int size = 0;
|
||||
int nr = 1;
|
||||
|
||||
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
|
||||
size += sizeof(u64);
|
||||
|
||||
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
|
||||
size += sizeof(u64);
|
||||
|
||||
if (read_format & PERF_FORMAT_ID)
|
||||
entry += sizeof(u64);
|
||||
|
||||
if (read_format & PERF_FORMAT_GROUP) {
|
||||
nr = evsel->nr_members;
|
||||
size += sizeof(u64);
|
||||
}
|
||||
|
||||
size += entry * nr;
|
||||
return size;
|
||||
}
|
||||
|
||||
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
|
||||
struct perf_counts_values *count)
|
||||
{
|
||||
size_t size = perf_evsel__read_size(evsel);
|
||||
|
||||
memset(count, 0, sizeof(*count));
|
||||
|
||||
if (FD(evsel, cpu, thread) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
|
||||
return -errno;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
|
||||
int ioc, void *arg)
|
||||
{
|
||||
int cpu, thread;
|
||||
|
||||
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
|
||||
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
|
||||
int fd = FD(evsel, cpu, thread),
|
||||
err = ioctl(fd, ioc, arg);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_evsel__enable(struct perf_evsel *evsel)
|
||||
{
|
||||
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
|
||||
}
|
||||
|
||||
int perf_evsel__disable(struct perf_evsel *evsel)
|
||||
{
|
||||
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
|
||||
}
|
||||
|
||||
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
|
||||
{
|
||||
return perf_evsel__run_ioctl(evsel,
|
||||
PERF_EVENT_IOC_SET_FILTER,
|
||||
(void *)filter);
|
||||
}
|
||||
|
||||
/* Return the cpu map stored on @evsel. */
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

/* Return the thread map stored on @evsel. */
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

/* Return a pointer to the evsel's own perf_event_attr (not a copy). */
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
|
||||
17
tools/perf/lib/include/internal/cpumap.h
Normal file
17
tools/perf/lib/include/internal/cpumap.h
Normal file
@@ -0,0 +1,17 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_INTERNAL_CPUMAP_H
|
||||
#define __LIBPERF_INTERNAL_CPUMAP_H
|
||||
|
||||
#include <linux/refcount.h>
|
||||
|
||||
/*
 * Reference-counted set of cpu numbers.  Lifetime is managed with
 * perf_cpu_map__get()/perf_cpu_map__put() (declared in <perf/cpumap.h>).
 */
struct perf_cpu_map {
	refcount_t	refcnt;
	int		nr;	/* number of entries in map[] */
	int		map[];	/* cpu numbers; flexible array member */
};

#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS	2048
#endif
|
||||
|
||||
#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
|
||||
50
tools/perf/lib/include/internal/evlist.h
Normal file
50
tools/perf/lib/include/internal/evlist.h
Normal file
@@ -0,0 +1,50 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_INTERNAL_EVLIST_H
|
||||
#define __LIBPERF_INTERNAL_EVLIST_H
|
||||
|
||||
#include <linux/list.h>
|
||||
|
||||
struct perf_cpu_map;
|
||||
struct perf_thread_map;
|
||||
|
||||
/*
 * List of evsels: each struct perf_evsel hangs off 'entries' via its
 * 'node' member (see the iteration macros below).
 */
struct perf_evlist {
	struct list_head	 entries;
	int			 nr_entries;	/* length of 'entries' */
	bool			 has_user_cpus;	/* NOTE(review): presumably set when the caller supplied an explicit cpu map -- confirm */
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;
};
|
||||
|
||||
/**
|
||||
* __perf_evlist__for_each_entry - iterate thru all the evsels
|
||||
* @list: list_head instance to iterate
|
||||
* @evsel: struct perf_evsel iterator
|
||||
*/
|
||||
#define __perf_evlist__for_each_entry(list, evsel) \
|
||||
list_for_each_entry(evsel, list, node)
|
||||
|
||||
/**
|
||||
* evlist__for_each_entry - iterate thru all the evsels
|
||||
* @evlist: perf_evlist instance to iterate
|
||||
* @evsel: struct perf_evsel iterator
|
||||
*/
|
||||
#define perf_evlist__for_each_entry(evlist, evsel) \
|
||||
__perf_evlist__for_each_entry(&(evlist)->entries, evsel)
|
||||
|
||||
/**
|
||||
* __perf_evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
|
||||
* @list: list_head instance to iterate
|
||||
* @evsel: struct evsel iterator
|
||||
*/
|
||||
#define __perf_evlist__for_each_entry_reverse(list, evsel) \
|
||||
list_for_each_entry_reverse(evsel, list, node)
|
||||
|
||||
/**
|
||||
* perf_evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
|
||||
* @evlist: evlist instance to iterate
|
||||
* @evsel: struct evsel iterator
|
||||
*/
|
||||
#define perf_evlist__for_each_entry_reverse(evlist, evsel) \
|
||||
__perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
|
||||
|
||||
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
|
||||
29
tools/perf/lib/include/internal/evsel.h
Normal file
29
tools/perf/lib/include/internal/evsel.h
Normal file
@@ -0,0 +1,29 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_INTERNAL_EVSEL_H
|
||||
#define __LIBPERF_INTERNAL_EVSEL_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/perf_event.h>
|
||||
|
||||
struct perf_cpu_map;
|
||||
struct perf_thread_map;
|
||||
|
||||
/*
 * A single event (counter) and the maps/fds it is opened on.
 */
struct perf_evsel {
	struct perf_evsel {
	struct list_head	 node;		/* link in perf_evlist::entries */
	struct perf_event_attr	 attr;		/* copied in perf_evsel__init() */
	struct perf_cpu_map	*cpus;
	struct perf_cpu_map	*own_cpus;
	struct perf_thread_map	*threads;
	struct xyarray		*fd;		/* per-(cpu, thread) fds; -1 = not open */

	/* parse modifier helper */
	int			 nr_members;	/* group size used by perf_evsel__read_size() */
};
|
||||
|
||||
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
|
||||
void perf_evsel__close_fd(struct perf_evsel *evsel);
|
||||
void perf_evsel__free_fd(struct perf_evsel *evsel);
|
||||
int perf_evsel__read_size(struct perf_evsel *evsel);
|
||||
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
|
||||
|
||||
#endif /* __LIBPERF_INTERNAL_EVSEL_H */
|
||||
10
tools/perf/lib/include/internal/lib.h
Normal file
10
tools/perf/lib/include/internal/lib.h
Normal file
@@ -0,0 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_LIB_H
#define __LIBPERF_INTERNAL_LIB_H

#include <unistd.h>

/* Read/write exactly 'n' bytes or fail; see lib.c for retry semantics. */
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);

/* BUG FIX: the closing comment wrongly said __LIBPERF_INTERNAL_CPUMAP_H. */
#endif /* __LIBPERF_INTERNAL_LIB_H */
|
||||
19
tools/perf/lib/include/internal/tests.h
Normal file
19
tools/perf/lib/include/internal/tests.h
Normal file
@@ -0,0 +1,19 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_INTERNAL_TESTS_H
|
||||
#define __LIBPERF_INTERNAL_TESTS_H
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#define __T_START fprintf(stdout, "- running %s...", __FILE__)
|
||||
#define __T_OK fprintf(stdout, "OK\n")
|
||||
#define __T_FAIL fprintf(stdout, "FAIL\n")
|
||||
|
||||
#define __T(text, cond) \
|
||||
do { \
|
||||
if (!(cond)) { \
|
||||
fprintf(stderr, "FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
|
||||
return -1; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif /* __LIBPERF_INTERNAL_TESTS_H */
|
||||
23
tools/perf/lib/include/internal/threadmap.h
Normal file
23
tools/perf/lib/include/internal/threadmap.h
Normal file
@@ -0,0 +1,23 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_INTERNAL_THREADMAP_H
|
||||
#define __LIBPERF_INTERNAL_THREADMAP_H
|
||||
|
||||
#include <linux/refcount.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
struct thread_map_data {
|
||||
pid_t pid;
|
||||
char *comm;
|
||||
};
|
||||
|
||||
struct perf_thread_map {
|
||||
refcount_t refcnt;
|
||||
int nr;
|
||||
int err_thread;
|
||||
struct thread_map_data map[];
|
||||
};
|
||||
|
||||
struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr);
|
||||
|
||||
#endif /* __LIBPERF_INTERNAL_THREADMAP_H */
|
||||
@@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _PERF_XYARRAY_H_
|
||||
#define _PERF_XYARRAY_H_ 1
|
||||
#ifndef __LIBPERF_INTERNAL_XYARRAY_H
|
||||
#define __LIBPERF_INTERNAL_XYARRAY_H
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
@@ -32,4 +32,4 @@ static inline int xyarray__max_x(struct xyarray *xy)
|
||||
return xy->max_x;
|
||||
}
|
||||
|
||||
#endif /* _PERF_XYARRAY_H_ */
|
||||
#endif /* __LIBPERF_INTERNAL_XYARRAY_H */
|
||||
22
tools/perf/lib/include/perf/core.h
Normal file
22
tools/perf/lib/include/perf/core.h
Normal file
@@ -0,0 +1,22 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_CORE_H
|
||||
#define __LIBPERF_CORE_H
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifndef LIBPERF_API
|
||||
#define LIBPERF_API __attribute__((visibility("default")))
|
||||
#endif
|
||||
|
||||
enum libperf_print_level {
|
||||
LIBPERF_WARN,
|
||||
LIBPERF_INFO,
|
||||
LIBPERF_DEBUG,
|
||||
};
|
||||
|
||||
typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
|
||||
const char *, va_list ap);
|
||||
|
||||
LIBPERF_API void libperf_set_print(libperf_print_fn_t fn);
|
||||
|
||||
#endif /* __LIBPERF_CORE_H */
|
||||
23
tools/perf/lib/include/perf/cpumap.h
Normal file
23
tools/perf/lib/include/perf/cpumap.h
Normal file
@@ -0,0 +1,23 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_CPUMAP_H
|
||||
#define __LIBPERF_CPUMAP_H
|
||||
|
||||
#include <perf/core.h>
|
||||
#include <stdio.h>
|
||||
|
||||
struct perf_cpu_map;
|
||||
|
||||
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
|
||||
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
|
||||
LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
|
||||
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
|
||||
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
|
||||
LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
|
||||
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
|
||||
|
||||
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
|
||||
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
|
||||
(idx) < perf_cpu_map__nr(cpus); \
|
||||
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
|
||||
|
||||
#endif /* __LIBPERF_CPUMAP_H */
|
||||
35
tools/perf/lib/include/perf/evlist.h
Normal file
35
tools/perf/lib/include/perf/evlist.h
Normal file
@@ -0,0 +1,35 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_EVLIST_H
|
||||
#define __LIBPERF_EVLIST_H
|
||||
|
||||
#include <perf/core.h>
|
||||
|
||||
struct perf_evlist;
|
||||
struct perf_evsel;
|
||||
struct perf_cpu_map;
|
||||
struct perf_thread_map;
|
||||
|
||||
LIBPERF_API void perf_evlist__init(struct perf_evlist *evlist);
|
||||
LIBPERF_API void perf_evlist__add(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel);
|
||||
LIBPERF_API void perf_evlist__remove(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel);
|
||||
LIBPERF_API struct perf_evlist *perf_evlist__new(void);
|
||||
LIBPERF_API void perf_evlist__delete(struct perf_evlist *evlist);
|
||||
LIBPERF_API struct perf_evsel* perf_evlist__next(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel);
|
||||
LIBPERF_API int perf_evlist__open(struct perf_evlist *evlist);
|
||||
LIBPERF_API void perf_evlist__close(struct perf_evlist *evlist);
|
||||
LIBPERF_API void perf_evlist__enable(struct perf_evlist *evlist);
|
||||
LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist);
|
||||
|
||||
#define perf_evlist__for_each_evsel(evlist, pos) \
|
||||
for ((pos) = perf_evlist__next((evlist), NULL); \
|
||||
(pos) != NULL; \
|
||||
(pos) = perf_evlist__next((evlist), (pos)))
|
||||
|
||||
LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
|
||||
struct perf_cpu_map *cpus,
|
||||
struct perf_thread_map *threads);
|
||||
|
||||
#endif /* __LIBPERF_EVLIST_H */
|
||||
39
tools/perf/lib/include/perf/evsel.h
Normal file
39
tools/perf/lib/include/perf/evsel.h
Normal file
@@ -0,0 +1,39 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_EVSEL_H
|
||||
#define __LIBPERF_EVSEL_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <perf/core.h>
|
||||
|
||||
struct perf_evsel;
|
||||
struct perf_event_attr;
|
||||
struct perf_cpu_map;
|
||||
struct perf_thread_map;
|
||||
|
||||
struct perf_counts_values {
|
||||
union {
|
||||
struct {
|
||||
uint64_t val;
|
||||
uint64_t ena;
|
||||
uint64_t run;
|
||||
};
|
||||
uint64_t values[3];
|
||||
};
|
||||
};
|
||||
|
||||
LIBPERF_API void perf_evsel__init(struct perf_evsel *evsel,
|
||||
struct perf_event_attr *attr);
|
||||
LIBPERF_API struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr);
|
||||
LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
|
||||
LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
|
||||
struct perf_thread_map *threads);
|
||||
LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
|
||||
LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
|
||||
struct perf_counts_values *count);
|
||||
LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
|
||||
LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
|
||||
LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
|
||||
LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
|
||||
LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
|
||||
|
||||
#endif /* __LIBPERF_EVSEL_H */
|
||||
18
tools/perf/lib/include/perf/threadmap.h
Normal file
18
tools/perf/lib/include/perf/threadmap.h
Normal file
@@ -0,0 +1,18 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LIBPERF_THREADMAP_H
|
||||
#define __LIBPERF_THREADMAP_H
|
||||
|
||||
#include <perf/core.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
struct perf_thread_map;
|
||||
|
||||
LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
|
||||
|
||||
LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
|
||||
LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
|
||||
|
||||
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
|
||||
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
|
||||
|
||||
#endif /* __LIBPERF_THREADMAP_H */
|
||||
18
tools/perf/lib/internal.h
Normal file
18
tools/perf/lib/internal.h
Normal file
@@ -0,0 +1,18 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_H
#define __LIBPERF_INTERNAL_H

/*
 * Internal logging helpers for libperf.
 *
 * NOTE(review): 'enum libperf_print_level' is declared in
 * <perf/core.h>, which this header does not include -- it relies on
 * every includer pulling that header in first.  Confirm whether the
 * include should be added to make this header self-contained.
 */
void libperf_print(enum libperf_print_level level,
		   const char *format, ...)
	__attribute__((format(printf, 2, 3)));

/* Prefix every message with "libperf: " and forward to libperf_print(). */
#define __pr(level, fmt, ...)           \
do {                                    \
	libperf_print(level, "libperf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(LIBPERF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBPERF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBPERF_DEBUG, fmt, ##__VA_ARGS__)

#endif /* __LIBPERF_INTERNAL_H */
|
||||
46
tools/perf/lib/lib.c
Normal file
46
tools/perf/lib/lib.c
Normal file
@@ -0,0 +1,46 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <unistd.h>
|
||||
#include <stdbool.h>
|
||||
#include <errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <internal/lib.h>
|
||||
|
||||
/*
 * Transfer exactly 'n' bytes, retrying on EINTR and on short
 * reads/writes.  Returns 'n' on success, or the (<= 0) read/write
 * result on error or EOF.
 */
static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
{
	/*
	 * BUG FIX: the original advanced a 'void *' with 'buf += ret',
	 * which is a GCC extension (undefined in standard C).  Walk the
	 * buffer with an unsigned char cursor instead.
	 */
	unsigned char *p = buf;
	size_t left = n;

	while (left) {
		/* buf must be treated as const if !is_read. */
		ssize_t ret = is_read ? read(fd, p, left) :
					write(fd, p, left);

		if (ret < 0 && errno == EINTR)
			continue;
		if (ret <= 0)
			return ret;

		left -= ret;
		p += ret;
	}

	BUG_ON((size_t)(p - (unsigned char *)buf) != n);
	return n;
}
|
||||
|
||||
/*
|
||||
* Read exactly 'n' bytes or return an error.
|
||||
*/
|
||||
/*
 * Read exactly 'n' bytes or return an error.
 * Returns n on success; <= 0 (EOF or -1 with errno set) otherwise.
 */
ssize_t readn(int fd, void *buf, size_t n)
{
	return ion(true, fd, buf, n);
}
|
||||
|
||||
/*
|
||||
* Write exactly 'n' bytes or return an error.
|
||||
*/
|
||||
/*
 * Write exactly 'n' bytes or return an error.
 * Returns n on success; <= 0 (with errno set) otherwise.
 */
ssize_t writen(int fd, const void *buf, size_t n)
{
	/* ion does not modify buf. */
	return ion(false, fd, (void *)buf, n);
}
|
||||
40
tools/perf/lib/libperf.map
Normal file
40
tools/perf/lib/libperf.map
Normal file
@@ -0,0 +1,40 @@
|
||||
LIBPERF_0.0.1 {
|
||||
global:
|
||||
libperf_set_print;
|
||||
perf_cpu_map__dummy_new;
|
||||
perf_cpu_map__get;
|
||||
perf_cpu_map__put;
|
||||
perf_cpu_map__new;
|
||||
perf_cpu_map__read;
|
||||
perf_cpu_map__nr;
|
||||
perf_cpu_map__cpu;
|
||||
perf_thread_map__new_dummy;
|
||||
perf_thread_map__set_pid;
|
||||
perf_thread_map__comm;
|
||||
perf_thread_map__get;
|
||||
perf_thread_map__put;
|
||||
perf_evsel__new;
|
||||
perf_evsel__delete;
|
||||
perf_evsel__enable;
|
||||
perf_evsel__disable;
|
||||
perf_evsel__init;
|
||||
perf_evsel__open;
|
||||
perf_evsel__close;
|
||||
perf_evsel__read;
|
||||
perf_evsel__cpus;
|
||||
perf_evsel__threads;
|
||||
perf_evsel__attr;
|
||||
perf_evlist__new;
|
||||
perf_evlist__delete;
|
||||
perf_evlist__open;
|
||||
perf_evlist__close;
|
||||
perf_evlist__enable;
|
||||
perf_evlist__disable;
|
||||
perf_evlist__init;
|
||||
perf_evlist__add;
|
||||
perf_evlist__remove;
|
||||
perf_evlist__next;
|
||||
perf_evlist__set_maps;
|
||||
local:
|
||||
*;
|
||||
};
|
||||
11
tools/perf/lib/libperf.pc.template
Normal file
11
tools/perf/lib/libperf.pc.template
Normal file
@@ -0,0 +1,11 @@
|
||||
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
|
||||
prefix=@PREFIX@
|
||||
libdir=@LIBDIR@
|
||||
includedir=${prefix}/include
|
||||
|
||||
Name: libperf
|
||||
Description: perf library
|
||||
Version: @VERSION@
|
||||
Libs: -L${libdir} -lperf
|
||||
Cflags: -I${includedir}
|
||||
38
tools/perf/lib/tests/Makefile
Normal file
38
tools/perf/lib/tests/Makefile
Normal file
@@ -0,0 +1,38 @@
|
||||
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
|
||||
TESTS = test-cpumap test-threadmap test-evlist test-evsel
|
||||
|
||||
TESTS_SO := $(addsuffix -so,$(TESTS))
|
||||
TESTS_A := $(addsuffix -a,$(TESTS))
|
||||
|
||||
# Set compile option CFLAGS
|
||||
ifdef EXTRA_CFLAGS
|
||||
CFLAGS := $(EXTRA_CFLAGS)
|
||||
else
|
||||
CFLAGS := -g -Wall
|
||||
endif
|
||||
|
||||
all:
|
||||
|
||||
include $(srctree)/tools/scripts/Makefile.include
|
||||
|
||||
INCLUDE = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include
|
||||
|
||||
$(TESTS_A): FORCE
|
||||
$(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a
|
||||
|
||||
$(TESTS_SO): FORCE
|
||||
$(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) -lperf
|
||||
|
||||
all: $(TESTS_A) $(TESTS_SO)
|
||||
|
||||
run:
|
||||
@echo "running static:"
|
||||
@for i in $(TESTS_A); do ./$$i; done
|
||||
@echo "running dynamic:"
|
||||
@for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i; done
|
||||
|
||||
clean:
|
||||
$(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO)
|
||||
|
||||
.PHONY: all clean FORCE
|
||||
21
tools/perf/lib/tests/test-cpumap.c
Normal file
21
tools/perf/lib/tests/test-cpumap.c
Normal file
@@ -0,0 +1,21 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <perf/cpumap.h>
|
||||
#include <internal/tests.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct perf_cpu_map *cpus;
|
||||
|
||||
__T_START;
|
||||
|
||||
cpus = perf_cpu_map__dummy_new();
|
||||
if (!cpus)
|
||||
return -1;
|
||||
|
||||
perf_cpu_map__get(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
|
||||
__T_OK;
|
||||
return 0;
|
||||
}
|
||||
186
tools/perf/lib/tests/test-evlist.c
Normal file
186
tools/perf/lib/tests/test-evlist.c
Normal file
@@ -0,0 +1,186 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/perf_event.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/threadmap.h>
|
||||
#include <perf/evlist.h>
|
||||
#include <perf/evsel.h>
|
||||
#include <internal/tests.h>
|
||||
|
||||
static int test_stat_cpu(void)
|
||||
{
|
||||
struct perf_cpu_map *cpus;
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr1 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_CPU_CLOCK,
|
||||
};
|
||||
struct perf_event_attr attr2 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_TASK_CLOCK,
|
||||
};
|
||||
int err, cpu, tmp;
|
||||
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
__T("failed to create cpus", cpus);
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
__T("failed to create evlist", evlist);
|
||||
|
||||
evsel = perf_evsel__new(&attr1);
|
||||
__T("failed to create evsel1", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
evsel = perf_evsel__new(&attr2);
|
||||
__T("failed to create evsel2", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
perf_evlist__set_maps(evlist, cpus, NULL);
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_evlist__for_each_evsel(evlist, evsel) {
|
||||
cpus = perf_evsel__cpus(evsel);
|
||||
|
||||
perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
|
||||
perf_evsel__read(evsel, cpu, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
}
|
||||
}
|
||||
|
||||
perf_evlist__close(evlist);
|
||||
perf_evlist__delete(evlist);
|
||||
|
||||
perf_cpu_map__put(cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_stat_thread(void)
|
||||
{
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
struct perf_thread_map *threads;
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr1 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_CPU_CLOCK,
|
||||
};
|
||||
struct perf_event_attr attr2 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_TASK_CLOCK,
|
||||
};
|
||||
int err;
|
||||
|
||||
threads = perf_thread_map__new_dummy();
|
||||
__T("failed to create threads", threads);
|
||||
|
||||
perf_thread_map__set_pid(threads, 0, 0);
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
__T("failed to create evlist", evlist);
|
||||
|
||||
evsel = perf_evsel__new(&attr1);
|
||||
__T("failed to create evsel1", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
evsel = perf_evsel__new(&attr2);
|
||||
__T("failed to create evsel2", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
perf_evlist__set_maps(evlist, NULL, threads);
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_evlist__for_each_evsel(evlist, evsel) {
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
}
|
||||
|
||||
perf_evlist__close(evlist);
|
||||
perf_evlist__delete(evlist);
|
||||
|
||||
perf_thread_map__put(threads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_stat_thread_enable(void)
|
||||
{
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
struct perf_thread_map *threads;
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr1 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_CPU_CLOCK,
|
||||
.disabled = 1,
|
||||
};
|
||||
struct perf_event_attr attr2 = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_TASK_CLOCK,
|
||||
.disabled = 1,
|
||||
};
|
||||
int err;
|
||||
|
||||
threads = perf_thread_map__new_dummy();
|
||||
__T("failed to create threads", threads);
|
||||
|
||||
perf_thread_map__set_pid(threads, 0, 0);
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
__T("failed to create evlist", evlist);
|
||||
|
||||
evsel = perf_evsel__new(&attr1);
|
||||
__T("failed to create evsel1", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
evsel = perf_evsel__new(&attr2);
|
||||
__T("failed to create evsel2", evsel);
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
perf_evlist__set_maps(evlist, NULL, threads);
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_evlist__for_each_evsel(evlist, evsel) {
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val == 0);
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
|
||||
perf_evlist__for_each_evsel(evlist, evsel) {
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
}
|
||||
|
||||
perf_evlist__disable(evlist);
|
||||
|
||||
perf_evlist__close(evlist);
|
||||
perf_evlist__delete(evlist);
|
||||
|
||||
perf_thread_map__put(threads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__T_START;
|
||||
|
||||
test_stat_cpu();
|
||||
test_stat_thread();
|
||||
test_stat_thread_enable();
|
||||
|
||||
__T_OK;
|
||||
return 0;
|
||||
}
|
||||
125
tools/perf/lib/tests/test-evsel.c
Normal file
125
tools/perf/lib/tests/test-evsel.c
Normal file
@@ -0,0 +1,125 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/perf_event.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/threadmap.h>
|
||||
#include <perf/evsel.h>
|
||||
#include <internal/tests.h>
|
||||
|
||||
static int test_stat_cpu(void)
|
||||
{
|
||||
struct perf_cpu_map *cpus;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_CPU_CLOCK,
|
||||
};
|
||||
int err, cpu, tmp;
|
||||
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
__T("failed to create cpus", cpus);
|
||||
|
||||
evsel = perf_evsel__new(&attr);
|
||||
__T("failed to create evsel", evsel);
|
||||
|
||||
err = perf_evsel__open(evsel, cpus, NULL);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
|
||||
perf_evsel__read(evsel, cpu, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
}
|
||||
|
||||
perf_evsel__close(evsel);
|
||||
perf_evsel__delete(evsel);
|
||||
|
||||
perf_cpu_map__put(cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_stat_thread(void)
|
||||
{
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
struct perf_thread_map *threads;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_TASK_CLOCK,
|
||||
};
|
||||
int err;
|
||||
|
||||
threads = perf_thread_map__new_dummy();
|
||||
__T("failed to create threads", threads);
|
||||
|
||||
perf_thread_map__set_pid(threads, 0, 0);
|
||||
|
||||
evsel = perf_evsel__new(&attr);
|
||||
__T("failed to create evsel", evsel);
|
||||
|
||||
err = perf_evsel__open(evsel, NULL, threads);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
|
||||
perf_evsel__close(evsel);
|
||||
perf_evsel__delete(evsel);
|
||||
|
||||
perf_thread_map__put(threads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_stat_thread_enable(void)
|
||||
{
|
||||
struct perf_counts_values counts = { .val = 0 };
|
||||
struct perf_thread_map *threads;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_SOFTWARE,
|
||||
.config = PERF_COUNT_SW_TASK_CLOCK,
|
||||
.disabled = 1,
|
||||
};
|
||||
int err;
|
||||
|
||||
threads = perf_thread_map__new_dummy();
|
||||
__T("failed to create threads", threads);
|
||||
|
||||
perf_thread_map__set_pid(threads, 0, 0);
|
||||
|
||||
evsel = perf_evsel__new(&attr);
|
||||
__T("failed to create evsel", evsel);
|
||||
|
||||
err = perf_evsel__open(evsel, NULL, threads);
|
||||
__T("failed to open evsel", err == 0);
|
||||
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val == 0);
|
||||
|
||||
err = perf_evsel__enable(evsel);
|
||||
__T("failed to enable evsel", err == 0);
|
||||
|
||||
perf_evsel__read(evsel, 0, 0, &counts);
|
||||
__T("failed to read value for evsel", counts.val != 0);
|
||||
|
||||
err = perf_evsel__disable(evsel);
|
||||
__T("failed to enable evsel", err == 0);
|
||||
|
||||
perf_evsel__close(evsel);
|
||||
perf_evsel__delete(evsel);
|
||||
|
||||
perf_thread_map__put(threads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
__T_START;
|
||||
|
||||
test_stat_cpu();
|
||||
test_stat_thread();
|
||||
test_stat_thread_enable();
|
||||
|
||||
__T_OK;
|
||||
return 0;
|
||||
}
|
||||
21
tools/perf/lib/tests/test-threadmap.c
Normal file
21
tools/perf/lib/tests/test-threadmap.c
Normal file
@@ -0,0 +1,21 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <perf/threadmap.h>
|
||||
#include <internal/tests.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct perf_thread_map *threads;
|
||||
|
||||
__T_START;
|
||||
|
||||
threads = perf_thread_map__new_dummy();
|
||||
if (!threads)
|
||||
return -1;
|
||||
|
||||
perf_thread_map__get(threads);
|
||||
perf_thread_map__put(threads);
|
||||
perf_thread_map__put(threads);
|
||||
|
||||
__T_OK;
|
||||
return 0;
|
||||
}
|
||||
81
tools/perf/lib/threadmap.c
Normal file
81
tools/perf/lib/threadmap.c
Normal file
@@ -0,0 +1,81 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <perf/threadmap.h>
|
||||
#include <stdlib.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <internal/threadmap.h>
|
||||
#include <string.h>
|
||||
#include <asm/bug.h>
|
||||
#include <stdio.h>
|
||||
|
||||
static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
|
||||
{
|
||||
size_t size = (nr - start) * sizeof(map->map[0]);
|
||||
|
||||
memset(&map->map[start], 0, size);
|
||||
map->err_thread = -1;
|
||||
}
|
||||
|
||||
struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr)
|
||||
{
|
||||
size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
|
||||
int start = map ? map->nr : 0;
|
||||
|
||||
map = realloc(map, size);
|
||||
/*
|
||||
* We only realloc to add more items, let's reset new items.
|
||||
*/
|
||||
if (map)
|
||||
perf_thread_map__reset(map, start, nr);
|
||||
|
||||
return map;
|
||||
}
|
||||
|
||||
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
|
||||
|
||||
void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid)
|
||||
{
|
||||
map->map[thread].pid = pid;
|
||||
}
|
||||
|
||||
char *perf_thread_map__comm(struct perf_thread_map *map, int thread)
|
||||
{
|
||||
return map->map[thread].comm;
|
||||
}
|
||||
|
||||
struct perf_thread_map *perf_thread_map__new_dummy(void)
|
||||
{
|
||||
struct perf_thread_map *threads = thread_map__alloc(1);
|
||||
|
||||
if (threads != NULL) {
|
||||
perf_thread_map__set_pid(threads, 0, -1);
|
||||
threads->nr = 1;
|
||||
refcount_set(&threads->refcnt, 1);
|
||||
}
|
||||
return threads;
|
||||
}
|
||||
|
||||
static void perf_thread_map__delete(struct perf_thread_map *threads)
|
||||
{
|
||||
if (threads) {
|
||||
int i;
|
||||
|
||||
WARN_ONCE(refcount_read(&threads->refcnt) != 0,
|
||||
"thread map refcnt unbalanced\n");
|
||||
for (i = 0; i < threads->nr; i++)
|
||||
free(perf_thread_map__comm(threads, i));
|
||||
free(threads);
|
||||
}
|
||||
}
|
||||
|
||||
struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map)
|
||||
{
|
||||
if (map)
|
||||
refcount_inc(&map->refcnt);
|
||||
return map;
|
||||
}
|
||||
|
||||
void perf_thread_map__put(struct perf_thread_map *map)
|
||||
{
|
||||
if (map && refcount_dec_and_test(&map->refcnt))
|
||||
perf_thread_map__delete(map);
|
||||
}
|
||||
33
tools/perf/lib/xyarray.c
Normal file
33
tools/perf/lib/xyarray.c
Normal file
@@ -0,0 +1,33 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <internal/xyarray.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
|
||||
{
|
||||
size_t row_size = ylen * entry_size;
|
||||
struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
|
||||
|
||||
if (xy != NULL) {
|
||||
xy->entry_size = entry_size;
|
||||
xy->row_size = row_size;
|
||||
xy->entries = xlen * ylen;
|
||||
xy->max_x = xlen;
|
||||
xy->max_y = ylen;
|
||||
}
|
||||
|
||||
return xy;
|
||||
}
|
||||
|
||||
void xyarray__reset(struct xyarray *xy)
|
||||
{
|
||||
size_t n = xy->entries * xy->entry_size;
|
||||
|
||||
memset(xy->contents, 0, n);
|
||||
}
|
||||
|
||||
void xyarray__delete(struct xyarray *xy)
|
||||
{
|
||||
free(xy);
|
||||
}
|
||||
@@ -52,7 +52,7 @@
|
||||
{,
|
||||
"EventCode": "0x4D02C",
|
||||
"EventName": "PM_PMC1_REWIND",
|
||||
"BriefDescription": ""
|
||||
"BriefDescription": "PMC1 rewind event"
|
||||
},
|
||||
{,
|
||||
"EventCode": "0x15158",
|
||||
|
||||
@@ -237,7 +237,7 @@
|
||||
{,
|
||||
"EventCode": "0xD0B0",
|
||||
"EventName": "PM_HWSYNC",
|
||||
"BriefDescription": ""
|
||||
"BriefDescription": "A hwsync instruction was decoded and transferred"
|
||||
},
|
||||
{,
|
||||
"EventCode": "0x168B0",
|
||||
@@ -1232,7 +1232,7 @@
|
||||
{,
|
||||
"EventCode": "0xD8AC",
|
||||
"EventName": "PM_LWSYNC",
|
||||
"BriefDescription": ""
|
||||
"BriefDescription": "An lwsync instruction was decoded and transferred"
|
||||
},
|
||||
{,
|
||||
"EventCode": "0x2094",
|
||||
@@ -1747,7 +1747,7 @@
|
||||
{,
|
||||
"EventCode": "0xD8B0",
|
||||
"EventName": "PM_PTESYNC",
|
||||
"BriefDescription": ""
|
||||
"BriefDescription": "A ptesync instruction was counted when the instruction is decoded and transmitted"
|
||||
},
|
||||
{,
|
||||
"EventCode": "0x26086",
|
||||
@@ -2107,7 +2107,7 @@
|
||||
{,
|
||||
"EventCode": "0xF080",
|
||||
"EventName": "PM_LSU_STCX_FAIL",
|
||||
"BriefDescription": ""
|
||||
"BriefDescription": "The LSU detects the condition that a stcx instruction failed. No requirement to wait for a response from the nest"
|
||||
},
|
||||
{,
|
||||
"EventCode": "0x30038",
|
||||
|
||||
@@ -25,7 +25,7 @@ static void testcase(void)
|
||||
}
|
||||
}
|
||||
|
||||
static int count_samples(struct perf_evlist *evlist, int *sample_count,
|
||||
static int count_samples(struct evlist *evlist, int *sample_count,
|
||||
int *comm_count)
|
||||
{
|
||||
int i;
|
||||
@@ -55,7 +55,7 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
|
||||
return TEST_OK;
|
||||
}
|
||||
|
||||
static int do_test(struct perf_evlist *evlist, int mmap_pages,
|
||||
static int do_test(struct evlist *evlist, int mmap_pages,
|
||||
int *sample_count, int *comm_count)
|
||||
{
|
||||
int err;
|
||||
@@ -68,9 +68,9 @@ static int do_test(struct perf_evlist *evlist, int mmap_pages,
|
||||
return TEST_FAIL;
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
testcase();
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
err = count_samples(evlist, sample_count, comm_count);
|
||||
perf_evlist__munmap(evlist);
|
||||
@@ -82,8 +82,8 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
|
||||
{
|
||||
int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
|
||||
char pid[16], sbuf[STRERR_BUFSIZE];
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel __maybe_unused;
|
||||
struct evlist *evlist;
|
||||
struct evsel *evsel __maybe_unused;
|
||||
struct parse_events_error parse_error;
|
||||
struct record_opts opts = {
|
||||
.target = {
|
||||
@@ -99,7 +99,7 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
|
||||
pid[sizeof(pid) - 1] = '\0';
|
||||
opts.target.tid = opts.target.pid = pid;
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (!evlist) {
|
||||
pr_debug("Not enough memory to create evlist\n");
|
||||
return TEST_FAIL;
|
||||
@@ -125,7 +125,7 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
|
||||
|
||||
perf_evlist__config(evlist, &opts, NULL);
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
pr_debug("perf_evlist__open: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -150,6 +150,6 @@ int test__backward_ring_buffer(struct test *test __maybe_unused, int subtest __m
|
||||
|
||||
ret = TEST_OK;
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "tests.h"
|
||||
#include "cpumap.h"
|
||||
#include "debug.h"
|
||||
@@ -9,7 +10,7 @@
|
||||
|
||||
static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
{
|
||||
struct cpu_map *map = cpu_map__new(str);
|
||||
struct perf_cpu_map *map = perf_cpu_map__new(str);
|
||||
unsigned long *bm = NULL;
|
||||
int i;
|
||||
|
||||
@@ -21,7 +22,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
}
|
||||
|
||||
if (map)
|
||||
cpu_map__put(map);
|
||||
perf_cpu_map__put(map);
|
||||
return bm;
|
||||
}
|
||||
|
||||
|
||||
@@ -118,7 +118,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
|
||||
|
||||
char pid[16];
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
struct perf_evlist *evlist;
|
||||
struct evlist *evlist;
|
||||
int i, ret = TEST_FAIL, err = 0, count = 0;
|
||||
|
||||
struct parse_events_state parse_state;
|
||||
@@ -140,7 +140,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
|
||||
opts.target.tid = opts.target.pid = pid;
|
||||
|
||||
/* Instead of perf_evlist__new_default, don't add default events */
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (!evlist) {
|
||||
pr_debug("Not enough memory to create evlist\n");
|
||||
return TEST_FAIL;
|
||||
@@ -157,7 +157,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
|
||||
|
||||
perf_evlist__config(evlist, &opts, NULL);
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
pr_debug("perf_evlist__open: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -171,9 +171,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
|
||||
goto out_delete_evlist;
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
(*func)();
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
union perf_event *event;
|
||||
@@ -200,7 +200,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
|
||||
ret = TEST_OK;
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/param.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
#include "parse-events.h"
|
||||
#include "evlist.h"
|
||||
@@ -362,7 +364,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
|
||||
}
|
||||
|
||||
static int process_sample_event(struct machine *machine,
|
||||
struct perf_evlist *evlist,
|
||||
struct evlist *evlist,
|
||||
union perf_event *event, struct state *state)
|
||||
{
|
||||
struct perf_sample sample;
|
||||
@@ -385,7 +387,7 @@ static int process_sample_event(struct machine *machine,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int process_event(struct machine *machine, struct perf_evlist *evlist,
|
||||
static int process_event(struct machine *machine, struct evlist *evlist,
|
||||
union perf_event *event, struct state *state)
|
||||
{
|
||||
if (event->header.type == PERF_RECORD_SAMPLE)
|
||||
@@ -408,7 +410,7 @@ static int process_event(struct machine *machine, struct perf_evlist *evlist,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_events(struct machine *machine, struct perf_evlist *evlist,
|
||||
static int process_events(struct machine *machine, struct evlist *evlist,
|
||||
struct state *state)
|
||||
{
|
||||
union perf_event *event;
|
||||
@@ -552,10 +554,10 @@ static int do_test_code_reading(bool try_kcore)
|
||||
struct state state = {
|
||||
.done_cnt = 0,
|
||||
};
|
||||
struct thread_map *threads = NULL;
|
||||
struct cpu_map *cpus = NULL;
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct perf_evsel *evsel = NULL;
|
||||
struct perf_thread_map *threads = NULL;
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
struct evlist *evlist = NULL;
|
||||
struct evsel *evsel = NULL;
|
||||
int err = -1, ret;
|
||||
pid_t pid;
|
||||
struct map *map;
|
||||
@@ -613,22 +615,22 @@ static int do_test_code_reading(bool try_kcore)
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
cpus = cpu_map__new(NULL);
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
if (!cpus) {
|
||||
pr_debug("cpu_map__new failed\n");
|
||||
pr_debug("perf_cpu_map__new failed\n");
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
const char *str;
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (!evlist) {
|
||||
pr_debug("perf_evlist__new failed\n");
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
perf_evlist__set_maps(evlist, cpus, threads);
|
||||
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
||||
|
||||
str = do_determine_event(excl_kernel);
|
||||
pr_debug("Parsing event '%s'\n", str);
|
||||
@@ -642,11 +644,11 @@ static int do_test_code_reading(bool try_kcore)
|
||||
|
||||
evsel = perf_evlist__first(evlist);
|
||||
|
||||
evsel->attr.comm = 1;
|
||||
evsel->attr.disabled = 1;
|
||||
evsel->attr.enable_on_exec = 0;
|
||||
evsel->core.attr.comm = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
evsel->core.attr.enable_on_exec = 0;
|
||||
|
||||
ret = perf_evlist__open(evlist);
|
||||
ret = evlist__open(evlist);
|
||||
if (ret < 0) {
|
||||
if (!excl_kernel) {
|
||||
excl_kernel = true;
|
||||
@@ -655,10 +657,10 @@ static int do_test_code_reading(bool try_kcore)
|
||||
* and will be freed by following perf_evlist__set_maps
|
||||
* call. Getting refference to keep them alive.
|
||||
*/
|
||||
cpu_map__get(cpus);
|
||||
thread_map__get(threads);
|
||||
perf_evlist__set_maps(evlist, NULL, NULL);
|
||||
perf_evlist__delete(evlist);
|
||||
perf_cpu_map__get(cpus);
|
||||
perf_thread_map__get(threads);
|
||||
perf_evlist__set_maps(&evlist->core, NULL, NULL);
|
||||
evlist__delete(evlist);
|
||||
evlist = NULL;
|
||||
continue;
|
||||
}
|
||||
@@ -680,11 +682,11 @@ static int do_test_code_reading(bool try_kcore)
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
do_something();
|
||||
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
ret = process_events(machine, evlist, &state);
|
||||
if (ret < 0)
|
||||
@@ -703,10 +705,10 @@ static int do_test_code_reading(bool try_kcore)
|
||||
out_err:
|
||||
|
||||
if (evlist) {
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
} else {
|
||||
cpu_map__put(cpus);
|
||||
thread_map__put(threads);
|
||||
perf_cpu_map__put(cpus);
|
||||
perf_thread_map__put(threads);
|
||||
}
|
||||
machine__delete_threads(machine);
|
||||
machine__delete(machine);
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "event.h"
|
||||
#include <string.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "debug.h"
|
||||
|
||||
struct machine;
|
||||
@@ -17,7 +18,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
|
||||
struct cpu_map_event *map_event = &event->cpu_map;
|
||||
struct cpu_map_mask *mask;
|
||||
struct cpu_map_data *data;
|
||||
struct cpu_map *map;
|
||||
struct perf_cpu_map *map;
|
||||
int i;
|
||||
|
||||
data = &map_event->data;
|
||||
@@ -39,7 +40,7 @@ static int process_event_mask(struct perf_tool *tool __maybe_unused,
|
||||
TEST_ASSERT_VAL("wrong cpu", map->map[i] == i);
|
||||
}
|
||||
|
||||
cpu_map__put(map);
|
||||
perf_cpu_map__put(map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -51,7 +52,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
|
||||
struct cpu_map_event *map_event = &event->cpu_map;
|
||||
struct cpu_map_entries *cpus;
|
||||
struct cpu_map_data *data;
|
||||
struct cpu_map *map;
|
||||
struct perf_cpu_map *map;
|
||||
|
||||
data = &map_event->data;
|
||||
|
||||
@@ -68,36 +69,36 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
|
||||
TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1);
|
||||
TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256);
|
||||
TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
|
||||
cpu_map__put(map);
|
||||
perf_cpu_map__put(map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct cpu_map *cpus;
|
||||
struct perf_cpu_map *cpus;
|
||||
|
||||
/* This one is better stores in mask. */
|
||||
cpus = cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
|
||||
cpus = perf_cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
|
||||
|
||||
TEST_ASSERT_VAL("failed to synthesize map",
|
||||
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
|
||||
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
|
||||
/* This one is better stores in cpu values. */
|
||||
cpus = cpu_map__new("1,256");
|
||||
cpus = perf_cpu_map__new("1,256");
|
||||
|
||||
TEST_ASSERT_VAL("failed to synthesize map",
|
||||
!perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
|
||||
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_map_print(const char *str)
|
||||
{
|
||||
struct cpu_map *map = cpu_map__new(str);
|
||||
struct perf_cpu_map *map = perf_cpu_map__new(str);
|
||||
char buf[100];
|
||||
|
||||
if (!map)
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include <inttypes.h>
|
||||
#include <string.h>
|
||||
#include <sys/wait.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "tests.h"
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
@@ -12,9 +13,9 @@
|
||||
#include "thread_map.h"
|
||||
#include "target.h"
|
||||
|
||||
static int attach__enable_on_exec(struct perf_evlist *evlist)
|
||||
static int attach__enable_on_exec(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
struct target target = {
|
||||
.uid = UINT_MAX,
|
||||
};
|
||||
@@ -36,9 +37,9 @@ static int attach__enable_on_exec(struct perf_evlist *evlist)
|
||||
return err;
|
||||
}
|
||||
|
||||
evsel->attr.enable_on_exec = 1;
|
||||
evsel->core.attr.enable_on_exec = 1;
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
pr_debug("perf_evlist__open: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -48,16 +49,16 @@ static int attach__enable_on_exec(struct perf_evlist *evlist)
|
||||
return perf_evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
|
||||
}
|
||||
|
||||
static int detach__enable_on_exec(struct perf_evlist *evlist)
|
||||
static int detach__enable_on_exec(struct evlist *evlist)
|
||||
{
|
||||
waitpid(evlist->workload.pid, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int attach__current_disabled(struct perf_evlist *evlist)
|
||||
static int attach__current_disabled(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct thread_map *threads;
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
struct perf_thread_map *threads;
|
||||
int err;
|
||||
|
||||
pr_debug("attaching to current thread as disabled\n");
|
||||
@@ -68,7 +69,7 @@ static int attach__current_disabled(struct perf_evlist *evlist)
|
||||
return -1;
|
||||
}
|
||||
|
||||
evsel->attr.disabled = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
|
||||
err = perf_evsel__open_per_thread(evsel, threads);
|
||||
if (err) {
|
||||
@@ -76,14 +77,14 @@ static int attach__current_disabled(struct perf_evlist *evlist)
|
||||
return err;
|
||||
}
|
||||
|
||||
thread_map__put(threads);
|
||||
return perf_evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
|
||||
perf_thread_map__put(threads);
|
||||
return evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
|
||||
}
|
||||
|
||||
static int attach__current_enabled(struct perf_evlist *evlist)
|
||||
static int attach__current_enabled(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct thread_map *threads;
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
struct perf_thread_map *threads;
|
||||
int err;
|
||||
|
||||
pr_debug("attaching to current thread as enabled\n");
|
||||
@@ -96,32 +97,32 @@ static int attach__current_enabled(struct perf_evlist *evlist)
|
||||
|
||||
err = perf_evsel__open_per_thread(evsel, threads);
|
||||
|
||||
thread_map__put(threads);
|
||||
perf_thread_map__put(threads);
|
||||
return err == 0 ? TEST_OK : TEST_FAIL;
|
||||
}
|
||||
|
||||
static int detach__disable(struct perf_evlist *evlist)
|
||||
static int detach__disable(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
|
||||
return perf_evsel__enable(evsel);
|
||||
return evsel__enable(evsel);
|
||||
}
|
||||
|
||||
static int attach__cpu_disabled(struct perf_evlist *evlist)
|
||||
static int attach__cpu_disabled(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct cpu_map *cpus;
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
struct perf_cpu_map *cpus;
|
||||
int err;
|
||||
|
||||
pr_debug("attaching to CPU 0 as enabled\n");
|
||||
|
||||
cpus = cpu_map__new("0");
|
||||
cpus = perf_cpu_map__new("0");
|
||||
if (cpus == NULL) {
|
||||
pr_debug("failed to call cpu_map__new\n");
|
||||
pr_debug("failed to call perf_cpu_map__new\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
evsel->attr.disabled = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
|
||||
err = perf_evsel__open_per_cpu(evsel, cpus);
|
||||
if (err) {
|
||||
@@ -132,21 +133,21 @@ static int attach__cpu_disabled(struct perf_evlist *evlist)
|
||||
return err;
|
||||
}
|
||||
|
||||
cpu_map__put(cpus);
|
||||
return perf_evsel__enable(evsel);
|
||||
perf_cpu_map__put(cpus);
|
||||
return evsel__enable(evsel);
|
||||
}
|
||||
|
||||
static int attach__cpu_enabled(struct perf_evlist *evlist)
|
||||
static int attach__cpu_enabled(struct evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evlist__last(evlist);
|
||||
struct cpu_map *cpus;
|
||||
struct evsel *evsel = perf_evlist__last(evlist);
|
||||
struct perf_cpu_map *cpus;
|
||||
int err;
|
||||
|
||||
pr_debug("attaching to CPU 0 as enabled\n");
|
||||
|
||||
cpus = cpu_map__new("0");
|
||||
cpus = perf_cpu_map__new("0");
|
||||
if (cpus == NULL) {
|
||||
pr_debug("failed to call cpu_map__new\n");
|
||||
pr_debug("failed to call perf_cpu_map__new\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -154,19 +155,19 @@ static int attach__cpu_enabled(struct perf_evlist *evlist)
|
||||
if (err == -EACCES)
|
||||
return TEST_SKIP;
|
||||
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
return err ? TEST_FAIL : TEST_OK;
|
||||
}
|
||||
|
||||
static int test_times(int (attach)(struct perf_evlist *),
|
||||
int (detach)(struct perf_evlist *))
|
||||
static int test_times(int (attach)(struct evlist *),
|
||||
int (detach)(struct evlist *))
|
||||
{
|
||||
struct perf_counts_values count;
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = NULL;
|
||||
struct evsel *evsel;
|
||||
int err = -1, i;
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (!evlist) {
|
||||
pr_debug("failed to create event list\n");
|
||||
goto out_err;
|
||||
@@ -179,7 +180,7 @@ static int test_times(int (attach)(struct perf_evlist *),
|
||||
}
|
||||
|
||||
evsel = perf_evlist__last(evlist);
|
||||
evsel->attr.read_format |=
|
||||
evsel->core.attr.read_format |=
|
||||
PERF_FORMAT_TOTAL_TIME_ENABLED |
|
||||
PERF_FORMAT_TOTAL_TIME_RUNNING;
|
||||
|
||||
@@ -195,7 +196,7 @@ static int test_times(int (attach)(struct perf_evlist *),
|
||||
|
||||
TEST_ASSERT_VAL("failed to detach", !detach(evlist));
|
||||
|
||||
perf_evsel__read(evsel, 0, 0, &count);
|
||||
perf_evsel__read(&evsel->core, 0, 0, &count);
|
||||
|
||||
err = !(count.ena == count.run);
|
||||
|
||||
@@ -204,7 +205,7 @@ static int test_times(int (attach)(struct perf_evlist *),
|
||||
count.ena, count.run);
|
||||
|
||||
out_err:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return !err ? TEST_OK : TEST_FAIL;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/compiler.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "machine.h"
|
||||
@@ -61,7 +62,7 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
|
||||
{
|
||||
struct event_update_event *ev = (struct event_update_event*) event;
|
||||
struct event_update_event_cpus *ev_data;
|
||||
struct cpu_map *map;
|
||||
struct perf_cpu_map *map;
|
||||
|
||||
ev_data = (struct event_update_event_cpus*) ev->data;
|
||||
|
||||
@@ -73,14 +74,14 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
|
||||
TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1);
|
||||
TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2);
|
||||
TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3);
|
||||
cpu_map__put(map);
|
||||
perf_cpu_map__put(map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist;
|
||||
struct evsel *evsel;
|
||||
struct event_name tmp;
|
||||
|
||||
evlist = perf_evlist__new_default();
|
||||
@@ -108,11 +109,11 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
|
||||
TEST_ASSERT_VAL("failed to synthesize attr update name",
|
||||
!perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
|
||||
|
||||
evsel->own_cpus = cpu_map__new("1,2,3");
|
||||
evsel->core.own_cpus = perf_cpu_map__new("1,2,3");
|
||||
|
||||
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
|
||||
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
|
||||
|
||||
cpu_map__put(evsel->own_cpus);
|
||||
perf_cpu_map__put(evsel->core.own_cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -11,8 +11,8 @@ static int perf_evsel__roundtrip_cache_name_test(void)
|
||||
{
|
||||
char name[128];
|
||||
int type, op, err = 0, ret = 0, i, idx;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
|
||||
if (evlist == NULL)
|
||||
return -ENOMEM;
|
||||
@@ -60,15 +60,15 @@ static int perf_evsel__roundtrip_cache_name_test(void)
|
||||
}
|
||||
}
|
||||
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __perf_evsel__name_array_test(const char *names[], int nr_names)
|
||||
{
|
||||
int i, err;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
|
||||
if (evlist == NULL)
|
||||
return -ENOMEM;
|
||||
@@ -91,7 +91,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
|
||||
}
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#include "tests.h"
|
||||
#include "debug.h"
|
||||
|
||||
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
|
||||
static int perf_evsel__test_field(struct evsel *evsel, const char *name,
|
||||
int size, bool should_be_signed)
|
||||
{
|
||||
struct tep_format_field *field = perf_evsel__field(evsel, name);
|
||||
@@ -35,7 +35,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
|
||||
|
||||
int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
|
||||
struct evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
|
||||
int ret = 0;
|
||||
|
||||
if (IS_ERR(evsel)) {
|
||||
@@ -64,7 +64,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
|
||||
if (perf_evsel__test_field(evsel, "next_prio", 4, true))
|
||||
ret = -1;
|
||||
|
||||
perf_evsel__delete(evsel);
|
||||
evsel__delete(evsel);
|
||||
|
||||
evsel = perf_evsel__newtp("sched", "sched_wakeup");
|
||||
|
||||
@@ -85,6 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
|
||||
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
|
||||
ret = -1;
|
||||
|
||||
perf_evsel__delete(evsel);
|
||||
evsel__delete(evsel);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ static u64 fake_callchains[][10] = {
|
||||
static int add_hist_entries(struct hists *hists, struct machine *machine)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct perf_evsel *evsel = hists_to_evsel(hists);
|
||||
struct evsel *evsel = hists_to_evsel(hists);
|
||||
struct perf_sample sample = { .period = 1000, };
|
||||
size_t i;
|
||||
|
||||
@@ -147,7 +147,7 @@ static void del_hist_entries(struct hists *hists)
|
||||
}
|
||||
}
|
||||
|
||||
typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
|
||||
typedef int (*test_fn_t)(struct evsel *, struct machine *);
|
||||
|
||||
#define COMM(he) (thread__comm_str(he->thread))
|
||||
#define DSO(he) (he->ms.map->dso->short_name)
|
||||
@@ -247,7 +247,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
|
||||
}
|
||||
|
||||
/* NO callchain + NO children */
|
||||
static int test1(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test1(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -298,7 +298,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* callcain + NO children */
|
||||
static int test2(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test2(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -446,7 +446,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* NO callchain + children */
|
||||
static int test3(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test3(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -503,7 +503,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* callchain + children */
|
||||
static int test4(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test4(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -694,8 +694,8 @@ int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_u
|
||||
int err = TEST_FAIL;
|
||||
struct machines machines;
|
||||
struct machine *machine;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
size_t i;
|
||||
test_fn_t testcases[] = {
|
||||
test1,
|
||||
@@ -731,7 +731,7 @@ int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_u
|
||||
|
||||
out:
|
||||
/* tear down everything */
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
machines__exit(&machines);
|
||||
|
||||
return err;
|
||||
|
||||
@@ -47,10 +47,10 @@ static struct sample fake_samples[] = {
|
||||
{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
|
||||
};
|
||||
|
||||
static int add_hist_entries(struct perf_evlist *evlist,
|
||||
static int add_hist_entries(struct evlist *evlist,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct addr_location al;
|
||||
struct perf_sample sample = { .period = 100, };
|
||||
size_t i;
|
||||
@@ -108,8 +108,8 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
|
||||
int err = TEST_FAIL;
|
||||
struct machines machines;
|
||||
struct machine *machine;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
|
||||
TEST_ASSERT_VAL("No memory", evlist);
|
||||
|
||||
@@ -321,7 +321,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
|
||||
|
||||
out:
|
||||
/* tear down everything */
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
reset_output_field();
|
||||
machines__exit(&machines);
|
||||
|
||||
|
||||
@@ -62,9 +62,9 @@ static struct sample fake_samples[][5] = {
|
||||
},
|
||||
};
|
||||
|
||||
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
|
||||
static int add_hist_entries(struct evlist *evlist, struct machine *machine)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
struct addr_location al;
|
||||
struct hist_entry *he;
|
||||
struct perf_sample sample = { .period = 1, .weight = 1, };
|
||||
@@ -271,8 +271,8 @@ int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
struct hists *hists, *first_hists;
|
||||
struct machines machines;
|
||||
struct machine *machine = NULL;
|
||||
struct perf_evsel *evsel, *first;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel, *first;
|
||||
struct evlist *evlist = evlist__new();
|
||||
|
||||
if (evlist == NULL)
|
||||
return -ENOMEM;
|
||||
@@ -334,7 +334,7 @@ int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
|
||||
out:
|
||||
/* tear down everything */
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
reset_output_field();
|
||||
machines__exit(&machines);
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ static struct sample fake_samples[] = {
|
||||
static int add_hist_entries(struct hists *hists, struct machine *machine)
|
||||
{
|
||||
struct addr_location al;
|
||||
struct perf_evsel *evsel = hists_to_evsel(hists);
|
||||
struct evsel *evsel = hists_to_evsel(hists);
|
||||
struct perf_sample sample = { .period = 100, };
|
||||
size_t i;
|
||||
|
||||
@@ -113,7 +113,7 @@ static void del_hist_entries(struct hists *hists)
|
||||
}
|
||||
}
|
||||
|
||||
typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
|
||||
typedef int (*test_fn_t)(struct evsel *, struct machine *);
|
||||
|
||||
#define COMM(he) (thread__comm_str(he->thread))
|
||||
#define DSO(he) (he->ms.map->dso->short_name)
|
||||
@@ -122,7 +122,7 @@ typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
|
||||
#define PID(he) (he->thread->tid)
|
||||
|
||||
/* default sort keys (no field) */
|
||||
static int test1(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test1(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -224,7 +224,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* mixed fields and sort keys */
|
||||
static int test2(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test2(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -280,7 +280,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* fields only (no sort key) */
|
||||
static int test3(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test3(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -354,7 +354,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* handle duplicate 'dso' field */
|
||||
static int test4(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test4(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -456,7 +456,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
|
||||
}
|
||||
|
||||
/* full sort keys w/o overhead field */
|
||||
static int test5(struct perf_evsel *evsel, struct machine *machine)
|
||||
static int test5(struct evsel *evsel, struct machine *machine)
|
||||
{
|
||||
int err;
|
||||
struct hists *hists = evsel__hists(evsel);
|
||||
@@ -580,8 +580,8 @@ int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unu
|
||||
int err = TEST_FAIL;
|
||||
struct machines machines;
|
||||
struct machine *machine;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
size_t i;
|
||||
test_fn_t testcases[] = {
|
||||
test1,
|
||||
@@ -618,7 +618,7 @@ int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unu
|
||||
|
||||
out:
|
||||
/* tear down everything */
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
machines__exit(&machines);
|
||||
|
||||
return err;
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
#include <linux/types.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
#include "parse-events.h"
|
||||
#include "evlist.h"
|
||||
@@ -24,7 +26,7 @@
|
||||
} \
|
||||
}
|
||||
|
||||
static int find_comm(struct perf_evlist *evlist, const char *comm)
|
||||
static int find_comm(struct evlist *evlist, const char *comm)
|
||||
{
|
||||
union perf_event *event;
|
||||
struct perf_mmap *md;
|
||||
@@ -65,23 +67,23 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
|
||||
.uses_mmap = true,
|
||||
},
|
||||
};
|
||||
struct thread_map *threads = NULL;
|
||||
struct cpu_map *cpus = NULL;
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct perf_evsel *evsel = NULL;
|
||||
struct perf_thread_map *threads = NULL;
|
||||
struct perf_cpu_map *cpus = NULL;
|
||||
struct evlist *evlist = NULL;
|
||||
struct evsel *evsel = NULL;
|
||||
int found, err = -1;
|
||||
const char *comm;
|
||||
|
||||
threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
CHECK_NOT_NULL__(threads);
|
||||
|
||||
cpus = cpu_map__new(NULL);
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
CHECK_NOT_NULL__(cpus);
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
CHECK_NOT_NULL__(evlist);
|
||||
|
||||
perf_evlist__set_maps(evlist, cpus, threads);
|
||||
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
||||
|
||||
CHECK__(parse_events(evlist, "dummy:u", NULL));
|
||||
CHECK__(parse_events(evlist, "cycles:u", NULL));
|
||||
@@ -90,11 +92,11 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
|
||||
|
||||
evsel = perf_evlist__first(evlist);
|
||||
|
||||
evsel->attr.comm = 1;
|
||||
evsel->attr.disabled = 1;
|
||||
evsel->attr.enable_on_exec = 0;
|
||||
evsel->core.attr.comm = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
evsel->core.attr.enable_on_exec = 0;
|
||||
|
||||
if (perf_evlist__open(evlist) < 0) {
|
||||
if (evlist__open(evlist) < 0) {
|
||||
pr_debug("Unable to open dummy and cycles event\n");
|
||||
err = TEST_SKIP;
|
||||
goto out_err;
|
||||
@@ -107,12 +109,12 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
|
||||
* enabled.
|
||||
*/
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
comm = "Test COMM 1";
|
||||
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
|
||||
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
found = find_comm(evlist, comm);
|
||||
if (found != 1) {
|
||||
@@ -125,16 +127,16 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
|
||||
* disabled with the dummy event still enabled.
|
||||
*/
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
evsel = perf_evlist__last(evlist);
|
||||
|
||||
CHECK__(perf_evsel__disable(evsel));
|
||||
CHECK__(evsel__disable(evsel));
|
||||
|
||||
comm = "Test COMM 2";
|
||||
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
|
||||
|
||||
perf_evlist__disable(evlist);
|
||||
evlist__disable(evlist);
|
||||
|
||||
found = find_comm(evlist, comm);
|
||||
if (found != 1) {
|
||||
@@ -146,11 +148,11 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
|
||||
|
||||
out_err:
|
||||
if (evlist) {
|
||||
perf_evlist__disable(evlist);
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__disable(evlist);
|
||||
evlist__delete(evlist);
|
||||
} else {
|
||||
cpu_map__put(cpus);
|
||||
thread_map__put(threads);
|
||||
perf_cpu_map__put(cpus);
|
||||
perf_thread_map__put(threads);
|
||||
}
|
||||
|
||||
return err;
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/zalloc.h>
|
||||
#include <perf/cpumap.h>
|
||||
#include "cpumap.h"
|
||||
#include "mem2node.h"
|
||||
#include "tests.h"
|
||||
@@ -19,7 +20,7 @@ static struct node {
|
||||
|
||||
static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
{
|
||||
struct cpu_map *map = cpu_map__new(str);
|
||||
struct perf_cpu_map *map = perf_cpu_map__new(str);
|
||||
unsigned long *bm = NULL;
|
||||
int i;
|
||||
|
||||
@@ -32,7 +33,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
}
|
||||
|
||||
if (map)
|
||||
cpu_map__put(map);
|
||||
perf_cpu_map__put(map);
|
||||
else
|
||||
free(bm);
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
#include <inttypes.h>
|
||||
/* For the CLR_() macros */
|
||||
#include <pthread.h>
|
||||
#include <perf/cpumap.h>
|
||||
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
@@ -11,6 +12,7 @@
|
||||
#include "tests.h"
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <perf/evlist.h>
|
||||
|
||||
/*
|
||||
* This test will generate random numbers of calls to some getpid syscalls,
|
||||
@@ -27,16 +29,16 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
{
|
||||
int err = -1;
|
||||
union perf_event *event;
|
||||
struct thread_map *threads;
|
||||
struct cpu_map *cpus;
|
||||
struct perf_evlist *evlist;
|
||||
struct perf_thread_map *threads;
|
||||
struct perf_cpu_map *cpus;
|
||||
struct evlist *evlist;
|
||||
cpu_set_t cpu_set;
|
||||
const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
|
||||
pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void*)getpgid };
|
||||
#define nsyscalls ARRAY_SIZE(syscall_names)
|
||||
unsigned int nr_events[nsyscalls],
|
||||
expected_nr_events[nsyscalls], i, j;
|
||||
struct perf_evsel *evsels[nsyscalls], *evsel;
|
||||
struct evsel *evsels[nsyscalls], *evsel;
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
struct perf_mmap *md;
|
||||
|
||||
@@ -46,7 +48,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
return -1;
|
||||
}
|
||||
|
||||
cpus = cpu_map__new(NULL);
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
if (cpus == NULL) {
|
||||
pr_debug("cpu_map__new\n");
|
||||
goto out_free_threads;
|
||||
@@ -61,13 +63,13 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
goto out_free_cpus;
|
||||
}
|
||||
|
||||
evlist = perf_evlist__new();
|
||||
evlist = evlist__new();
|
||||
if (evlist == NULL) {
|
||||
pr_debug("perf_evlist__new\n");
|
||||
goto out_free_cpus;
|
||||
}
|
||||
|
||||
perf_evlist__set_maps(evlist, cpus, threads);
|
||||
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
||||
|
||||
for (i = 0; i < nsyscalls; ++i) {
|
||||
char name[64];
|
||||
@@ -79,12 +81,12 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
goto out_delete_evlist;
|
||||
}
|
||||
|
||||
evsels[i]->attr.wakeup_events = 1;
|
||||
evsels[i]->core.attr.wakeup_events = 1;
|
||||
perf_evsel__set_sample_id(evsels[i], false);
|
||||
|
||||
perf_evlist__add(evlist, evsels[i]);
|
||||
evlist__add(evlist, evsels[i]);
|
||||
|
||||
if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
|
||||
if (evsel__open(evsels[i], cpus, threads) < 0) {
|
||||
pr_debug("failed to open counter: %s, "
|
||||
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -151,12 +153,12 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
|
||||
}
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
cpus = NULL;
|
||||
threads = NULL;
|
||||
out_free_cpus:
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
out_free_threads:
|
||||
thread_map__put(threads);
|
||||
perf_thread_map__put(threads);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ static int synth_all(struct machine *machine)
|
||||
|
||||
static int synth_process(struct machine *machine)
|
||||
{
|
||||
struct thread_map *map;
|
||||
struct perf_thread_map *map;
|
||||
int err;
|
||||
|
||||
map = thread_map__new_by_pid(getpid());
|
||||
@@ -147,7 +147,7 @@ static int synth_process(struct machine *machine)
|
||||
perf_event__process,
|
||||
machine, 0);
|
||||
|
||||
thread_map__put(map);
|
||||
perf_thread_map__put(map);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -20,11 +20,11 @@
|
||||
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
int err = -1, fd, cpu;
|
||||
struct cpu_map *cpus;
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_cpu_map *cpus;
|
||||
struct evsel *evsel;
|
||||
unsigned int nr_openat_calls = 111, i;
|
||||
cpu_set_t cpu_set;
|
||||
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
char errbuf[BUFSIZ];
|
||||
|
||||
@@ -33,7 +33,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
|
||||
return -1;
|
||||
}
|
||||
|
||||
cpus = cpu_map__new(NULL);
|
||||
cpus = perf_cpu_map__new(NULL);
|
||||
if (cpus == NULL) {
|
||||
pr_debug("cpu_map__new\n");
|
||||
goto out_thread_map_delete;
|
||||
@@ -48,7 +48,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
|
||||
goto out_cpu_map_delete;
|
||||
}
|
||||
|
||||
if (perf_evsel__open(evsel, cpus, threads) < 0) {
|
||||
if (evsel__open(evsel, cpus, threads) < 0) {
|
||||
pr_debug("failed to open counter: %s, "
|
||||
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -116,12 +116,12 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
|
||||
|
||||
perf_evsel__free_counts(evsel);
|
||||
out_close_fd:
|
||||
perf_evsel__close_fd(evsel);
|
||||
perf_evsel__close_fd(&evsel->core);
|
||||
out_evsel_delete:
|
||||
perf_evsel__delete(evsel);
|
||||
evsel__delete(evsel);
|
||||
out_cpu_map_delete:
|
||||
cpu_map__put(cpus);
|
||||
perf_cpu_map__put(cpus);
|
||||
out_thread_map_delete:
|
||||
thread_map__put(threads);
|
||||
perf_thread_map__put(threads);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -32,8 +32,8 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
|
||||
};
|
||||
const char *filename = "/etc/passwd";
|
||||
int flags = O_RDONLY | O_DIRECTORY;
|
||||
struct perf_evlist *evlist = perf_evlist__new();
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = evlist__new();
|
||||
struct evsel *evsel;
|
||||
int err = -1, i, nr_events = 0, nr_polls = 0;
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
|
||||
@@ -48,7 +48,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
|
||||
goto out_delete_evlist;
|
||||
}
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
evlist__add(evlist, evsel);
|
||||
|
||||
err = perf_evlist__create_maps(evlist, &opts.target);
|
||||
if (err < 0) {
|
||||
@@ -58,9 +58,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
|
||||
|
||||
perf_evsel__config(evsel, &opts, NULL);
|
||||
|
||||
thread_map__set_pid(evlist->threads, 0, getpid());
|
||||
perf_thread_map__set_pid(evlist->core.threads, 0, getpid());
|
||||
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
pr_debug("perf_evlist__open: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -74,7 +74,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
|
||||
goto out_delete_evlist;
|
||||
}
|
||||
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
/*
|
||||
* Generate the event:
|
||||
@@ -134,7 +134,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
|
||||
out_ok:
|
||||
err = 0;
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -14,9 +14,9 @@
|
||||
int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __maybe_unused)
|
||||
{
|
||||
int err = -1, fd;
|
||||
struct perf_evsel *evsel;
|
||||
struct evsel *evsel;
|
||||
unsigned int nr_openat_calls = 111, i;
|
||||
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
char sbuf[STRERR_BUFSIZE];
|
||||
char errbuf[BUFSIZ];
|
||||
|
||||
@@ -57,10 +57,10 @@ int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __m
|
||||
|
||||
err = 0;
|
||||
out_close_fd:
|
||||
perf_evsel__close_fd(evsel);
|
||||
perf_evsel__close_fd(&evsel->core);
|
||||
out_evsel_delete:
|
||||
perf_evsel__delete(evsel);
|
||||
evsel__delete(evsel);
|
||||
out_thread_map_delete:
|
||||
thread_map__put(threads);
|
||||
perf_thread_map__put(threads);
|
||||
return err;
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -11,7 +11,7 @@
|
||||
#include "util.h"
|
||||
#include "debug.h"
|
||||
|
||||
static int process_event(struct perf_evlist **pevlist, union perf_event *event)
|
||||
static int process_event(struct evlist **pevlist, union perf_event *event)
|
||||
{
|
||||
struct perf_sample sample;
|
||||
|
||||
@@ -39,14 +39,14 @@ static int process_event(struct perf_evlist **pevlist, union perf_event *event)
|
||||
|
||||
static int process_events(union perf_event **events, size_t count)
|
||||
{
|
||||
struct perf_evlist *evlist = NULL;
|
||||
struct evlist *evlist = NULL;
|
||||
int err = 0;
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < count && !err; i++)
|
||||
err = process_event(&evlist, events[i]);
|
||||
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -50,8 +50,8 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
|
||||
};
|
||||
cpu_set_t cpu_mask;
|
||||
size_t cpu_mask_size = sizeof(cpu_mask);
|
||||
struct perf_evlist *evlist = perf_evlist__new_dummy();
|
||||
struct perf_evsel *evsel;
|
||||
struct evlist *evlist = perf_evlist__new_dummy();
|
||||
struct evsel *evsel;
|
||||
struct perf_sample sample;
|
||||
const char *cmd = "sleep";
|
||||
const char *argv[] = { cmd, "1", NULL, };
|
||||
@@ -130,7 +130,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
|
||||
* Call sys_perf_event_open on all the fds on all the evsels,
|
||||
* grouping them if asked to.
|
||||
*/
|
||||
err = perf_evlist__open(evlist);
|
||||
err = evlist__open(evlist);
|
||||
if (err < 0) {
|
||||
pr_debug("perf_evlist__open: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
@@ -153,7 +153,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
|
||||
* Now that all is properly set up, enable the events, they will
|
||||
* count just on workload.pid, which will start...
|
||||
*/
|
||||
perf_evlist__enable(evlist);
|
||||
evlist__enable(evlist);
|
||||
|
||||
/*
|
||||
* Now!
|
||||
@@ -325,7 +325,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
|
||||
++errs;
|
||||
}
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(evlist);
|
||||
evlist__delete(evlist);
|
||||
out:
|
||||
return (err < 0 || errs > 0) ? -1 : 0;
|
||||
}
|
||||
|
||||
@@ -153,11 +153,13 @@ static bool samples_same(const struct perf_sample *s1,
|
||||
|
||||
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
|
||||
{
|
||||
struct perf_evsel evsel = {
|
||||
struct evsel evsel = {
|
||||
.needs_swap = false,
|
||||
.attr = {
|
||||
.sample_type = sample_type,
|
||||
.read_format = read_format,
|
||||
.core = {
|
||||
. attr = {
|
||||
.sample_type = sample_type,
|
||||
.read_format = read_format,
|
||||
},
|
||||
},
|
||||
};
|
||||
union perf_event *event;
|
||||
@@ -221,10 +223,10 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
|
||||
int err, ret = -1;
|
||||
|
||||
if (sample_type & PERF_SAMPLE_REGS_USER)
|
||||
evsel.attr.sample_regs_user = sample_regs;
|
||||
evsel.core.attr.sample_regs_user = sample_regs;
|
||||
|
||||
if (sample_type & PERF_SAMPLE_REGS_INTR)
|
||||
evsel.attr.sample_regs_intr = sample_regs;
|
||||
evsel.core.attr.sample_regs_intr = sample_regs;
|
||||
|
||||
for (i = 0; i < sizeof(regs); i++)
|
||||
*(i + (u8 *)regs) = i & 0xfe;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user