Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 11:06:41 -05:00)
libperf evsel: Rename own_cpus to pmu_cpus
own_cpus is generally the cpumask from the PMU. Rename to pmu_cpus to try to make this clearer. Variable rename with no other changes.

Reviewed-by: Thomas Falcon <thomas.falcon@intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20250719030517.1990983-7-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
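The rename is purely mechanical, but the underlying idea is that an evsel's usable CPUs are the user-requested CPUs restricted to the PMU's cpumask. Below is a small standalone sketch (not part of this patch) of that intersection using libperf's public cpumap API from <perf/cpumap.h>; it mirrors what __perf_evlist__propagate_maps does with evsel->pmu_cpus in the first hunk. The CPU lists and the build command are made-up examples.

/*
 * Illustrative sketch only, not part of this patch: build the kind of
 * cpu map that ends up in evsel->cpus by intersecting a user-requested
 * CPU list with a PMU cpumask (what evsel->pmu_cpus holds). The CPU
 * list strings below are invented for the example.
 */
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	/* CPUs the user asked for, e.g. via "-C 0-3". */
	struct perf_cpu_map *user = perf_cpu_map__new("0-3");
	/* A PMU cpumask as it might be read from sysfs for an uncore PMU. */
	struct perf_cpu_map *pmu = perf_cpu_map__new("0,2,4,6");
	/* Keep only CPUs valid for both, as __perf_evlist__propagate_maps does. */
	struct perf_cpu_map *evsel_cpus = perf_cpu_map__intersect(user, pmu);

	for (int i = 0; i < perf_cpu_map__nr(evsel_cpus); i++)
		printf("cpu %d\n", perf_cpu_map__cpu(evsel_cpus, i).cpu);

	perf_cpu_map__put(evsel_cpus);
	perf_cpu_map__put(pmu);
	perf_cpu_map__put(user);
	return 0;
}

With those made-up lists the sketch would print CPUs 0 and 2; building it is assumed to be a plain "cc sketch.c -lperf" against an installed libperf.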
@@ -46,7 +46,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 * are valid by intersecting with those of the PMU.
 */
 perf_cpu_map__put(evsel->cpus);
-evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
+evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->pmu_cpus);
 
 /*
 * Empty cpu lists would eventually get opened as "any" so remove
@@ -61,7 +61,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 list_for_each_entry_from(next, &evlist->entries, node)
 next->idx--;
 }
-} else if (!evsel->own_cpus || evlist->has_user_cpus ||
+} else if (!evsel->pmu_cpus || evlist->has_user_cpus ||
 (!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
 /*
 * The PMU didn't specify a default cpu map, this isn't a core
@@ -72,13 +72,13 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 */
 perf_cpu_map__put(evsel->cpus);
 evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
-} else if (evsel->cpus != evsel->own_cpus) {
+} else if (evsel->cpus != evsel->pmu_cpus) {
 /*
 * No user requested cpu map but the PMU cpu map doesn't match
 * the evsel's. Reset it back to the PMU cpu map.
 */
 perf_cpu_map__put(evsel->cpus);
-evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
+evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
 }
 
 if (evsel->system_wide) {
@@ -46,7 +46,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
 assert(evsel->mmap == NULL); /* If not munmap wasn't called. */
 assert(evsel->sample_id == NULL); /* If not free_id wasn't called. */
 perf_cpu_map__put(evsel->cpus);
-perf_cpu_map__put(evsel->own_cpus);
+perf_cpu_map__put(evsel->pmu_cpus);
 perf_thread_map__put(evsel->threads);
 free(evsel);
 }
@@ -99,7 +99,7 @@ struct perf_evsel {
 * cpu map for opening the event on, for example, the first CPU on a
 * socket for an uncore event.
 */
-struct perf_cpu_map *own_cpus;
+struct perf_cpu_map *pmu_cpus;
 struct perf_thread_map *threads;
 struct xyarray *fd;
 struct xyarray *mmap;
@@ -109,8 +109,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
 TEST_ASSERT_VAL("failed to synthesize attr update name",
 !perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
 
-perf_cpu_map__put(evsel->core.own_cpus);
-evsel->core.own_cpus = perf_cpu_map__new("1,2,3");
+perf_cpu_map__put(evsel->core.pmu_cpus);
+evsel->core.pmu_cpus = perf_cpu_map__new("1,2,3");
 
 TEST_ASSERT_VAL("failed to synthesize attr update cpus",
 !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
@@ -488,7 +488,7 @@ struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
 return NULL;
 
 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
-evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
+evsel->core.pmu_cpus = perf_cpu_map__get(orig->core.pmu_cpus);
 evsel->core.threads = perf_thread_map__get(orig->core.threads);
 evsel->core.nr_members = orig->core.nr_members;
 evsel->core.system_wide = orig->core.system_wide;
@@ -1527,7 +1527,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
 attr->exclude_user = 1;
 }
 
-if (evsel->core.own_cpus || evsel->unit)
+if (evsel->core.pmu_cpus || evsel->unit)
 evsel->core.attr.read_format |= PERF_FORMAT_ID;
 
 /*
@@ -1680,7 +1680,7 @@ void evsel__exit(struct evsel *evsel)
 evsel__free_config_terms(evsel);
 cgroup__put(evsel->cgrp);
 perf_cpu_map__put(evsel->core.cpus);
-perf_cpu_map__put(evsel->core.own_cpus);
+perf_cpu_map__put(evsel->core.pmu_cpus);
 perf_thread_map__put(evsel->core.threads);
 zfree(&evsel->group_name);
 zfree(&evsel->name);
@@ -4507,8 +4507,8 @@ int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused
 case PERF_EVENT_UPDATE__CPUS:
 map = cpu_map__new_data(&ev->cpus.cpus);
 if (map) {
-perf_cpu_map__put(evsel->core.own_cpus);
-evsel->core.own_cpus = map;
+perf_cpu_map__put(evsel->core.pmu_cpus);
+evsel->core.pmu_cpus = map;
 } else
 pr_err("failed to get event_update cpus\n");
 default:
@@ -320,7 +320,7 @@ __add_event(struct list_head *list, int *idx,
 
 (*idx)++;
 evsel->core.cpus = cpus;
-evsel->core.own_cpus = perf_cpu_map__get(cpus);
+evsel->core.pmu_cpus = perf_cpu_map__get(cpus);
 evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
 evsel->core.is_pmu_core = is_pmu_core;
 evsel->pmu = pmu;
@@ -2045,7 +2045,7 @@ int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struc
 int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
 perf_event__handler_t process)
 {
-struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
+struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
 struct perf_record_event_update *ev;
 int err;
 
@@ -2126,7 +2126,7 @@ int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlis
 }
 }
 
-if (evsel->core.own_cpus) {
+if (evsel->core.pmu_cpus) {
 err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
 if (err < 0) {
 pr_err("Couldn't synthesize evsel cpus.\n");
@@ -357,10 +357,10 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, struct evsel *evsel, u64 *resu
 /*
 * "Any CPU" event that can be scheduled on any CPU in
 * the PMU's cpumask. The PMU cpumask should be saved in
-* own_cpus. If not present fall back to max.
+* pmu_cpus. If not present fall back to max.
 */
-if (!perf_cpu_map__is_empty(evsel->core.own_cpus))
-*result = perf_cpu_map__nr(evsel->core.own_cpus);
+if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
+*result = perf_cpu_map__nr(evsel->core.pmu_cpus);
 else
 *result = cpu__max_present_cpu().cpu;
 }
@@ -386,12 +386,12 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, struct evsel *evsel, u64 *resu
 /*
 * "Any CPU" event that can be scheduled on any CPU in
 * the PMU's cpumask. The PMU cpumask should be saved in
-* own_cpus, if not present then just the online cpu
+* pmu_cpus, if not present then just the online cpu
 * mask.
 */
-if (!perf_cpu_map__is_empty(evsel->core.own_cpus)) {
+if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus)) {
 struct perf_cpu_map *tmp =
-perf_cpu_map__intersect(online, evsel->core.own_cpus);
+perf_cpu_map__intersect(online, evsel->core.pmu_cpus);
 
 *result = perf_cpu_map__nr(tmp);
 perf_cpu_map__put(tmp);
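As background for the tool_pmu hunks above (not part of the change): the cpumask that ends up in pmu_cpus is typically exposed by the kernel as a "cpumask" (or "cpus") file under /sys/bus/event_source/devices/<pmu>/. A hypothetical helper, not taken from the perf sources, that turns such a file into a perf_cpu_map might look like this:

/*
 * Hypothetical helper, not from the kernel tree: parse a PMU's sysfs
 * cpumask file into a perf_cpu_map, i.e. the kind of map stored in
 * evsel->pmu_cpus (formerly own_cpus).
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <perf/cpumap.h>

static struct perf_cpu_map *read_pmu_cpumask(const char *pmu_name)
{
	char path[PATH_MAX], buf[4096];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/cpumask", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return NULL;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return NULL;
	}
	fclose(f);
	/* Strip the trailing newline before parsing. */
	buf[strcspn(buf, "\n")] = '\0';
	/* perf_cpu_map__new() accepts a CPU list string such as "0,18". */
	return perf_cpu_map__new(buf);
}

A caller would drop its reference with perf_cpu_map__put() when done, mirroring the puts seen throughout the diff.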