drivers/perf: apple_m1: Refactor event select/filter configuration

Supporting guest mode events will necessitate programming two event
filters. Prepare by splitting up the programming of the event selector +
event filter into separate helpers.

Opportunistically replace RMW patterns with sysreg_clear_set_s().

Tested-by: Janne Grunau <j@jannau.net>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250305202641.428114-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
Oliver Upton
2025-03-05 12:26:28 -08:00
parent 0ad2507d5d
commit 75ecffc361

View File

@@ -327,11 +327,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
static void m1_pmu_configure_counter(unsigned int index, u8 event,
bool user, bool kernel)
static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
bool kernel)
{
u64 val, user_bit, kernel_bit;
int shift;
u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -346,19 +345,24 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
clear = set = 0;
if (user)
val |= user_bit;
set |= user_bit;
else
val &= ~user_bit;
clear |= user_bit;
if (kernel)
val |= kernel_bit;
set |= kernel_bit;
else
val &= ~kernel_bit;
clear |= kernel_bit;
write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
}
static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
{
u64 clear = 0, set = 0;
int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -371,21 +375,29 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
val &= ~((u64)0xff << shift);
val |= (u64)event << shift;
write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
clear |= (u64)0xff << shift;
set |= (u64)event << shift;
sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
val &= ~((u64)0xff << shift);
val |= (u64)event << shift;
write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
clear |= (u64)0xff << shift;
set |= (u64)event << shift;
sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
/*
 * Configure a PMU counter from its perf config_base: decode the
 * kernel/user filter bits and the event number, then program the
 * event filter and event selector via the dedicated helpers.
 */
static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
{
bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
bool user = config_base & M1_PMU_CFG_COUNT_USER;
/* M1_PMU_CFG_EVENT masks out the raw event number from config_base */
u8 evt = config_base & M1_PMU_CFG_EVENT;
__m1_pmu_configure_event_filter(index, user, kernel);
__m1_pmu_configure_eventsel(index, evt);
}
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -400,7 +412,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
m1_pmu_disable_counter(event->hw.idx);
isb();
m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();