KVM: arm64: coresight: Give TRBE enabled state to KVM

Currently in nVHE, KVM has to check if TRBE is enabled on every guest
switch even if it was never used. Because it's a debug feature and is
more likely to not be used than used, give KVM the TRBE buffer status to
allow a much simpler and faster do-nothing path in the hyp.

Protected mode now disables trace regardless of TRBE (because
trfcr_while_in_guest is always 0), which was not previously done.
However, it continues to flush whenever the buffer is enabled,
regardless of the filter status. This avoids the hypothetical case of a
host that had disabled the filter but not yet flushed: that case would
be missed if the flush were done only while the filter was enabled.

Signed-off-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20250106142446.628923-6-james.clark@linaro.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
James Clark
2025-01-06 14:24:40 +00:00
committed by Marc Zyngier
parent a2b579c41f
commit a665e3bc88
4 changed files with 80 additions and 29 deletions

View File

@@ -614,6 +614,8 @@ struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2
#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
unsigned long flags;
struct kvm_cpu_context host_ctxt;
@@ -659,6 +661,9 @@ struct kvm_host_data {
u64 mdcr_el2;
} host_debug_state;
/* Guest trace filter value */
u64 trfcr_while_in_guest;
/* Number of programmable event counters (PMCR_EL0.N) for this CPU */
unsigned int nr_event_counters;
@@ -1381,6 +1386,8 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
void kvm_enable_trbe(void);
void kvm_disable_trbe(void);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1388,6 +1395,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
{
return false;
}
static inline void kvm_enable_trbe(void) {}
static inline void kvm_disable_trbe(void) {}
#endif
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);

View File

@@ -81,9 +81,15 @@ void kvm_init_host_debug_data(void)
!(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
host_data_set_flag(HAS_SPE);
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
host_data_set_flag(HAS_TRBE);
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT)) {
/* Force disable trace in protected mode in case of no TRBE */
if (is_protected_kvm_enabled())
host_data_set_flag(EL1_TRACING_CONFIGURED);
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
!(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
host_data_set_flag(HAS_TRBE);
}
}
/*
@@ -219,3 +225,23 @@ void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();
}
/*
 * Tell KVM that the TRBE buffer has just been enabled on this CPU, by
 * setting TRBE_ENABLED in the per-CPU kvm_host_data flags. The nVHE hyp
 * tests this flag (see __trace_needs_switch()/__trace_needs_drain()) so
 * it can take a fast do-nothing path on guest switch when TRBE was never
 * used.
 *
 * No-op on VHE and in protected mode, where the hyp does not consume the
 * flag. Must be called with preemption disabled because it writes
 * per-CPU state (WARN_ON_ONCE otherwise).
 */
void kvm_enable_trbe(void)
{
if (has_vhe() || is_protected_kvm_enabled() ||
WARN_ON_ONCE(preemptible()))
return;
host_data_set_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_enable_trbe);
/*
 * Tell KVM that the TRBE buffer has been disabled on this CPU, clearing
 * TRBE_ENABLED in the per-CPU kvm_host_data flags. Mirror image of
 * kvm_enable_trbe(); same VHE/protected-mode/preemption constraints.
 */
void kvm_disable_trbe(void)
{
if (has_vhe() || is_protected_kvm_enabled() ||
WARN_ON_ONCE(preemptible()))
return;
host_data_clear_flag(TRBE_ENABLED);
}
EXPORT_SYMBOL_GPL(kvm_disable_trbe);

View File

@@ -51,32 +51,45 @@ static void __debug_restore_spe(u64 pmscr_el1)
write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
static void __trace_do_switch(u64 *saved_trfcr, u64 new_trfcr)
{
*trfcr_el1 = 0;
/* Check if the TRBE is enabled */
if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
return;
/*
* Prohibit trace generation while we are in guest.
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
*trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
*saved_trfcr = read_sysreg_el1(SYS_TRFCR);
write_sysreg_el1(new_trfcr, SYS_TRFCR);
}
static void __debug_restore_trace(u64 trfcr_el1)
static bool __trace_needs_drain(void)
{
if (!trfcr_el1)
return;
if (is_protected_kvm_enabled() && host_data_test_flag(HAS_TRBE))
return read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E;
/* Restore trace filter controls */
write_sysreg_el1(trfcr_el1, SYS_TRFCR);
return host_data_test_flag(TRBE_ENABLED);
}
/*
 * The trace filter (TRFCR_EL1) only needs to be context-switched around
 * a guest run when the host has TRBE enabled on this CPU, or when an EL1
 * trace filter is configured; otherwise guest entry/exit can skip all
 * trace handling.
 */
static bool __trace_needs_switch(void)
{
return host_data_test_flag(TRBE_ENABLED) ||
host_data_test_flag(EL1_TRACING_CONFIGURED);
}
static void __trace_switch_to_guest(void)
{
/* Unsupported with TRBE so disable */
if (host_data_test_flag(TRBE_ENABLED))
*host_data_ptr(trfcr_while_in_guest) = 0;
__trace_do_switch(host_data_ptr(host_debug_state.trfcr_el1),
*host_data_ptr(trfcr_while_in_guest));
if (__trace_needs_drain()) {
isb();
tsb_csync();
}
}
static void __trace_switch_to_host(void)
{
__trace_do_switch(host_data_ptr(trfcr_while_in_guest),
*host_data_ptr(host_debug_state.trfcr_el1));
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
@@ -84,9 +97,9 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
/* Disable and flush SPE data generation */
if (host_data_test_flag(HAS_SPE))
__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
/* Disable and flush Self-Hosted Trace generation */
if (host_data_test_flag(HAS_TRBE))
__debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1));
if (__trace_needs_switch())
__trace_switch_to_guest();
}
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -98,8 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
if (host_data_test_flag(HAS_SPE))
__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
if (host_data_test_flag(HAS_TRBE))
__debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1));
if (__trace_needs_switch())
__trace_switch_to_host();
}
void __debug_switch_to_host(struct kvm_vcpu *vcpu)

View File

@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include "coresight-self-hosted-trace.h"
@@ -221,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
*/
trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
kvm_enable_trbe();
/* Synchronize the TRBE enable event */
isb();
@@ -239,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
*/
trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
kvm_disable_trbe();
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();