mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 13:30:45 -05:00
Merge tag 'kvmarm-fixes-6.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.18, take #1

Improvements and bug fixes:

- Fix the handling of ZCR_EL2 in NV VMs (20250926194108.84093-1-oliver.upton@linux.dev)
- Pick the correct translation regime when doing a PTW on the back of a SEA (20250926224246.731748-1-oliver.upton@linux.dev)
- Prevent userspace from injecting an event into a vcpu that isn't initialised yet (20250930085237.108326-1-oliver.upton@linux.dev)
- Move timer save/restore to the sysreg handling code, fixing EL2 timer access in the process (20250929160458.3351788-1-maz@kernel.org)
- Add FGT-based trapping of MDSCR_EL1 to reduce the overhead of debug (20250924235150.617451-1-oliver.upton@linux.dev)
- Fix trapping configuration when the host isn't GICv3 (20251007160704.1673584-1-sascha.bischoff@arm.com)
- Improve the detection of HCR_EL2.E2H being RES1 (20251009121239.29370-1-maz@kernel.org)
- Drop a spurious 'break' statement in the S1 PTW (20250930135621.162050-1-osama.abdelkader@gmail.com)
- Don't try to access SPE when owned by EL3 (20251010174707.1684200-1-mukesh.ojha@oss.qualcomm.com)

Documentation updates:

- Document the failure modes of event injection (20250930233620.124607-1-oliver.upton@linux.dev)
- Document that a GICv3 guest can be created on a GICv5 host with FEAT_GCIE_LEGACY (20251007154848.1640444-1-sascha.bischoff@arm.com)

Selftest improvements:

- Add a selftest for the effective value of HCR_EL2.AMO (20250926224454.734066-1-oliver.upton@linux.dev)
- Address build warning in the timer selftest when building with clang (20250926155838.2612205-1-seanjc@google.com)
- Teach irq_fd selftests about non-x86 architectures (20250930193301.119859-1-oliver.upton@linux.dev)
- Add missing sysregs to the set_id_regs selftest (20251012154352.61133-1-zenghui.yu@linux.dev)
- Fix vcpu allocation in the vgic_lpi_stress selftest (20251008154520.54801-1-zenghui.yu@linux.dev)
- Correctly enable interrupts in the vgic_lpi_stress selftest (20251007195254.260539-1-oliver.upton@linux.dev)
@@ -1229,6 +1229,9 @@ It is not possible to read back a pending external abort (injected via
KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered
directly to the virtual CPU).

Calling this ioctl on a vCPU that hasn't been initialized will return
-ENOEXEC.

::

  struct kvm_vcpu_events {

@@ -1309,6 +1312,8 @@ exceptions by manipulating individual registers using the KVM_SET_ONE_REG API.

See KVM_GET_VCPU_EVENTS for the data structure.

Calling this ioctl on a vCPU that hasn't been initialized will return
-ENOEXEC.

4.33 KVM_GET_DEBUGREGS
----------------------

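Probing the new failure mode from userspace is straightforward. A minimal
sketch (vm_fd obtained via KVM_CREATE_VM in the usual way; error handling
elided):

        #include <errno.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        struct kvm_vcpu_events events;
        int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);

        /* No KVM_ARM_VCPU_INIT yet, so the vCPU is uninitialized */
        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0 && errno == ENOEXEC)
                ;       /* expected: both event ioctls now fail with -ENOEXEC */
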
@@ -13,7 +13,8 @@ will act as the VM interrupt controller, requiring emulated user-space devices
to inject interrupts to the VGIC instead of directly to CPUs. It is not
possible to create both a GICv3 and GICv2 on the same VM.

Creating a guest GICv3 device requires a host GICv3 as well.
Creating a guest GICv3 device requires a GICv3 host, or a GICv5 host with
support for FEAT_GCIE_LEGACY.


Groups:

@@ -24,22 +24,48 @@
 * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
 * can reset into an UNKNOWN state and might not read as 1 until it has
 * been initialized explicitly.
 *
 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
 * don't advertise it (they predate this relaxation).
 *
 * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
 * indicating whether the CPU is running in E2H mode.
 */
        mrs_s x1, SYS_ID_AA64MMFR4_EL1
        sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
        cmp x1, #0
        b.ge .LnVHE_\@
        b.lt .LnE2H0_\@

        /*
         * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
         * as such via ID_AA64MMFR4_EL1.E2H0:
         *
         * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
         *   have HCR_EL2.E2H implemented as RAO/WI.
         *
         * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
         *   reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
         *   guests on these hosts can write to HCR_EL2.E2H without
         *   trapping to the hypervisor, but these writes have no
         *   functional effect.
         *
         * Handle both cases by checking for an essential VHE property
         * (system register remapping) to decide whether we're
         * effectively VHE-only or not.
         */
        msr_hcr_el2 x0                  // Setup HCR_EL2 as nVHE
        isb
        mov x1, #1                      // Write something to FAR_EL1
        msr far_el1, x1
        isb
        mov x1, #2                      // Try to overwrite it via FAR_EL2
        msr far_el2, x1
        isb
        mrs x1, far_el1                 // If we see the latest write in FAR_EL1,
        cmp x1, #2                      // we can safely assume we are VHE only.
        b.ne .LnVHE_\@                  // Otherwise, we know that nVHE works.

.LnE2H0_\@:
        orr x0, x0, #HCR_E2H
.LnVHE_\@:
        msr_hcr_el2 x0
        isb
.LnVHE_\@:
.endm

.macro __init_el2_sctlr

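The probe above leans on VHE's register remapping: when HCR_EL2.E2H is
(effectively) 1, FAR_EL2 accesses are remapped onto FAR_EL1. A C rendering of
the same logic, for readability only (sysreg_read()/sysreg_write() are
hypothetical stand-ins for the MRS/MSR instructions):

        static bool hcr_e2h_is_res1(void)
        {
                sysreg_write(FAR_EL1, 1);       /* seed FAR_EL1 */
                sysreg_write(FAR_EL2, 2);       /* try to overwrite via FAR_EL2 */

                /*
                 * With E2H stuck at 1, FAR_EL2 aliases FAR_EL1 and the second
                 * write shows through; otherwise FAR_EL1 still reads as 1.
                 */
                return sysreg_read(FAR_EL1) == 2;
        }
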
@@ -816,6 +816,11 @@ struct kvm_vcpu_arch {
        u64 hcrx_el2;
        u64 mdcr_el2;

        struct {
                u64 r;
                u64 w;
        } fgt[__NR_FGT_GROUP_IDS__];

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

@@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void)
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
void check_feature_map(void);
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);

static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
{
        switch (reg) {
        case HFGRTR_EL2:
        case HFGWTR_EL2:
                return HFGRTR_GROUP;
        case HFGITR_EL2:
                return HFGITR_GROUP;
        case HDFGRTR_EL2:
        case HDFGWTR_EL2:
                return HDFGRTR_GROUP;
        case HAFGRTR_EL2:
                return HAFGRTR_GROUP;
        case HFGRTR2_EL2:
        case HFGWTR2_EL2:
                return HFGRTR2_GROUP;
        case HFGITR2_EL2:
                return HFGITR2_GROUP;
        case HDFGRTR2_EL2:
        case HDFGWTR2_EL2:
                return HDFGRTR2_GROUP;
        default:
                BUILD_BUG_ON(1);
        }
}

#define vcpu_fgt(vcpu, reg)                                             \
        ({                                                              \
                enum fgt_group_id id = __fgt_reg_to_group_id(reg);      \
                u64 *p;                                                 \
                switch (reg) {                                          \
                case HFGWTR_EL2:                                        \
                case HDFGWTR_EL2:                                       \
                case HFGWTR2_EL2:                                       \
                case HDFGWTR2_EL2:                                      \
                        p = &(vcpu)->arch.fgt[id].w;                    \
                        break;                                          \
                default:                                                \
                        p = &(vcpu)->arch.fgt[id].r;                    \
                        break;                                          \
                }                                                       \
                                                                        \
                p;                                                      \
        })

#endif /* __ARM64_KVM_HOST_H__ */

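A short usage sketch of the new per-vCPU FGT storage (hypothetical caller;
the real consumers appear in the config.c and switch.h hunks below): the .w
slot backs the write-trap registers (HFGWTR_EL2 and friends), everything
else lands in .r.

        /* force write-trapping of TCR_EL1, as the Ampere workaround does */
        *vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;

        /* the read-trap side of the same group lives in the .r slot */
        u64 rtraps = *vcpu_fgt(vcpu, HFGRTR_EL2);
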
@@ -66,7 +66,7 @@ static int nr_timers(struct kvm_vcpu *vcpu)

u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:

@@ -85,7 +85,7 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt)

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:

@@ -104,7 +104,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:

@@ -126,7 +126,7 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt);

        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:

@@ -146,16 +146,6 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
        }
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
        if (!ctxt->offset.vm_offset) {
                WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
                return;
        }

        WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);

@@ -343,7 +333,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
        u64 ns;

        ctx = container_of(hrt, struct arch_timer_context, hrtimer);
        vcpu = ctx->vcpu;
        vcpu = timer_context_to_vcpu(ctx);

        trace_kvm_timer_hrtimer_expire(ctx);

@@ -436,8 +426,9 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
         *
         * But hey, it's fast, right?
         */
        if (is_hyp_ctxt(ctx->vcpu) &&
            (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
        if (is_hyp_ctxt(vcpu) &&
            (ctx == vcpu_vtimer(vcpu) || ctx == vcpu_ptimer(vcpu))) {
                unsigned long val = timer_get_ctl(ctx);
                __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
                timer_set_ctl(ctx, val);

@@ -470,7 +461,7 @@ static void timer_emulate(struct arch_timer_context *ctx)
        trace_kvm_timer_emulate(ctx, should_fire);

        if (should_fire != ctx->irq.level)
                kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
                kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx);

        kvm_timer_update_status(ctx, should_fire);

@@ -498,7 +489,7 @@ static void set_cntpoff(u64 cntpoff)

static void timer_save_state(struct arch_timer_context *ctx)
{
        struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
        struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;

@@ -609,7 +600,7 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)

static void timer_restore_state(struct arch_timer_context *ctx)
{
        struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
        struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx));
        enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
        unsigned long flags;

@@ -668,7 +659,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
        struct kvm_vcpu *vcpu = ctx->vcpu;
        struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx);
        bool phys_active = false;

        /*

@@ -677,7 +668,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
         * this point and the register restoration, we'll take the
         * interrupt anyway.
         */
        kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
        kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx);

        if (irqchip_in_kernel(vcpu->kvm))
                phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));

@@ -1063,7 +1054,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
        struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
        struct kvm *kvm = vcpu->kvm;

        ctxt->vcpu = vcpu;
        ctxt->timer_id = timerid;

        if (timerid == TIMER_VTIMER)
                ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;

@@ -1121,49 +1112,6 @@ void kvm_timer_cpu_down(void)
        disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *timer;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_TIMER_CNT:
                if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
                              &vcpu->kvm->arch.flags)) {
                        timer = vcpu_vtimer(vcpu);
                        timer_set_offset(timer, kvm_phys_timer_read() - value);
                }
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;
        case KVM_REG_ARM_PTIMER_CTL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_PTIMER_CNT:
                if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
                              &vcpu->kvm->arch.flags)) {
                        timer = vcpu_ptimer(vcpu);
                        timer_set_offset(timer, kvm_phys_timer_read() - value);
                }
                break;
        case KVM_REG_ARM_PTIMER_CVAL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;

        default:
                return -1;
        }

        return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
        /*

@@ -1180,31 +1128,6 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
        return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CTL);
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CNT);
        case KVM_REG_ARM_TIMER_CVAL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_vtimer(vcpu), TIMER_REG_CVAL);
        case KVM_REG_ARM_PTIMER_CTL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_ptimer(vcpu), TIMER_REG_CTL);
        case KVM_REG_ARM_PTIMER_CNT:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_ptimer(vcpu), TIMER_REG_CNT);
        case KVM_REG_ARM_PTIMER_CVAL:
                return kvm_arm_timer_read(vcpu,
                                          vcpu_ptimer(vcpu), TIMER_REG_CVAL);
        }
        return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer,
                              enum kvm_arch_timer_regs treg)

@@ -642,6 +642,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vcpu->arch.hcr_el2 |= HCR_TWI;

        vcpu_set_pauth_traps(vcpu);
        kvm_vcpu_load_fgt(vcpu);

        if (is_protected_kvm_enabled()) {
                kvm_call_hyp_nvhe(__pkvm_vcpu_load,

@@ -1794,6 +1795,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_GET_VCPU_EVENTS: {
                struct kvm_vcpu_events events;

                if (!kvm_vcpu_initialized(vcpu))
                        return -ENOEXEC;

                if (kvm_arm_vcpu_get_events(vcpu, &events))
                        return -EINVAL;

@@ -1805,6 +1809,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_SET_VCPU_EVENTS: {
                struct kvm_vcpu_events events;

                if (!kvm_vcpu_initialized(vcpu))
                        return -ENOEXEC;

                if (copy_from_user(&events, argp, sizeof(events)))
                        return -EFAULT;

@@ -91,7 +91,6 @@ static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 o
        case OP_AT_S1E2W:
        case OP_AT_S1E2A:
                return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
                break;
        default:
                return (vcpu_el2_e2h_is_set(vcpu) &&
                        vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10;

@@ -1602,13 +1601,17 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
                        .fn = match_s1_desc,
                        .priv = &dm,
                },
                .regime = TR_EL10,
                .as_el0 = false,
                .pan = false,
        };
        struct s1_walk_result wr = {};
        int ret;

        if (is_hyp_ctxt(vcpu))
                wi.regime = vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
        else
                wi.regime = TR_EL10;

        ret = setup_s1_walk(vcpu, &wi, &wr, va);
        if (ret)
                return ret;

@@ -5,6 +5,8 @@
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/sysreg.h>

/*

@@ -1428,3 +1430,91 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
                break;
        }
}

static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
{
        switch (reg) {
        case HFGRTR_EL2:
                return &hfgrtr_masks;
        case HFGWTR_EL2:
                return &hfgwtr_masks;
        case HFGITR_EL2:
                return &hfgitr_masks;
        case HDFGRTR_EL2:
                return &hdfgrtr_masks;
        case HDFGWTR_EL2:
                return &hdfgwtr_masks;
        case HAFGRTR_EL2:
                return &hafgrtr_masks;
        case HFGRTR2_EL2:
                return &hfgrtr2_masks;
        case HFGWTR2_EL2:
                return &hfgwtr2_masks;
        case HFGITR2_EL2:
                return &hfgitr2_masks;
        case HDFGRTR2_EL2:
                return &hdfgrtr2_masks;
        case HDFGWTR2_EL2:
                return &hdfgwtr2_masks;
        default:
                BUILD_BUG_ON(1);
        }
}

static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
        u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)];
        struct fgt_masks *m = __fgt_reg_to_masks(reg);
        u64 clear = 0, set = 0, val = m->nmask;

        set |= fgu & m->mask;
        clear |= fgu & m->nmask;

        if (is_nested_ctxt(vcpu)) {
                u64 nested = __vcpu_sys_reg(vcpu, reg);
                set |= nested & m->mask;
                clear |= ~nested & m->nmask;
        }

        val |= set;
        val &= ~clear;
        *vcpu_fgt(vcpu, reg) = val;
}

static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
{
        __compute_fgt(vcpu, HFGWTR_EL2);

        if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
                *vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
}

static void __compute_hdfgwtr(struct kvm_vcpu *vcpu)
{
        __compute_fgt(vcpu, HDFGWTR_EL2);

        if (is_hyp_ctxt(vcpu))
                *vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
}

void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
{
        if (!cpus_have_final_cap(ARM64_HAS_FGT))
                return;

        __compute_fgt(vcpu, HFGRTR_EL2);
        __compute_hfgwtr(vcpu);
        __compute_fgt(vcpu, HFGITR_EL2);
        __compute_fgt(vcpu, HDFGRTR_EL2);
        __compute_hdfgwtr(vcpu);
        __compute_fgt(vcpu, HAFGRTR_EL2);

        if (!cpus_have_final_cap(ARM64_HAS_FGT2))
                return;

        __compute_fgt(vcpu, HFGRTR2_EL2);
        __compute_fgt(vcpu, HFGWTR2_EL2);
        __compute_fgt(vcpu, HFGITR2_EL2);
        __compute_fgt(vcpu, HDFGRTR2_EL2);
        __compute_fgt(vcpu, HDFGWTR2_EL2);
}

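The mask/nmask dance above encodes FGT bit polarity: a bit in m->mask traps
when set, a bit in m->nmask traps when clear, so "no traps" is val == m->nmask.
A condensed restatement of the computation, assuming those semantics:

        u64 val = m->nmask;             /* start from "nothing trapped" */
        val |=  (fgu & m->mask);        /* positive-polarity traps: set the bit */
        val &= ~(fgu & m->nmask);       /* negative-polarity traps: clear the bit */
        /* a nested guest's own trap configuration is folded in the same way */
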
@@ -15,6 +15,12 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

static int cpu_has_spe(u64 dfr0)
{
        return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
               !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P);
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *

@@ -77,13 +83,12 @@ void kvm_init_host_debug_data(void)
        *host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0);
        *host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0);

        if (cpu_has_spe(dfr0))
                host_data_set_flag(HAS_SPE);

        if (has_vhe())
                return;

        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
            !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P))
                host_data_set_flag(HAS_SPE);

        /* Check if we have BRBE implemented and available at the host */
        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT))
                host_data_set_flag(HAS_BRBE);

@@ -102,7 +107,7 @@ void kvm_init_host_debug_data(void)
void kvm_debug_init_vhe(void)
{
        /* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
        if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
        if (host_data_test_flag(HAS_SPE))
                write_sysreg_el1(0, SYS_PMSCR);
}

@@ -591,64 +591,6 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
        return copy_core_reg_indices(vcpu, NULL);
}

static const u64 timer_reg_list[] = {
        KVM_REG_ARM_TIMER_CTL,
        KVM_REG_ARM_TIMER_CNT,
        KVM_REG_ARM_TIMER_CVAL,
        KVM_REG_ARM_PTIMER_CTL,
        KVM_REG_ARM_PTIMER_CNT,
        KVM_REG_ARM_PTIMER_CVAL,
};

#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)

static bool is_timer_reg(u64 index)
{
        switch (index) {
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
        case KVM_REG_ARM_PTIMER_CTL:
        case KVM_REG_ARM_PTIMER_CNT:
        case KVM_REG_ARM_PTIMER_CVAL:
                return true;
        }
        return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        for (int i = 0; i < NUM_TIMER_REGS; i++) {
                if (put_user(timer_reg_list[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int ret;

        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
        if (ret != 0)
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);

@@ -724,7 +666,6 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
        res += num_sve_regs(vcpu);
        res += kvm_arm_num_sys_reg_descs(vcpu);
        res += kvm_arm_get_fw_num_regs(vcpu);
        res += NUM_TIMER_REGS;

        return res;
}

@@ -755,11 +696,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
                return ret;
        uindices += kvm_arm_get_fw_num_regs(vcpu);

        ret = copy_timer_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

@@ -777,9 +713,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

@@ -797,9 +730,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

@@ -147,7 +147,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
        if (esr & ESR_ELx_WFx_ISS_RV) {
                u64 val, now;

                now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
                now = kvm_phys_timer_read();
                if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
                        now -= timer_get_offset(vcpu_hvtimer(vcpu));
                else
                        now -= timer_get_offset(vcpu_vtimer(vcpu));

                val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

                if (now >= val)

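WFET/WFIT carries an absolute counter deadline in Rt, so "now" must be the
counter value the guest actually observes, not the raw physical counter. A
condensed sketch of the fixed computation (as read from the hunk above):

        u64 now = kvm_phys_timer_read();        /* CNTPCT */
        if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
                now -= timer_get_offset(vcpu_hvtimer(vcpu));    /* EL2 view */
        else
                now -= timer_get_offset(vcpu_vtimer(vcpu));     /* EL1 view */
        /* now >= deadline means the timeout has already expired */
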
@@ -195,123 +195,6 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
                __deactivate_cptr_traps_nvhe(vcpu);
}

#define reg_to_fgt_masks(reg)                                           \
        ({                                                              \
                struct fgt_masks *m;                                    \
                switch(reg) {                                           \
                case HFGRTR_EL2:                                        \
                        m = &hfgrtr_masks;                              \
                        break;                                          \
                case HFGWTR_EL2:                                        \
                        m = &hfgwtr_masks;                              \
                        break;                                          \
                case HFGITR_EL2:                                        \
                        m = &hfgitr_masks;                              \
                        break;                                          \
                case HDFGRTR_EL2:                                       \
                        m = &hdfgrtr_masks;                             \
                        break;                                          \
                case HDFGWTR_EL2:                                       \
                        m = &hdfgwtr_masks;                             \
                        break;                                          \
                case HAFGRTR_EL2:                                       \
                        m = &hafgrtr_masks;                             \
                        break;                                          \
                case HFGRTR2_EL2:                                       \
                        m = &hfgrtr2_masks;                             \
                        break;                                          \
                case HFGWTR2_EL2:                                       \
                        m = &hfgwtr2_masks;                             \
                        break;                                          \
                case HFGITR2_EL2:                                       \
                        m = &hfgitr2_masks;                             \
                        break;                                          \
                case HDFGRTR2_EL2:                                      \
                        m = &hdfgrtr2_masks;                            \
                        break;                                          \
                case HDFGWTR2_EL2:                                      \
                        m = &hdfgwtr2_masks;                            \
                        break;                                          \
                default:                                                \
                        BUILD_BUG_ON(1);                                \
                }                                                       \
                                                                        \
                m;                                                      \
        })

#define compute_clr_set(vcpu, reg, clr, set)                            \
        do {                                                            \
                u64 hfg = __vcpu_sys_reg(vcpu, reg);                    \
                struct fgt_masks *m = reg_to_fgt_masks(reg);            \
                set |= hfg & m->mask;                                   \
                clr |= ~hfg & m->nmask;                                 \
        } while(0)

#define reg_to_fgt_group_id(reg)                                        \
        ({                                                              \
                enum fgt_group_id id;                                   \
                switch(reg) {                                           \
                case HFGRTR_EL2:                                        \
                case HFGWTR_EL2:                                        \
                        id = HFGRTR_GROUP;                              \
                        break;                                          \
                case HFGITR_EL2:                                        \
                        id = HFGITR_GROUP;                              \
                        break;                                          \
                case HDFGRTR_EL2:                                       \
                case HDFGWTR_EL2:                                       \
                        id = HDFGRTR_GROUP;                             \
                        break;                                          \
                case HAFGRTR_EL2:                                       \
                        id = HAFGRTR_GROUP;                             \
                        break;                                          \
                case HFGRTR2_EL2:                                       \
                case HFGWTR2_EL2:                                       \
                        id = HFGRTR2_GROUP;                             \
                        break;                                          \
                case HFGITR2_EL2:                                       \
                        id = HFGITR2_GROUP;                             \
                        break;                                          \
                case HDFGRTR2_EL2:                                      \
                case HDFGWTR2_EL2:                                      \
                        id = HDFGRTR2_GROUP;                            \
                        break;                                          \
                default:                                                \
                        BUILD_BUG_ON(1);                                \
                }                                                       \
                                                                        \
                id;                                                     \
        })

#define compute_undef_clr_set(vcpu, kvm, reg, clr, set)                 \
        do {                                                            \
                u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)];      \
                struct fgt_masks *m = reg_to_fgt_masks(reg);            \
                set |= hfg & m->mask;                                   \
                clr |= hfg & m->nmask;                                  \
        } while(0)

#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set)            \
        do {                                                            \
                struct fgt_masks *m = reg_to_fgt_masks(reg);            \
                u64 c = clr, s = set;                                   \
                u64 val;                                                \
                                                                        \
                ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);  \
                if (is_nested_ctxt(vcpu))                               \
                        compute_clr_set(vcpu, reg, c, s);               \
                                                                        \
                compute_undef_clr_set(vcpu, kvm, reg, c, s);            \
                                                                        \
                val = m->nmask;                                         \
                val |= s;                                               \
                val &= ~c;                                              \
                write_sysreg_s(val, SYS_ ## reg);                       \
        } while(0)

#define update_fgt_traps(hctxt, vcpu, kvm, reg)                         \
        update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)

static inline bool cpu_has_amu(void)
{
        u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

@@ -320,33 +203,36 @@ static inline bool cpu_has_amu(void)
                                                ID_AA64PFR0_EL1_AMU_SHIFT);
}

#define __activate_fgt(hctxt, vcpu, reg)                                \
        do {                                                            \
                ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);  \
                write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg);      \
        } while (0)

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);

        if (!cpus_have_final_cap(ARM64_HAS_FGT))
                return;

        update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
        update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
                            cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
                            HFGWTR_EL2_TCR_EL1_MASK : 0);
        update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
        __activate_fgt(hctxt, vcpu, HFGRTR_EL2);
        __activate_fgt(hctxt, vcpu, HFGWTR_EL2);
        __activate_fgt(hctxt, vcpu, HFGITR_EL2);
        __activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
        __activate_fgt(hctxt, vcpu, HDFGWTR_EL2);

        if (cpu_has_amu())
                update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
                __activate_fgt(hctxt, vcpu, HAFGRTR_EL2);

        if (!cpus_have_final_cap(ARM64_HAS_FGT2))
                return;

        update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2);
        update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2);
        __activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
        __activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
        __activate_fgt(hctxt, vcpu, HFGITR2_EL2);
        __activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
        __activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

#define __deactivate_fgt(htcxt, vcpu, reg) \

@@ -172,6 +172,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)

        /* Trust the host for non-protected vcpu features. */
        vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
        memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt));
        return 0;
}

@@ -1859,13 +1859,16 @@ void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
        u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

        if (is_nested_ctxt(vcpu))
                vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
        /*
         * In yet another example where FEAT_NV2 is fscking broken, accesses
         * to MDSCR_EL1 are redirected to the VNCR despite having an effect
         * at EL2. Use a big hammer to apply sanity.
         *
         * Unless of course we have FEAT_FGT, in which case we can precisely
         * trap MDSCR_EL1.
         */
        if (is_hyp_ctxt(vcpu))
        else if (!cpus_have_final_cap(ARM64_HAS_FGT))
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
        else
                vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
}

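The interleaved old/new lines above are hard to read without +/- markers; a
plausible reconstruction of the resulting function body (a sketch, not
authoritative):

        if (is_nested_ctxt(vcpu))
                vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
        else if (!cpus_have_final_cap(ARM64_HAS_FGT))
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;    /* big hammer */
        /* with FEAT_FGT, MDSCR_EL1 is trapped precisely; TDA isn't needed */
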
@@ -203,7 +203,6 @@ static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
        MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
        MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
        MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
        MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
        MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
        MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
        case CNTHCTL_EL2:

@@ -1595,14 +1594,47 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
        return true;
}

static bool access_hv_timer(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
static int arch_timer_set_user(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
{
        if (!vcpu_el2_e2h_is_set(vcpu))
                return undef_access(vcpu, p, r);
        switch (reg_to_encoding(rd)) {
        case SYS_CNTV_CTL_EL0:
        case SYS_CNTP_CTL_EL0:
        case SYS_CNTHV_CTL_EL2:
        case SYS_CNTHP_CTL_EL2:
                val &= ~ARCH_TIMER_CTRL_IT_STAT;
                break;
        case SYS_CNTVCT_EL0:
                if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
                        timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
                return 0;
        case SYS_CNTPCT_EL0:
                if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
                        timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
                return 0;
        }

        return access_arch_timer(vcpu, p, r);
        __vcpu_assign_sys_reg(vcpu, rd->reg, val);
        return 0;
}

static int arch_timer_get_user(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 *val)
{
        switch (reg_to_encoding(rd)) {
        case SYS_CNTVCT_EL0:
                *val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
                break;
        case SYS_CNTPCT_EL0:
                *val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
                break;
        default:
                *val = __vcpu_sys_reg(vcpu, rd->reg);
        }

        return 0;
}

static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,

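Note that the counter "writes" above never store a value: they derive the
VM-wide offset from CNTVCT = CNTPCT - CNTVOFF. A sketch of restoring a saved
virtual counter under that assumption (hypothetical helper name):

        static void restore_guest_cntvct(struct kvm_vcpu *vcpu, u64 saved_cntvct)
        {
                /* pick the offset that makes the guest observe saved_cntvct
                 * right now, counting forward from there */
                timer_set_offset(vcpu_vtimer(vcpu),
                                 kvm_phys_timer_read() - saved_cntvct);
        }
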
@@ -2507,15 +2539,20 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
                          "trap of EL2 register redirected to EL1");
}

#define EL2_REG_FILTERED(name, acc, rst, v, filter) {                   \
#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) {        \
        SYS_DESC(SYS_##name),                                           \
        .access = acc,                                                  \
        .reset = rst,                                                   \
        .reg = name,                                                    \
        .get_user = gu,                                                 \
        .set_user = su,                                                 \
        .visibility = filter,                                           \
        .val = v,                                                       \
}

#define EL2_REG_FILTERED(name, acc, rst, v, filter)                     \
        SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)

#define EL2_REG(name, acc, rst, v)                                      \
        EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)

@@ -2526,6 +2563,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
        EL2_REG_VNCR_FILT(name, hidden_visibility)
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)

#define TIMER_REG(name, vis)                                            \
        SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0,      \
                            arch_timer_get_user, arch_timer_set_user, vis)

/*
 * Since reset() callback and field val are not used for idregs, they will be
 * used for specific purposes for idregs.

@@ -2705,18 +2746,17 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,

        if (guest_hyp_sve_traps_enabled(vcpu)) {
                kvm_inject_nested_sve_trap(vcpu);
                return true;
                return false;
        }

        if (!p->is_write) {
                p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
                p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
                return true;
        }

        vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
        vq = min(vq, vcpu_sve_max_vq(vcpu));
        vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);

        __vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
        return true;
}

@@ -2833,6 +2873,16 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
        return __el2_visibility(vcpu, rd, s1pie_visibility);
}

static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
                                     const struct sys_reg_desc *rd)
{
        if (vcpu_has_nv(vcpu) &&
            !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
                return 0;

        return REG_HIDDEN;
}

static bool access_mdcr(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)

@@ -3482,17 +3532,19 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        AMU_AMEVTYPER1_EL0(14),
        AMU_AMEVTYPER1_EL0(15),

        { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
          .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
        { SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
          .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
        { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
        TIMER_REG(CNTP_CTL_EL0, NULL),
        TIMER_REG(CNTP_CVAL_EL0, NULL),

        { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
        TIMER_REG(CNTV_CTL_EL0, NULL),
        TIMER_REG(CNTV_CVAL_EL0, NULL),

        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),

@@ -3690,12 +3742,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
        EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
        { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
        EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
        EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
        TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
        TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),

        { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
        EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
        EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
        { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
        TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
        TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),

        { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },

@@ -5233,15 +5285,28 @@ static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
        }
}

static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
{
        switch(reg->id) {
        case KVM_REG_ARM_TIMER_CVAL:
                return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
        case KVM_REG_ARM_TIMER_CNT:
                return TO_ARM64_SYS_REG(CNTVCT_EL0);
        default:
                return reg->id;
        }
}

int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
                         const struct sys_reg_desc table[], unsigned int num)
{
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        const struct sys_reg_desc *r;
        u64 id = kvm_one_reg_to_id(reg);
        u64 val;
        int ret;

        r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
        r = id_to_sys_reg_desc(vcpu, id, table, num);
        if (!r || sysreg_hidden(vcpu, r))
                return -ENOENT;

@@ -5274,13 +5339,14 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
{
        u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
        const struct sys_reg_desc *r;
        u64 id = kvm_one_reg_to_id(reg);
        u64 val;
        int ret;

        if (get_user(val, uaddr))
                return -EFAULT;

        r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
        r = id_to_sys_reg_desc(vcpu, id, table, num);
        if (!r || sysreg_hidden(vcpu, r))
                return -ENOENT;

@@ -5340,10 +5406,23 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg)

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        u64 idx;

        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
        switch (reg_to_encoding(reg)) {
        case SYS_CNTV_CVAL_EL0:
                idx = KVM_REG_ARM_TIMER_CVAL;
                break;
        case SYS_CNTVCT_EL0:
                idx = KVM_REG_ARM_TIMER_CNT;
                break;
        default:
                idx = sys_reg_to_index(reg);
        }

        if (put_user(idx, *uind))
                return false;

        (*uind)++;

@@ -257,4 +257,10 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
                (val);                                                  \
        })

#define TO_ARM64_SYS_REG(r)     ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r),   \
                                              sys_reg_Op1(SYS_ ## r),   \
                                              sys_reg_CRn(SYS_ ## r),   \
                                              sys_reg_CRm(SYS_ ## r),   \
                                              sys_reg_Op2(SYS_ ## r))

#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */

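TO_ARM64_SYS_REG() rebuilds the userspace ONE_REG index from a kernel sysreg
encoding, which is how kvm_one_reg_to_id() (earlier hunk) routes the legacy
timer indices onto real registers. For example (CNTVCT_EL0 is op0=3, op1=3,
CRn=14, CRm=0, op2=2, per the encoding also listed in the get-reg-list hunk
below):

        /* KVM_REG_ARM_TIMER_CNT resolves to the virtual counter register */
        TO_ARM64_SYS_REG(CNTVCT_EL0) == ARM64_SYS_REG(3, 3, 14, 0, 2)
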
@@ -297,8 +297,11 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

        if (!vgic_is_v3(vcpu->kvm))
                return;

        /* Hide GICv3 sysreg if necessary */
        if (!kvm_has_gicv3(vcpu->kvm)) {
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
                                      ICH_HCR_EL2_TC);
                return;

@@ -51,8 +51,6 @@ struct arch_timer_vm_data {
};

struct arch_timer_context {
        struct kvm_vcpu *vcpu;

        /* Emulated Timer (may be unused) */
        struct hrtimer hrtimer;
        u64 ns_frac;

@@ -71,6 +69,9 @@ struct arch_timer_context {
                bool level;
        } irq;

        /* Who am I? */
        enum kvm_arch_timers timer_id;

        /* Duplicated state from arch_timer.c for convenience */
        u32 host_timer_irq;
};

@@ -106,9 +107,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);

void kvm_timer_init_vm(struct kvm *kvm);

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);

@@ -127,9 +125,9 @@ void kvm_timer_init_vhe(void);
#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)

#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data)
#define arch_timer_ctx_index(ctx) ((ctx)->timer_id)
#define timer_context_to_vcpu(ctx) container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
#define timer_vm_data(ctx) (&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,

@@ -178,4 +176,14 @@ static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
        return offset;
}

static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
        if (!ctxt->offset.vm_offset) {
                WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
                return;
        }

        WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

#endif

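Dropping the back-pointer works because every context sits at a fixed,
timer_id-indexed slot inside its vCPU, letting container_of() walk back to
the enclosing structure. The same pattern in miniature (using the kernel's
container_of()):

        struct outer {
                struct inner slots[4];  /* each inner records its own index */
        };

        static struct outer *to_outer(struct inner *p, int idx)
        {
                return container_of(p, struct outer, slots[idx]);
        }
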
@@ -1020,7 +1020,7 @@ static void set_counter_defaults(void)
{
        const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
        uint64_t freq = read_sysreg(CNTFRQ_EL0);
        uint64_t width = ilog2(MIN_ROLLOVER_SECS * freq);
        int width = ilog2(MIN_ROLLOVER_SECS * freq);

        width = clamp(width, 56, 64);
        CVAL_MAX = GENMASK_ULL(width - 1, 0);

@@ -359,6 +359,44 @@ static void test_mmio_ease(void)
        kvm_vm_free(vm);
}

static void test_serror_amo_guest(void)
{
        /*
         * The ISB is entirely unnecessary (and highlights how FEAT_NV2 is borked)
         * since the write is redirected to memory. But don't write (intentionally)
         * broken code!
         */
        sysreg_clear_set(hcr_el2, HCR_EL2_AMO | HCR_EL2_TGE, 0);
        isb();

        GUEST_SYNC(0);
        GUEST_ASSERT(read_sysreg(isr_el1) & ISR_EL1_A);

        /*
         * KVM treats the effective value of AMO as 1 when
         * HCR_EL2.{E2H,TGE} = {1, 0}, meaning the SError will be taken when
         * unmasked.
         */
        local_serror_enable();
        isb();
        local_serror_disable();

        GUEST_FAIL("Should've taken pending SError exception");
}

static void test_serror_amo(void)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_serror_amo_guest,
                                                        unexpected_dabt_handler);

        vm_install_exception_handler(vm, VECTOR_ERROR_CURRENT, expect_serror_handler);
        vcpu_run_expect_sync(vcpu);
        vcpu_inject_serror(vcpu);
        vcpu_run_expect_done(vcpu);
        kvm_vm_free(vm);
}

int main(void)
{
        test_mmio_abort();

@@ -369,4 +407,9 @@ int main(void)
        test_serror_emulated();
        test_mmio_ease();
        test_s1ptw_abort();

        if (!test_supports_el2())
                return 0;

        test_serror_amo();
}

@@ -65,6 +65,9 @@ static struct feature_id_reg feat_id_regs[] = {
        REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
        REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
        REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
        REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
        REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
        REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
};

bool filter_reg(__u64 reg)

@@ -345,9 +348,20 @@ static __u64 base_regs[] = {
        KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(3), /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
        ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),

        /*
         * EL0 Virtual Timer Registers
         *
         * WARNING:
         * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
         * with the appropriate register encodings. Their values have been
         * accidentally swapped. As this is set API, the definitions here
         * must be used, rather than ones derived from the encodings.
         */
        KVM_ARM64_SYS_REG(SYS_CNTV_CTL_EL0),
        KVM_REG_ARM_TIMER_CVAL,
        KVM_REG_ARM_TIMER_CNT,

        ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
        ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
        ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */

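The swap the warning refers to is visible in the UAPI definitions (encodings
reproduced from the removed entries above): KVM_REG_ARM_TIMER_CVAL carries
CNTVCT_EL0's encoding and KVM_REG_ARM_TIMER_CNT carries CNTV_CVAL_EL0's:

        #define KVM_REG_ARM_TIMER_CTL   ARM64_SYS_REG(3, 3, 14, 3, 1)   /* CNTV_CTL_EL0 */
        #define KVM_REG_ARM_TIMER_CVAL  ARM64_SYS_REG(3, 3, 14, 0, 2)   /* CNTVCT_EL0's encoding! */
        #define KVM_REG_ARM_TIMER_CNT   ARM64_SYS_REG(3, 3, 14, 3, 2)   /* CNTV_CVAL_EL0's encoding! */
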
@@ -755,6 +769,10 @@ static __u64 el2_regs[] = {
        SYS_REG(VSESR_EL2),
};

static __u64 el2_e2h0_regs[] = {
        /* Empty */
};

#define BASE_SUBLIST \
        { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \

@@ -789,6 +807,15 @@ static __u64 el2_regs[] = {
        .regs = el2_regs,                               \
        .regs_n = ARRAY_SIZE(el2_regs),                 \
}
#define EL2_E2H0_SUBLIST                                \
        EL2_SUBLIST,                                    \
        {                                               \
        .name = "EL2 E2H0",                             \
        .capability = KVM_CAP_ARM_EL2_E2H0,             \
        .feature = KVM_ARM_VCPU_HAS_EL2_E2H0,           \
        .regs = el2_e2h0_regs,                          \
        .regs_n = ARRAY_SIZE(el2_e2h0_regs),            \
        }

static struct vcpu_reg_list vregs_config = {
        .sublists = {

@@ -897,6 +924,65 @@ static struct vcpu_reg_list el2_pauth_pmu_config = {
        },
};

static struct vcpu_reg_list el2_e2h0_vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_sve_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        SVE_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_pauth_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_pauth_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

struct vcpu_reg_list *vcpu_configs[] = {
        &vregs_config,
        &vregs_pmu_config,

@@ -911,5 +997,12 @@ struct vcpu_reg_list *vcpu_configs[] = {
        &el2_sve_pmu_config,
        &el2_pauth_config,
        &el2_pauth_pmu_config,

        &el2_e2h0_vregs_config,
        &el2_e2h0_vregs_pmu_config,
        &el2_e2h0_sve_config,
        &el2_e2h0_sve_pmu_config,
        &el2_e2h0_pauth_config,
        &el2_e2h0_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);

@@ -249,11 +249,14 @@ static void guest_code(void)
        GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64PFR1_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64MMFR3_EL1);
        GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
        GUEST_REG_SYNC(SYS_MPIDR_EL1);
        GUEST_REG_SYNC(SYS_CLIDR_EL1);
        GUEST_REG_SYNC(SYS_CTR_EL0);
        GUEST_REG_SYNC(SYS_MIDR_EL1);
        GUEST_REG_SYNC(SYS_REVIDR_EL1);

@@ -123,6 +123,7 @@ static void guest_setup_gic(void)
static void guest_code(size_t nr_lpis)
{
        guest_setup_gic();
        local_irq_enable();

        GUEST_SYNC(0);

@@ -331,7 +332,7 @@ static void setup_vm(void)
{
        int i;

        vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu));
        vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu *));
        TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

        vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus);

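The allocation fix above is the classic sizeof-element bug: the array holds
pointers, so each slot must be sizeof(struct kvm_vcpu *), not the size of the
whole structure. The sizeof(*ptr) idiom avoids this entire class of error:

        struct kvm_vcpu **vcpus = malloc(nr_cpus * sizeof(*vcpus));
        /* sizeof(*vcpus) tracks the element type even if the declaration
         * changes later */
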
@@ -305,7 +305,17 @@ void test_wants_mte(void);
void test_disable_default_vgic(void);

bool vm_supports_el2(struct kvm_vm *vm);
static bool vcpu_has_el2(struct kvm_vcpu *vcpu)

static inline bool test_supports_el2(void)
{
        struct kvm_vm *vm = vm_create(1);
        bool supported = vm_supports_el2(vm);

        kvm_vm_free(vm);
        return supported;
}

static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
{
        return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
}

@@ -1273,4 +1273,6 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

bool kvm_arch_has_default_irqchip(void);

#endif /* SELFTEST_KVM_UTIL_H */

@@ -89,11 +89,19 @@ static void juggle_eventfd_primary(struct kvm_vm *vm, int eventfd)
int main(int argc, char *argv[])
{
        pthread_t racing_thread;
        struct kvm_vcpu *unused;
        int r, i;

        /* Create "full" VMs, as KVM_IRQFD requires an in-kernel IRQ chip. */
        vm1 = vm_create(1);
        vm2 = vm_create(1);
        TEST_REQUIRE(kvm_arch_has_default_irqchip());

        /*
         * Create "full" VMs, as KVM_IRQFD requires an in-kernel IRQ chip. Also
         * create an unused vCPU as certain architectures (like arm64) need to
         * complete IRQ chip initialization after all possible vCPUs for a VM
         * have been created.
         */
        vm1 = vm_create_with_one_vcpu(&unused, NULL);
        vm2 = vm_create_with_one_vcpu(&unused, NULL);

        WRITE_ONCE(__eventfd, kvm_new_eventfd());

@@ -725,3 +725,8 @@ void kvm_arch_vm_release(struct kvm_vm *vm)
        if (vm->arch.has_gic)
                close(vm->arch.gic_fd);
}

bool kvm_arch_has_default_irqchip(void)
{
        return request_vgic && kvm_supports_vgic_v3();
}

@@ -2344,3 +2344,8 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
        pg = paddr >> vm->page_shift;
        return sparsebit_is_set(region->protected_phy_pages, pg);
}

__weak bool kvm_arch_has_default_irqchip(void)
{
        return false;
}

@@ -221,3 +221,8 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}

bool kvm_arch_has_default_irqchip(void)
{
        return true;
}

@@ -1318,3 +1318,8 @@ bool sys_clocksource_is_based_on_tsc(void)

        return ret;
}

bool kvm_arch_has_default_irqchip(void)
{
        return true;
}