KVM: arm64: Kill leftovers of ad-hoc timer userspace access

Now that the whole timer infrastructure is handled as system register
accesses, get rid of the now unused ad-hoc infrastructure.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier
2025-09-29 17:04:54 +01:00
parent 892f7c38ba
commit 386aac77da
3 changed files with 0 additions and 126 deletions
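The userspace ABI is unchanged by this cleanup: the timer registers keep their existing ONE_REG IDs, which are already defined as ARM64_SYS_REG() encodings and are therefore served by the generic system-register accessors rather than the ad-hoc helpers deleted below. A minimal sketch of the (unchanged) userspace side, using hypothetical helper names and a vcpu fd, might look like this:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative sketch, not part of this commit: access the virtual timer
 * control register through the standard ONE_REG interface. The ID
 * KVM_REG_ARM_TIMER_CTL is an ARM64_SYS_REG() encoding (CNTV_CTL_EL0),
 * so the kernel can serve it through the same path as any other sysreg.
 */
static int get_vtimer_ctl(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CTL,
		.addr = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_vtimer_ctl(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CTL,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

The same KVM_GET_ONE_REG/KVM_SET_ONE_REG calls apply to the physical timer registers (KVM_REG_ARM_PTIMER_*) as well.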

@@ -1112,49 +1112,6 @@ void kvm_timer_cpu_down(void)
	disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_vtimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_ptimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
@@ -1171,31 +1128,6 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}

	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)

@@ -591,49 +591,6 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
	return copy_core_reg_indices(vcpu, NULL);
}

static const u64 timer_reg_list[] = {
};

#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)

static bool is_timer_reg(u64 index)
{
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	for (int i = 0; i < NUM_TIMER_REGS; i++) {
		if (put_user(timer_reg_list[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
@@ -709,7 +666,6 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}
@@ -740,11 +696,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
@@ -762,9 +713,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
@@ -782,9 +730,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

@@ -107,9 +107,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
void kvm_timer_init_vm(struct kvm *kvm);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);