Merge tag 'loongarch-kvm-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD

LoongArch KVM changes for v6.19

1. Get VM PMU capability from HW GCFG register.
2. Add AVEC basic support.
3. Use 64-bit register definition for EIOINTC.
4. Add KVM timer test cases for tools/selftests.
This commit is contained in:
Paolo Bonzini
2025-12-02 18:34:22 +01:00
15 changed files with 536 additions and 109 deletions

View File

@@ -10,10 +10,7 @@
#define EIOINTC_IRQS 256
#define EIOINTC_ROUTE_MAX_VCPUS 256
#define EIOINTC_IRQS_U8_NUMS (EIOINTC_IRQS / 8)
#define EIOINTC_IRQS_U16_NUMS (EIOINTC_IRQS_U8_NUMS / 2)
#define EIOINTC_IRQS_U32_NUMS (EIOINTC_IRQS_U8_NUMS / 4)
#define EIOINTC_IRQS_U64_NUMS (EIOINTC_IRQS_U8_NUMS / 8)
#define EIOINTC_IRQS_U64_NUMS (EIOINTC_IRQS / 64)
/* map to ipnum per 32 irqs */
#define EIOINTC_IRQS_NODETYPE_COUNT 16
@@ -64,54 +61,18 @@ struct loongarch_eiointc {
uint32_t status;
/* hardware state */
union nodetype {
u64 reg_u64[EIOINTC_IRQS_NODETYPE_COUNT / 4];
u32 reg_u32[EIOINTC_IRQS_NODETYPE_COUNT / 2];
u16 reg_u16[EIOINTC_IRQS_NODETYPE_COUNT];
u8 reg_u8[EIOINTC_IRQS_NODETYPE_COUNT * 2];
} nodetype;
u64 nodetype[EIOINTC_IRQS_NODETYPE_COUNT / 4];
/* one bit shows the state of one irq */
union bounce {
u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
} bounce;
union isr {
u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
} isr;
union coreisr {
u64 reg_u64[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U64_NUMS];
u32 reg_u32[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U32_NUMS];
u16 reg_u16[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U16_NUMS];
u8 reg_u8[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U8_NUMS];
} coreisr;
union enable {
u64 reg_u64[EIOINTC_IRQS_U64_NUMS];
u32 reg_u32[EIOINTC_IRQS_U32_NUMS];
u16 reg_u16[EIOINTC_IRQS_U16_NUMS];
u8 reg_u8[EIOINTC_IRQS_U8_NUMS];
} enable;
u64 bounce[EIOINTC_IRQS_U64_NUMS];
u64 isr[EIOINTC_IRQS_U64_NUMS];
u64 coreisr[EIOINTC_ROUTE_MAX_VCPUS][EIOINTC_IRQS_U64_NUMS];
u64 enable[EIOINTC_IRQS_U64_NUMS];
/* use one byte to config ipmap for 32 irqs at once */
union ipmap {
u64 reg_u64;
u32 reg_u32[EIOINTC_IRQS_U32_NUMS / 4];
u16 reg_u16[EIOINTC_IRQS_U16_NUMS / 4];
u8 reg_u8[EIOINTC_IRQS_U8_NUMS / 4];
} ipmap;
u64 ipmap;
/* use one byte to config coremap for one irq */
union coremap {
u64 reg_u64[EIOINTC_IRQS / 8];
u32 reg_u32[EIOINTC_IRQS / 4];
u16 reg_u16[EIOINTC_IRQS / 2];
u8 reg_u8[EIOINTC_IRQS];
} coremap;
u64 coremap[EIOINTC_IRQS / 8];
DECLARE_BITMAP(sw_coreisr[EIOINTC_ROUTE_MAX_VCPUS][LOONGSON_IP_NUM], EIOINTC_IRQS);
uint8_t sw_coremap[EIOINTC_IRQS];

View File

@@ -126,6 +126,8 @@ struct kvm_arch {
struct kvm_phyid_map *phyid_map;
/* Enabled PV features */
unsigned long pv_features;
/* Supported KVM features */
unsigned long kvm_features;
s64 time_offset;
struct kvm_context __percpu *vmcs;
@@ -293,6 +295,12 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
/* Check whether KVM supports this feature (the VMM may disable it) */
static inline bool kvm_vm_support(struct kvm_arch *arch, int feature)
{
	/* A feature is available iff its bit is set in the per-VM mask. */
	return (arch->kvm_features & BIT_ULL(feature)) != 0;
}
bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);
/* Debug: dump vcpu state */

View File

@@ -15,6 +15,7 @@
#define CPU_PMU (_ULCAST_(1) << 10)
#define CPU_TIMER (_ULCAST_(1) << 11)
#define CPU_IPI (_ULCAST_(1) << 12)
#define CPU_AVEC (_ULCAST_(1) << 14)
/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */
#define CPU_IP0 (_ULCAST_(1))

View File

@@ -511,6 +511,8 @@
#define CSR_GCFG_GPERF_SHIFT 24
#define CSR_GCFG_GPERF_WIDTH 3
#define CSR_GCFG_GPERF (_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
#define CSR_GCFG_GPMP_SHIFT 23
#define CSR_GCFG_GPMP (_ULCAST_(0x1) << CSR_GCFG_GPMP_SHIFT)
#define CSR_GCFG_GCI_SHIFT 20
#define CSR_GCFG_GCI_WIDTH 2
#define CSR_GCFG_GCI (_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)

View File

@@ -104,6 +104,7 @@ struct kvm_fpu {
#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
#define KVM_LOONGARCH_VM_FEAT_PTW 8
#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
/* Device Control API on vcpu fd */
#define KVM_LOONGARCH_VCPU_CPUCFG 0

View File

@@ -13,19 +13,19 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
struct kvm_vcpu *vcpu;
for (irq = 0; irq < EIOINTC_IRQS; irq++) {
ipnum = s->ipmap.reg_u8[irq / 32];
ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
}
cpuid = s->coremap.reg_u8[irq];
cpuid = ((u8 *)s->coremap)[irq];
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
if (!vcpu)
continue;
cpu = vcpu->vcpu_id;
if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
if (test_bit(irq, (unsigned long *)s->coreisr[cpu]))
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
@@ -38,7 +38,7 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
struct kvm_vcpu *vcpu;
struct kvm_interrupt vcpu_irq;
ipnum = s->ipmap.reg_u8[irq / 32];
ipnum = (s->ipmap >> (irq / 32 * 8)) & 0xff;
if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
@@ -53,13 +53,13 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
if (level) {
/* if not enable return false */
if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
if (!test_bit(irq, (unsigned long *)s->enable))
return;
__set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
__set_bit(irq, (unsigned long *)s->coreisr[cpu]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
} else {
__clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
__clear_bit(irq, (unsigned long *)s->coreisr[cpu]);
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
}
@@ -94,7 +94,7 @@ static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
if (s->sw_coremap[irq + i] == cpu)
continue;
if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
if (notify && test_bit(irq + i, (unsigned long *)s->isr)) {
/* lower irq at old cpu and raise irq at new cpu */
eiointc_update_irq(s, irq + i, 0);
s->sw_coremap[irq + i] = cpu;
@@ -108,7 +108,7 @@ static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
{
unsigned long flags;
unsigned long *isr = (unsigned long *)s->isr.reg_u8;
unsigned long *isr = (unsigned long *)s->isr;
spin_lock_irqsave(&s->lock, flags);
level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
@@ -127,27 +127,27 @@ static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eioint
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 3;
data = s->nodetype.reg_u64[index];
data = s->nodetype[index];
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
index = (offset - EIOINTC_IPMAP_START) >> 3;
data = s->ipmap.reg_u64;
data = s->ipmap;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 3;
data = s->enable.reg_u64[index];
data = s->enable[index];
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
index = (offset - EIOINTC_BOUNCE_START) >> 3;
data = s->bounce.reg_u64[index];
data = s->bounce[index];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 3;
data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
data = s->coreisr[vcpu->vcpu_id][index];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
index = (offset - EIOINTC_COREMAP_START) >> 3;
data = s->coremap.reg_u64[index];
data = s->coremap[index];
break;
default:
ret = -EINVAL;
@@ -223,26 +223,26 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 3;
old = s->nodetype.reg_u64[index];
s->nodetype.reg_u64[index] = (old & ~mask) | data;
old = s->nodetype[index];
s->nodetype[index] = (old & ~mask) | data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
old = s->ipmap.reg_u64;
s->ipmap.reg_u64 = (old & ~mask) | data;
old = s->ipmap;
s->ipmap = (old & ~mask) | data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 3;
old = s->enable.reg_u64[index];
s->enable.reg_u64[index] = (old & ~mask) | data;
old = s->enable[index];
s->enable[index] = (old & ~mask) | data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
data = s->enable[index] & ~old & s->isr[index];
while (data) {
irq = __ffs(data);
eiointc_update_irq(s, irq + index * 64, 1);
@@ -252,7 +252,7 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
data = ~s->enable[index] & old & s->isr[index];
while (data) {
irq = __ffs(data);
eiointc_update_irq(s, irq + index * 64, 0);
@@ -262,16 +262,16 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 3;
old = s->bounce.reg_u64[index];
s->bounce.reg_u64[index] = (old & ~mask) | data;
old = s->bounce[index];
s->bounce[index] = (old & ~mask) | data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 3;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
old = s->coreisr.reg_u64[cpu][index];
old = s->coreisr[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u64[cpu][index] = old & ~data;
s->coreisr[cpu][index] = old & ~data;
data &= old;
while (data) {
irq = __ffs(data);
@@ -281,9 +281,9 @@ static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
index = (offset - EIOINTC_COREMAP_START) >> 3;
old = s->coremap.reg_u64[index];
s->coremap.reg_u64[index] = (old & ~mask) | data;
data = s->coremap.reg_u64[index];
old = s->coremap[index];
s->coremap[index] = (old & ~mask) | data;
data = s->coremap[index];
eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
break;
default:
@@ -451,10 +451,10 @@ static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
break;
case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
eiointc_set_sw_coreisr(s);
for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
start_irq = i * 4;
for (i = 0; i < (EIOINTC_IRQS / 8); i++) {
start_irq = i * 8;
eiointc_update_sw_coremap(s, start_irq,
s->coremap.reg_u32[i], sizeof(u32), false);
s->coremap[i], sizeof(u64), false);
}
break;
default:
@@ -481,34 +481,34 @@ static int kvm_eiointc_regs_access(struct kvm_device *dev,
switch (addr) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
offset = (addr - EIOINTC_NODETYPE_START) / 4;
p = &s->nodetype.reg_u32[offset];
p = s->nodetype + offset * 4;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
offset = (addr - EIOINTC_IPMAP_START) / 4;
p = &s->ipmap.reg_u32[offset];
p = &s->ipmap + offset * 4;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
offset = (addr - EIOINTC_ENABLE_START) / 4;
p = &s->enable.reg_u32[offset];
p = s->enable + offset * 4;
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
offset = (addr - EIOINTC_BOUNCE_START) / 4;
p = &s->bounce.reg_u32[offset];
p = s->bounce + offset * 4;
break;
case EIOINTC_ISR_START ... EIOINTC_ISR_END:
offset = (addr - EIOINTC_ISR_START) / 4;
p = &s->isr.reg_u32[offset];
p = s->isr + offset * 4;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
if (cpu >= s->num_cpu)
return -EINVAL;
offset = (addr - EIOINTC_COREISR_START) / 4;
p = &s->coreisr.reg_u32[cpu][offset];
p = s->coreisr[cpu] + offset * 4;
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
offset = (addr - EIOINTC_COREMAP_START) / 4;
p = &s->coremap.reg_u32[offset];
p = s->coremap + offset * 4;
break;
default:
kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);

View File

@@ -21,6 +21,7 @@ static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
[INT_HWI5] = CPU_IP5,
[INT_HWI6] = CPU_IP6,
[INT_HWI7] = CPU_IP7,
[INT_AVEC] = CPU_AVEC,
};
static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
@@ -31,6 +32,11 @@ static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
if (cpu_has_msgint && (priority == INT_AVEC)) {
set_gcsr_estat(irq);
return 1;
}
switch (priority) {
case INT_TI:
case INT_IPI:
@@ -58,6 +64,11 @@ static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
if (priority < EXCCODE_INT_NUM)
irq = priority_to_irq[priority];
if (cpu_has_msgint && (priority == INT_AVEC)) {
clear_gcsr_estat(irq);
return 1;
}
switch (priority) {
case INT_TI:
case INT_IPI:
@@ -83,10 +94,10 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.irq_pending;
unsigned long *pending_clr = &vcpu->arch.irq_clear;
for_each_set_bit(priority, pending_clr, INT_IPI + 1)
for_each_set_bit(priority, pending_clr, EXCCODE_INT_NUM)
kvm_irq_clear(vcpu, priority);
for_each_set_bit(priority, pending, INT_IPI + 1)
for_each_set_bit(priority, pending, EXCCODE_INT_NUM)
kvm_irq_deliver(vcpu, priority);
}

View File

@@ -659,8 +659,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
*v = GENMASK(31, 0);
return 0;
case LOONGARCH_CPUCFG1:
/* CPUCFG1_MSGINT is not supported by KVM */
*v = GENMASK(25, 0);
*v = GENMASK(26, 0);
return 0;
case LOONGARCH_CPUCFG2:
/* CPUCFG2 features unconditionally supported by KVM */
@@ -728,6 +727,10 @@ static int kvm_check_cpucfg(int id, u64 val)
return -EINVAL;
switch (id) {
case LOONGARCH_CPUCFG1:
if ((val & CPUCFG1_MSGINT) && !cpu_has_msgint)
return -EINVAL;
return 0;
case LOONGARCH_CPUCFG2:
if (!(val & CPUCFG2_LLFTP))
/* Guests must have a constant timer */
@@ -1657,6 +1660,12 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
if (cpu_has_msgint) {
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
}
/* Restore Root.GINTC from unused Guest.GINTC register */
write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
@@ -1746,6 +1755,12 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
if (cpu_has_msgint) {
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
}
vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

View File

@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_vcpu.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
@@ -24,6 +25,23 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
/*
 * Initialize the per-VM feature masks at VM creation time.
 *
 * kvm_features records which KVM_LOONGARCH_VM_FEAT_* features this VM
 * supports; pv_features records the paravirt features exposed to the guest.
 */
static void kvm_vm_init_features(struct kvm *kvm)
{
	unsigned long val;

	/* Probe hardware GCFG: GPMP set means the guest PMU is supported. */
	val = read_csr_gcfg();
	if (val & CSR_GCFG_GPMP)
		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);

	/* Enable all PV features by default */
	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
	/*
	 * Must OR into kvm_features here: a plain assignment would discard
	 * the PMU bit probed above.
	 */
	kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
	if (kvm_pvtime_supported()) {
		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
	}
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int i;
@@ -42,11 +60,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.phyid_map_lock);
kvm_init_vmcs(kvm);
/* Enable all PV features by default */
kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
if (kvm_pvtime_supported())
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
kvm_vm_init_features(kvm);
/*
* cpu_vabits means user address space only (a half of total).
@@ -136,20 +150,20 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
if (cpu_has_lbt_mips)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PMU:
if (cpu_has_pmp)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
return 0;
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
if (kvm_pvtime_supported())
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PTW:
if (cpu_has_ptw)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_MSGINT:
if (cpu_has_msgint)
return 0;
return -ENXIO;
case KVM_LOONGARCH_VM_FEAT_PMU:
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
if (kvm_vm_support(&kvm->arch, attr->attr))
return 0;
return -ENXIO;
default:
return -ENXIO;
}

View File

@@ -212,6 +212,7 @@ TEST_GEN_PROGS_riscv += mmu_stress_test
TEST_GEN_PROGS_riscv += rseq_test
TEST_GEN_PROGS_riscv += steal_time
TEST_GEN_PROGS_loongarch = arch_timer
TEST_GEN_PROGS_loongarch += coalesced_io_test
TEST_GEN_PROGS_loongarch += demand_paging_test
TEST_GEN_PROGS_loongarch += dirty_log_perf_test

View File

@@ -0,0 +1,85 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* LoongArch Constant Timer specific interface
*/
#ifndef SELFTEST_KVM_ARCH_TIMER_H
#define SELFTEST_KVM_ARCH_TIMER_H
#include "processor.h"
/* LoongArch timer frequency is constant: 100UL << 20 Hz (~104.86 MHz) */
#define TIMER_FREQ (100UL << 20)
#define msec_to_cycles(msec) (TIMER_FREQ * (unsigned long)(msec) / 1000)
#define usec_to_cycles(usec) (TIMER_FREQ * (unsigned long)(usec) / 1000000)
#define cycles_to_usec(cycles) ((unsigned long)(cycles) * 1000000 / TIMER_FREQ)
/* Read the free-running stable counter via the rdtime.d instruction. */
static inline unsigned long timer_get_cycles(void)
{
	unsigned long cycles;

	__asm__ __volatile__("rdtime.d %0, $zero\n\t" : "=r"(cycles));

	return cycles;
}
/* Return the current timer configuration (CSR.TCFG: value, EN, PERIOD). */
static inline unsigned long timer_get_cfg(void)
{
	return csr_read(LOONGARCH_CSR_TCFG);
}
/* Return the current countdown value of the constant timer (CSR.TVAL). */
static inline unsigned long timer_get_val(void)
{
	return csr_read(LOONGARCH_CSR_TVAL);
}
/* Stop the constant timer: writing 0 to CSR.TCFG clears CSR_TCFG_EN. */
static inline void disable_timer(void)
{
	csr_write(0, LOONGARCH_CSR_TCFG);
}
/* Unmask the timer interrupt line in CSR.ECFG. */
static inline void timer_irq_enable(void)
{
	csr_write(csr_read(LOONGARCH_CSR_ECFG) | ECFGF_TIMER, LOONGARCH_CSR_ECFG);
}
/* Mask the timer interrupt line in CSR.ECFG. */
static inline void timer_irq_disable(void)
{
	csr_write(csr_read(LOONGARCH_CSR_ECFG) & ~ECFGF_TIMER, LOONGARCH_CSR_ECFG);
}
/* Arm the constant timer to expire in @msec ms; periodic when @period. */
static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
{
	unsigned long tcfg = (msec_to_cycles(msec) & CSR_TCFG_VAL) | CSR_TCFG_EN;

	if (period)
		tcfg |= CSR_TCFG_PERIOD;
	csr_write(tcfg, LOONGARCH_CSR_TCFG);
}
/* Busy-wait until @cycles stable-counter ticks have elapsed. */
static inline void __delay(uint64_t cycles)
{
	uint64_t start = timer_get_cycles();

	while ((timer_get_cycles() - start) < cycles)
		cpu_relax();
}
/* Busy-wait for @usec microseconds, based on the stable counter. */
static inline void udelay(unsigned long usec)
{
	__delay(usec_to_cycles(usec));
}
#endif /* SELFTEST_KVM_ARCH_TIMER_H */

View File

@@ -83,7 +83,14 @@
#define LOONGARCH_CSR_PRMD 0x1
#define LOONGARCH_CSR_EUEN 0x2
#define LOONGARCH_CSR_ECFG 0x4
#define ECFGB_TIMER 11
#define ECFGF_TIMER (BIT_ULL(ECFGB_TIMER))
#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
#define CSR_ESTAT_EXC_SHIFT 16
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (0x3f << CSR_ESTAT_EXC_SHIFT)
#define EXCCODE_INT 0 /* Interrupt */
#define INT_TI 11 /* Timer interrupt */
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
#define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */
#define LOONGARCH_CSR_EENTRY 0xc
@@ -106,6 +113,14 @@
#define LOONGARCH_CSR_KS1 0x31
#define LOONGARCH_CSR_TMID 0x40
#define LOONGARCH_CSR_TCFG 0x41
#define CSR_TCFG_VAL (BIT_ULL(48) - BIT_ULL(2))
#define CSR_TCFG_PERIOD_SHIFT 1
#define CSR_TCFG_PERIOD (0x1UL << CSR_TCFG_PERIOD_SHIFT)
#define CSR_TCFG_EN (0x1UL)
#define LOONGARCH_CSR_TVAL 0x42
#define LOONGARCH_CSR_TINTCLR 0x44 /* Timer interrupt clear */
#define CSR_TINTCLR_TI_SHIFT 0
#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
/* TLB refill exception entry */
#define LOONGARCH_CSR_TLBRENTRY 0x88
#define LOONGARCH_CSR_TLBRSAVE 0x8b
@@ -113,6 +128,28 @@
#define CSR_TLBREHI_PS_SHIFT 0
#define CSR_TLBREHI_PS (0x3fUL << CSR_TLBREHI_PS_SHIFT)
#define csr_read(csr) \
({ \
register unsigned long __v; \
__asm__ __volatile__( \
"csrrd %[val], %[reg]\n\t" \
: [val] "=r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
__v; \
})
#define csr_write(v, csr) \
({ \
register unsigned long __v = v; \
__asm__ __volatile__ ( \
"csrwr %[val], %[reg]\n\t" \
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
__v; \
})
#define EXREGS_GPRS (32)
#ifndef __ASSEMBLER__
@@ -124,18 +161,60 @@ struct ex_regs {
unsigned long pc;
unsigned long estat;
unsigned long badv;
unsigned long prmd;
};
#define PC_OFFSET_EXREGS offsetof(struct ex_regs, pc)
#define ESTAT_OFFSET_EXREGS offsetof(struct ex_regs, estat)
#define BADV_OFFSET_EXREGS offsetof(struct ex_regs, badv)
#define PRMD_OFFSET_EXREGS offsetof(struct ex_regs, prmd)
#define EXREGS_SIZE sizeof(struct ex_regs)
#define VECTOR_NUM 64
typedef void(*handler_fn)(struct ex_regs *);
struct handlers {
handler_fn exception_handlers[VECTOR_NUM];
};
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler);
static inline void cpu_relax(void)
{
asm volatile("nop" ::: "memory");
}
static inline void local_irq_enable(void)
{
unsigned int flags = CSR_CRMD_IE;
register unsigned int mask asm("$t0") = CSR_CRMD_IE;
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
static inline void local_irq_disable(void)
{
unsigned int flags = 0;
register unsigned int mask asm("$t0") = CSR_CRMD_IE;
__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}
#else
#define PC_OFFSET_EXREGS ((EXREGS_GPRS + 0) * 8)
#define ESTAT_OFFSET_EXREGS ((EXREGS_GPRS + 1) * 8)
#define BADV_OFFSET_EXREGS ((EXREGS_GPRS + 2) * 8)
#define EXREGS_SIZE ((EXREGS_GPRS + 3) * 8)
#define PRMD_OFFSET_EXREGS ((EXREGS_GPRS + 3) * 8)
#define EXREGS_SIZE ((EXREGS_GPRS + 4) * 8)
#endif
#endif /* SELFTEST_KVM_PROCESSOR_H */

View File

@@ -51,9 +51,15 @@ handle_exception:
st.d t0, sp, ESTAT_OFFSET_EXREGS
csrrd t0, LOONGARCH_CSR_BADV
st.d t0, sp, BADV_OFFSET_EXREGS
csrrd t0, LOONGARCH_CSR_PRMD
st.d t0, sp, PRMD_OFFSET_EXREGS
or a0, sp, zero
bl route_exception
ld.d t0, sp, PC_OFFSET_EXREGS
csrwr t0, LOONGARCH_CSR_ERA
ld.d t0, sp, PRMD_OFFSET_EXREGS
csrwr t0, LOONGARCH_CSR_PRMD
restore_gprs sp
csrrd sp, LOONGARCH_CSR_KS0
ertn

View File

@@ -3,6 +3,7 @@
#include <assert.h>
#include <linux/compiler.h>
#include <asm/kvm.h>
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"
@@ -11,6 +12,7 @@
#define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000
static vm_paddr_t invalid_pgtable[4];
static vm_vaddr_t exception_handlers;
static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
@@ -183,7 +185,14 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
void route_exception(struct ex_regs *regs)
{
int vector;
unsigned long pc, estat, badv;
struct handlers *handlers;
handlers = (struct handlers *)exception_handlers;
vector = (regs->estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
if (handlers && handlers->exception_handlers[vector])
return handlers->exception_handlers[vector](regs);
pc = regs->pc;
badv = regs->badv;
@@ -192,6 +201,32 @@ void route_exception(struct ex_regs *regs)
while (1) ;
}
/*
 * Allocate and zero the guest-side exception handler table, then publish
 * its guest virtual address to the guest via the exception_handlers global.
 */
void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	void *addr;

	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
		LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
	addr = addr_gva2hva(vm, vm->handlers);
	/*
	 * NOTE(review): clears a whole page although only sizeof(struct
	 * handlers) was requested; presumably the allocation is page
	 * granular -- confirm against __vm_vaddr_alloc().
	 */
	memset(addr, 0, vm->page_size);
	exception_handlers = vm->handlers;
	sync_global_to_guest(vm, exception_handlers);
}
/*
 * Register @handler for exception @vector in the guest's handler table.
 * A NULL handler leaves the default route_exception() behavior in place.
 */
void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn handler)
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	/* Reject out-of-range vectors, including negative ones. */
	assert(vector >= 0 && vector < VECTOR_NUM);
	handlers->exception_handlers[vector] = handler;
}
/* Return this vCPU's id, held in CSR.CPUID on LoongArch. */
uint32_t guest_get_vcpuid(void)
{
	return csr_read(LOONGARCH_CSR_CPUID);
}
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
int i;
@@ -211,6 +246,11 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
vcpu_regs_set(vcpu, &regs);
}
static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
__vcpu_set_reg(vcpu, id, val);
}
static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
uint64_t csrid;
@@ -242,8 +282,8 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
/* user mode and page enable mode */
val = PLV_USER | CSR_CRMD_PG;
/* kernel mode and page enable mode */
val = PLV_KERN | CSR_CRMD_PG;
loongarch_set_csr(vcpu, LOONGARCH_CSR_CRMD, val);
loongarch_set_csr(vcpu, LOONGARCH_CSR_PRMD, val);
loongarch_set_csr(vcpu, LOONGARCH_CSR_EUEN, 1);
@@ -251,7 +291,10 @@ static void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
loongarch_set_csr(vcpu, LOONGARCH_CSR_TCFG, 0);
loongarch_set_csr(vcpu, LOONGARCH_CSR_ASID, 1);
/* time count start from 0 */
val = 0;
loongarch_set_reg(vcpu, KVM_REG_LOONGARCH_COUNTER, val);
width = vm->page_shift - 3;
switch (vm->pgtable_levels) {

View File

@@ -0,0 +1,200 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* The test validates periodic/one-shot constant timer IRQ using
* CSR.TCFG and CSR.TVAL registers.
*/
#include "arch_timer.h"
#include "kvm_util.h"
#include "processor.h"
#include "timer_test.h"
#include "ucall_common.h"
/* Idle until an interrupt fires, check it is the timer IRQ, and ack it. */
static void do_idle(void)
{
	unsigned int intid;
	unsigned long estat;

	__asm__ __volatile__("idle 0" : : : "memory");
	estat = csr_read(LOONGARCH_CSR_ESTAT);
	intid = !!(estat & BIT(INT_TI));
	/* Make sure pending timer IRQ arrived */
	GUEST_ASSERT_EQ(intid, 1);
	/* Acknowledge: clear the timer interrupt pending bit */
	csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
}
/*
 * Guest timer IRQ handler. In periodic mode it counts nr_iter down and
 * disables the timer when the target is reached; in one-shot mode it
 * sanity-checks TVAL and counts nr_iter up.
 */
static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid;
	uint32_t cpu = guest_get_vcpuid();
	uint64_t xcnt, val, cfg, xcnt_diff_us;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	intid = !!(regs->estat & BIT(INT_TI));
	/* Make sure we are dealing with the correct timer IRQ */
	GUEST_ASSERT_EQ(intid, 1);

	cfg = timer_get_cfg();
	if (cfg & CSR_TCFG_PERIOD) {
		/* Periodic mode: one IRQ consumed, stop after the last one. */
		WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter - 1);
		if (shared_data->nr_iter == 0)
			disable_timer();
		csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}

	/*
	 * On real machine, value of LOONGARCH_CSR_TVAL is BIT_ULL(48) - 1
	 * On virtual machine, its value counts down from BIT_ULL(48) - 1
	 */
	val = timer_get_val();
	xcnt = timer_get_cycles();
	xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
	/* Basic 'timer condition met' check */
	__GUEST_ASSERT(val > cfg,
		"val = 0x%lx, cfg = 0x%lx, xcnt_diff_us = 0x%lx",
		val, cfg, xcnt_diff_us);
	/* Acknowledge the IRQ and record one more one-shot expiry. */
	csr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
	WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
/*
 * Periodic timer test: arm one periodic timer and wait nr_iter periods
 * (plus error margin each). The IRQ handler decrements nr_iter per tick,
 * so it must reach 0 by the end.
 */
static void guest_test_period_timer(uint32_t cpu)
{
	uint32_t irq_iter, config_iter;
	uint64_t us;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	shared_data->nr_iter = test_args.nr_iter;
	shared_data->xcnt = timer_get_cycles();
	us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
	timer_set_next_cmp_ms(test_args.timer_period_ms, true);
	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		/* Setup a timeout for the interrupt to arrive */
		udelay(us);
	}

	irq_iter = READ_ONCE(shared_data->nr_iter);
	__GUEST_ASSERT(irq_iter == 0,
		"irq_iter = 0x%x.\n"
		" Guest period timer interrupt was not triggered within the specified\n"
		" interval, try to increase the error margin by [-e] option.\n",
		irq_iter);
}
/*
 * One-shot timer test: re-arm a one-shot timer nr_iter times; after each
 * arming plus error margin exactly one more IRQ must have been counted
 * by the handler.
 */
static void guest_test_oneshot_timer(uint32_t cpu)
{
	uint32_t irq_iter, config_iter;
	uint64_t us;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	shared_data->nr_iter = 0;
	shared_data->guest_stage = 0;
	us = msecs_to_usecs(test_args.timer_period_ms) + test_args.timer_err_margin_us;
	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		shared_data->xcnt = timer_get_cycles();
		/* Setup the next interrupt */
		timer_set_next_cmp_ms(test_args.timer_period_ms, false);
		/* Setup a timeout for the interrupt to arrive */
		udelay(us);
		irq_iter = READ_ONCE(shared_data->nr_iter);
		__GUEST_ASSERT(config_iter + 1 == irq_iter,
			"config_iter + 1 = 0x%x, irq_iter = 0x%x.\n"
			" Guest timer interrupt was not triggered within the specified\n"
			" interval, try to increase the error margin by [-e] option.\n",
			config_iter + 1, irq_iter);
	}
}
/*
 * Idle-wakeup test: with IRQs masked, arm a one-shot timer and enter
 * idle; the wakeup must come no earlier than the programmed period.
 */
static void guest_test_emulate_timer(uint32_t cpu)
{
	uint32_t config_iter;
	uint64_t xcnt_diff_us, us;
	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];

	/* Mask IRQs so the timer wakes 'idle' without running the handler */
	local_irq_disable();
	shared_data->nr_iter = 0;
	us = msecs_to_usecs(test_args.timer_period_ms);
	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		shared_data->xcnt = timer_get_cycles();
		/* Setup the next interrupt */
		timer_set_next_cmp_ms(test_args.timer_period_ms, false);
		do_idle();
		xcnt_diff_us = cycles_to_usec(timer_get_cycles() - shared_data->xcnt);
		__GUEST_ASSERT(xcnt_diff_us >= us,
			"xcnt_diff_us = 0x%lx, us = 0x%lx.\n",
			xcnt_diff_us, us);
	}
	local_irq_enable();
}
/*
 * Stable counter test: the counter must start near zero and be
 * monotonically non-decreasing. @cpu is currently unused.
 */
static void guest_time_count_test(uint32_t cpu)
{
	uint32_t config_iter;
	unsigned long start, end, prev, us;

	/* Assuming that test case starts to run in 1 second */
	start = timer_get_cycles();
	us = msec_to_cycles(1000);
	__GUEST_ASSERT(start <= us,
		"start = 0x%lx, us = 0x%lx.\n",
		start, us);

	us = msec_to_cycles(test_args.timer_period_ms);
	for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
		start = timer_get_cycles();
		end = start + us;
		/* test time count growing up always */
		while (start < end) {
			prev = start;
			start = timer_get_cycles();
			__GUEST_ASSERT(prev <= start,
				"prev = 0x%lx, start = 0x%lx.\n",
				prev, start);
		}
	}
}
/* Guest entry point: run the counter test, then the IRQ-driven tests. */
static void guest_code(void)
{
	uint32_t cpu = guest_get_vcpuid();

	/* must run at first */
	guest_time_count_test(cpu);

	/* Enable timer IRQ delivery before the interrupt-based tests */
	timer_irq_enable();
	local_irq_enable();
	guest_test_period_timer(cpu);
	guest_test_oneshot_timer(cpu);
	guest_test_emulate_timer(cpu);
	GUEST_DONE();
}
/* Create the test VM and install the guest timer IRQ handler. */
struct kvm_vm *test_vm_create(void)
{
	struct kvm_vm *vm;
	int nr_vcpus = test_args.nr_vcpus;

	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
	vm_init_descriptor_tables(vm);
	vm_install_exception_handler(vm, EXCCODE_INT, guest_irq_handler);

	/* Make all the test's cmdline args visible to the guest */
	sync_global_to_guest(vm, test_args);

	return vm;
}
/* Release all VM resources created by test_vm_create(). */
void test_vm_cleanup(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}