LoongArch: KVM: Get VM PMU capability from HW GCFG register

Now the VM PMU capability comes directly from the host PMU capability,
while bit 23 of the HW GCFG CSR register also indicates the PMU
capability for a VM. It is better to take it from the HW GCFG CSR
register rather than from the host PMU capability alone, especially
when the LVZ feature is emulated in TCG mode, in which case there is
no PMU capability (a sketch of the check follows the metadata below).

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Author:    Bibo Mao
Date:      2025-11-27 11:00:18 +08:00
Committer: Huacai Chen
Parent:    ac3fd01e4c
Commit:    74087611f0

3 changed files with 35 additions and 15 deletions
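
In effect the patch replaces a host-capability test with a read of the
guest-configuration CSR. A minimal sketch of that check, reusing the
read_csr_gcfg() helper and the CSR_GCFG_GPMP bit introduced by the diff
below (the wrapper function itself is illustrative, not part of the patch):

/* Illustrative wrapper: does the hardware offer a PMU to guests? */
static bool vm_pmu_available(void)
{
	/*
	 * GCFG.GPMP (bit 23) reports what the virtualization extension can
	 * actually provide to a guest. The host check cpu_has_pmp may still
	 * be true when LVZ is emulated in TCG mode, even though no guest
	 * PMU exists, so the GCFG bit is the more accurate source.
	 */
	return !!(read_csr_gcfg() & CSR_GCFG_GPMP);
}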


@@ -126,6 +126,8 @@ struct kvm_arch {
 	struct kvm_phyid_map *phyid_map;
 	/* Enabled PV features */
 	unsigned long pv_features;
+	/* Supported KVM features */
+	unsigned long kvm_features;
 	s64 time_offset;
 	struct kvm_context __percpu *vmcs;
@@ -293,6 +295,12 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
 	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
 }
 
+/* Check whether KVM support this feature (VMM may disable it) */
+static inline bool kvm_vm_support(struct kvm_arch *arch, int feature)
+{
+	return !!(arch->kvm_features & BIT_ULL(feature));
+}
+
 bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);
 
 /* Debug: dump vcpu state */


@@ -511,6 +511,8 @@
 #define CSR_GCFG_GPERF_SHIFT		24
 #define CSR_GCFG_GPERF_WIDTH		3
 #define CSR_GCFG_GPERF			(_ULCAST_(0x7) << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GPMP_SHIFT		23
+#define CSR_GCFG_GPMP			(_ULCAST_(0x1) << CSR_GCFG_GPMP_SHIFT)
 #define CSR_GCFG_GCI_SHIFT		20
 #define CSR_GCFG_GCI_WIDTH		2
 #define CSR_GCFG_GCI			(_ULCAST_(0x3) << CSR_GCFG_GCI_SHIFT)


@@ -6,6 +6,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_vcpu.h>
+#include <asm/kvm_csr.h>
 #include <asm/kvm_eiointc.h>
 #include <asm/kvm_pch_pic.h>
@@ -24,6 +25,23 @@ const struct kvm_stats_header kvm_vm_stats_header = {
 	sizeof(kvm_vm_stats_desc),
 };
 
+static void kvm_vm_init_features(struct kvm *kvm)
+{
+	unsigned long val;
+
+	val = read_csr_gcfg();
+	if (val & CSR_GCFG_GPMP)
+		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
+
+	/* Enable all PV features by default */
+	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
+	kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
+	if (kvm_pvtime_supported()) {
+		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
+	}
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	int i;
@@ -42,11 +60,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	spin_lock_init(&kvm->arch.phyid_map_lock);
 	kvm_init_vmcs(kvm);
 
-	/* Enable all PV features by default */
-	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
-	if (kvm_pvtime_supported())
-		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+	kvm_vm_init_features(kvm);
 
 	/*
 	 * cpu_vabits means user address space only (a half of total).
@@ -136,20 +150,16 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
 		if (cpu_has_lbt_mips)
 			return 0;
 		return -ENXIO;
-	case KVM_LOONGARCH_VM_FEAT_PMU:
-		if (cpu_has_pmp)
-			return 0;
-		return -ENXIO;
-	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
-		return 0;
-	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
-		if (kvm_pvtime_supported())
-			return 0;
-		return -ENXIO;
 	case KVM_LOONGARCH_VM_FEAT_PTW:
 		if (cpu_has_ptw)
 			return 0;
 		return -ENXIO;
+	case KVM_LOONGARCH_VM_FEAT_PMU:
+	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
+		if (kvm_vm_support(&kvm->arch, attr->attr))
+			return 0;
+		return -ENXIO;
 	default:
 		return -ENXIO;
 	}
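
For reference, this switch is reached from userspace through the VM file
descriptor's device-attribute interface. A minimal sketch of how a VMM could
probe the PMU feature, assuming the standard KVM_HAS_DEVICE_ATTR ioctl and
the KVM_LOONGARCH_VM_FEAT_CTRL group from the LoongArch uAPI headers (the
helper name is illustrative, not part of the patch):

/* Illustrative userspace probe; returns 1 if the VM can expose a PMU. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vm_has_pmu_feature(int vm_fd)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_LOONGARCH_VM_FEAT_CTRL;	/* VM feature-control group */
	attr.attr  = KVM_LOONGARCH_VM_FEAT_PMU;		/* handled by kvm_vm_feature_has_attr() */

	/* ioctl() returns 0 when the feature is supported, -1 (ENXIO) otherwise. */
	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}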