mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 09:02:21 -04:00
LoongArch: KVM: Add paravirt preempt feature in hypervisor side
The KVM_FEATURE_PREEMPT feature is added to indicate whether a vCPU is currently preempted, which helps the guest OS make better scheduling and lock-contention decisions. Add the KVM_FEATURE_PREEMPT feature bit and use one byte in the steal-time structure as the preempted flag. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
@@ -165,6 +165,7 @@ enum emulation_result {
|
||||
|
||||
#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
|
||||
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
|
||||
BIT(KVM_FEATURE_PREEMPT) | \
|
||||
BIT(KVM_FEATURE_STEAL_TIME) | \
|
||||
BIT(KVM_FEATURE_USER_HCALL) | \
|
||||
BIT(KVM_FEATURE_VIRT_EXTIOI))
|
||||
@@ -254,6 +255,7 @@ struct kvm_vcpu_arch {
|
||||
u64 guest_addr;
|
||||
u64 last_steal;
|
||||
struct gfn_to_hva_cache cache;
|
||||
u8 preempted;
|
||||
} st;
|
||||
};
|
||||
|
||||
|
||||
@@ -37,8 +37,10 @@ struct kvm_steal_time {
|
||||
__u64 steal;
|
||||
__u32 version;
|
||||
__u32 flags;
|
||||
__u32 pad[12];
|
||||
__u8 preempted;
|
||||
__u8 pad[47];
|
||||
};
|
||||
#define KVM_VCPU_PREEMPTED (1 << 0)
|
||||
|
||||
/*
|
||||
* Hypercall interface for KVM hypervisor
|
||||
|
||||
@@ -105,6 +105,7 @@ struct kvm_fpu {
|
||||
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
|
||||
#define KVM_LOONGARCH_VM_FEAT_PTW 8
|
||||
#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
|
||||
#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10
|
||||
|
||||
/* Device Control API on vcpu fd */
|
||||
#define KVM_LOONGARCH_VCPU_CPUCFG 0
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
|
||||
#define KVM_FEATURE_IPI 1
|
||||
#define KVM_FEATURE_STEAL_TIME 2
|
||||
#define KVM_FEATURE_PREEMPT 3
|
||||
/* BIT 24 - 31 are features configurable by user space vmm */
|
||||
#define KVM_FEATURE_VIRT_EXTIOI 24
|
||||
#define KVM_FEATURE_USER_HCALL 25
|
||||
|
||||
@@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
st = (struct kvm_steal_time __user *)ghc->hva;
|
||||
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
|
||||
unsafe_put_user(0, &st->preempted, out);
|
||||
vcpu->arch.st.preempted = 0;
|
||||
}
|
||||
|
||||
unsafe_get_user(version, &st->version, out);
|
||||
if (version & 1)
|
||||
version += 1; /* first time write, random junk */
|
||||
@@ -1795,11 +1800,57 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Publish the "preempted" flag into the guest's steal-time page so the
 * guest scheduler / lock code can see that this vCPU lost the CPU.
 *
 * Called on the vCPU-put (preemption) path; writes KVM_VCPU_PREEMPTED
 * into the guest-shared struct kvm_steal_time::preempted byte and caches
 * the fact in vcpu->arch.st.preempted so repeated preemptions do not
 * rewrite guest memory.
 *
 * NOTE(review): caller must hold the kvm->srcu read lock, since
 * kvm_memslots() / the gfn-to-hva cache generation are consulted below —
 * confirm at the call site (not visible in this excerpt).
 */
static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
{
	gpa_t gpa;
	struct gfn_to_hva_cache *ghc;
	struct kvm_memslots *slots;
	struct kvm_steal_time __user *st;

	/* Guest has not registered a steal-time area; nothing to publish. */
	gpa = vcpu->arch.st.guest_addr;
	if (!(gpa & KVM_STEAL_PHYS_VALID))
		return;

	/*
	 * A vCPU may be preempted many times between guest runs; the flag
	 * only needs to be written once, so skip if it is already set.
	 */
	if (vcpu->arch.st.preempted)
		return;

	/*
	 * This happens on process exit: the vCPU thread may be torn down
	 * from a context (e.g. a workqueue) whose mm is not the guest's,
	 * where touching guest memory via user accessors would be invalid.
	 */
	if (unlikely(current->mm != vcpu->kvm->mm))
		return;

	/* Strip the valid bit to recover the raw guest physical address. */
	gpa &= KVM_STEAL_PHYS_MASK;
	ghc = &vcpu->arch.st.cache;
	slots = kvm_memslots(vcpu->kvm);
	/*
	 * Revalidate the cached gpa->hva translation if the memslots have
	 * changed (generation bumped) or the guest moved the area.
	 */
	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
			/* Translation failed; poison the cache and give up. */
			ghc->gpa = INVALID_GPA;
			return;
		}
	}

	st = (struct kvm_steal_time __user *)ghc->hva;
	/*
	 * NOTE(review): unsafe_put_user() normally requires an enclosing
	 * user_access_begin()/user_access_end() pair; none is visible in
	 * this excerpt — confirm against the full file. On fault it jumps
	 * to "out" without setting the cached preempted flag, so a later
	 * preemption will retry the write.
	 */
	unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
	vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
out:
	/* Tell dirty logging the steal-time page (may have) changed. */
	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
|
||||
|
||||
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int cpu;
|
||||
int cpu, idx;
|
||||
unsigned long flags;
|
||||
|
||||
if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
|
||||
/*
|
||||
* Take the srcu lock as memslots will be accessed to check
|
||||
* the gfn cache generation against the memslots generation.
|
||||
*/
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
kvm_vcpu_set_pv_preempted(vcpu);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
}
|
||||
|
||||
local_irq_save(flags);
|
||||
cpu = smp_processor_id();
|
||||
vcpu->arch.last_sched_cpu = cpu;
|
||||
|
||||
@@ -52,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm)
|
||||
kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
|
||||
kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
|
||||
if (kvm_pvtime_supported()) {
|
||||
kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
|
||||
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
|
||||
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
|
||||
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
|
||||
}
|
||||
}
|
||||
@@ -154,6 +156,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
|
||||
case KVM_LOONGARCH_VM_FEAT_MSGINT:
|
||||
case KVM_LOONGARCH_VM_FEAT_PMU:
|
||||
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
|
||||
case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
|
||||
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
|
||||
if (kvm_vm_support(&kvm->arch, attr->attr))
|
||||
return 0;
|
||||
|
||||
Reference in New Issue
Block a user