diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c17eeeea8e64..a6d6d3973a6b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1069,9 +1069,10 @@ void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
 }
 
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
-static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
-					      struct vcpu_svm *svm)
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
 	/*
 	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
 	 * roots, or if INVPCID is disabled in the guest to inject #UD.
@@ -1090,11 +1091,6 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
 		else
 			svm_set_intercept(svm, INTERCEPT_RDTSCP);
 	}
-}
-
-static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (guest_cpuid_is_intel_compatible(vcpu)) {
 		svm_set_intercept(svm, INTERCEPT_VMLOAD);
@@ -1111,7 +1107,11 @@ static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
 			svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
 		}
 	}
+}
 
+static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+	svm_recalc_instruction_intercepts(vcpu);
 	svm_recalc_msr_intercepts(vcpu);
 }
 
@@ -1237,8 +1237,6 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 		svm_clr_intercept(svm, INTERCEPT_PAUSE);
 	}
 
-	svm_recalc_instruction_intercepts(vcpu, svm);
-
 	if (kvm_vcpu_apicv_active(vcpu))
 		avic_init_vmcb(svm, vmcb);
 
@@ -4499,8 +4497,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	if (guest_cpuid_is_intel_compatible(vcpu))
 		guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
-	svm_recalc_instruction_intercepts(vcpu, svm);
-
 	if (sev_guest(vcpu->kvm))
 		sev_vcpu_after_set_cpuid(svm);
 