KVM: x86: Rename msr_filter_changed() => recalc_msr_intercepts()
Rename msr_filter_changed() to recalc_msr_intercepts() and drop the trampoline wrapper now that both SVM and VMX use a filter-agnostic recalc helper to react to the new userspace filter.

No functional change intended.

Reviewed-by: Xin Li (Intel) <xin@zytor.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Link: https://lore.kernel.org/r/20250610225737.156318-21-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
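For readers unfamiliar with the pattern the commit removes, below is a minimal, standalone C sketch of such a trampoline (hypothetical "example_" names, not kernel code): an ops-table entry points at a wrapper whose only job is to forward to the real recalc helper, and the cleanup is to install the helper in the table directly and delete the wrapper.

#include <stdio.h>

struct vcpu { int id; };

/* The helper that does the real work: recompute MSR intercepts. */
static void example_recalc_msr_intercepts(struct vcpu *v)
{
	printf("recalc MSR intercepts for vcpu %d\n", v->id);
}

/* Old shape: a trampoline that only forwards to the helper. */
static void example_msr_filter_changed(struct vcpu *v)
{
	example_recalc_msr_intercepts(v);
}

/* Simplified stand-in for a kvm_x86_ops-style table of callbacks. */
struct example_ops {
	void (*recalc_msr_intercepts)(struct vcpu *v);
};

int main(void)
{
	/* After the cleanup, the helper is installed directly ... */
	struct example_ops ops = {
		.recalc_msr_intercepts = example_recalc_msr_intercepts,
	};
	struct vcpu v = { .id = 0 };

	ops.recalc_msr_intercepts(&v);

	/* ... and the trampoline above has no remaining callers. */
	(void)example_msr_filter_changed;
	return 0;
}

The actual kernel diff follows; the sketch only mirrors its shape.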
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -138,7 +138,7 @@ KVM_X86_OP(check_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
 KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
 KVM_X86_OP_OPTIONAL(migrate_timers)
-KVM_X86_OP(msr_filter_changed)
+KVM_X86_OP(recalc_msr_intercepts)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1897,7 +1897,7 @@ struct kvm_x86_ops {
 	int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
 
 	void (*migrate_timers)(struct kvm_vcpu *vcpu);
-	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
+	void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
 	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
 
 	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -890,11 +890,6 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 	 */
 }
 
-static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
-{
-	svm_recalc_msr_intercepts(vcpu);
-}
-
 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
 	to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
@@ -923,7 +918,6 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
 
 	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
 	svm_recalc_lbr_msr_intercepts(vcpu);
@@ -5216,7 +5210,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.apic_init_signal_blocked = svm_apic_init_signal_blocked,
 
-	.msr_filter_changed = svm_msr_filter_changed,
+	.recalc_msr_intercepts = svm_recalc_msr_intercepts,
 	.complete_emulated_msr = svm_complete_emulated_msr,
 
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -220,7 +220,7 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return vmx_get_msr(vcpu, msr_info);
 }
 
-static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
+static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * TDX doesn't allow VMM to configure interception of MSR accesses.
@@ -231,7 +231,7 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
 	if (is_td_vcpu(vcpu))
 		return;
 
-	vmx_msr_filter_changed(vcpu);
+	vmx_recalc_msr_intercepts(vcpu);
 }
 
 static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
@@ -1027,7 +1027,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
 	.migrate_timers = vmx_migrate_timers,
 
-	.msr_filter_changed = vt_op(msr_filter_changed),
+	.recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
 	.complete_emulated_msr = vt_op(complete_emulated_msr),
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4085,7 +4085,7 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 {
 	if (!cpu_has_vmx_msr_bitmap())
 		return;
@@ -4134,11 +4134,6 @@ static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
 	 */
 }
 
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
-{
-	vmx_recalc_msr_intercepts(vcpu);
-}
-
 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 					       int vector)
 {
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 			   int trig_mode, int vector);
 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
 bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
-void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
+void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
 int vmx_get_feature_msr(u32 msr, u64 *data);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10927,8 +10927,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_vcpu_update_apicv(vcpu);
 		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
 			kvm_check_async_pf_completion(vcpu);
+
+		/*
+		 * Recalc MSR intercepts as userspace may want to intercept
+		 * accesses to MSRs that KVM would otherwise pass through to
+		 * the guest.
+		 */
 		if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
-			kvm_x86_call(msr_filter_changed)(vcpu);
+			kvm_x86_call(recalc_msr_intercepts)(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
 			kvm_x86_call(update_cpu_dirty_logging)(vcpu);