From c126b46e6fa87eb27e08e2120a732ec988f20eb2 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 22 May 2025 18:17:52 -0700
Subject: [PATCH 1/3] KVM: x86: Avoid calling kvm_is_mmio_pfn() when
 kvm_x86_ops.get_mt_mask is NULL

Guard the call to kvm_x86_call(get_mt_mask) with an explicit check on
kvm_x86_ops.get_mt_mask so as to avoid unnecessarily calling
kvm_is_mmio_pfn(), which is moderately expensive for some backing
types.  E.g. lookup_memtype() conditionally takes a system-wide
spinlock if KVM ends up calling pat_pfn_immune_to_uc_mtrr(), e.g. for
DAX memory.

While the call to kvm_x86_ops.get_mt_mask() itself is elided, the
compiler still needs to compute all parameters, as it can't know at
build time that the call will be squashed.

   <+243>: call   0xffffffff812ad880
   <+248>: mov    %r13,%rsi
   <+251>: mov    %rbx,%rdi
   <+254>: movzbl %al,%edx
   <+257>: call   0xffffffff81c26af0 <__SCT__kvm_x86_get_mt_mask>
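
For illustration, a minimal standalone sketch of the pattern; the names
(ops.get_mask, expensive_query(), make_entry()) are hypothetical
stand-ins for kvm_x86_ops.get_mt_mask, kvm_is_mmio_pfn() and
make_spte(), not KVM's actual code:

	struct ops {
		unsigned long (*get_mask)(unsigned long gfn, int is_mmio);
	};

	static struct ops ops;	/* .get_mask is NULL unless the vendor implements it */

	static int expensive_query(unsigned long pfn)
	{
		(void)pfn;
		return 0;	/* pretend this takes a system-wide spinlock */
	}

	static unsigned long make_entry(unsigned long gfn, unsigned long pfn)
	{
		unsigned long entry = 0;

		/*
		 * The explicit NULL check lets the compiler skip
		 * expensive_query() entirely; eliding only the call itself
		 * does not, as the arguments must still be computed.
		 */
		if (ops.get_mask)
			entry |= ops.get_mask(gfn, expensive_query(pfn));

		return entry;
	}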

Fixes: 3fee4837ef40 ("KVM: x86: remove shadow_memtype_mask")
Tested-by: Pawan Gupta
Link: https://lore.kernel.org/r/20250523011756.3243624-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/spte.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index cfce03d8f123..f262c380f40e 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -209,7 +209,9 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
 
-	spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
+	if (kvm_x86_ops.get_mt_mask)
+		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
+
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else

From ffe9d7966d0190a7f6db4dcacda0c8a12084ca09 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 22 May 2025 18:17:53 -0700
Subject: [PATCH 2/3] KVM: x86/mmu: Locally cache whether a PFN is host MMIO
 when making a SPTE

When making a SPTE, cache whether or not the target PFN is host MMIO in
order to avoid multiple rounds of the slow path of kvm_is_mmio_pfn(),
e.g. hitting pat_pfn_immune_to_uc_mtrr() in particular can be
problematic.

KVM currently avoids multiple calls by virtue of the two users being
mutually exclusive (.get_mt_mask() is Intel-only, shadow_me_value is
AMD-only), but that won't hold true if/when KVM needs to detect host
MMIO mappings for other reasons, e.g. for mitigating the MMIO Stale
Data vulnerability.

No functional change intended.
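
The caching scheme, reduced to a standalone sketch with hypothetical
names (slow_is_mmio(), cached_is_mmio(), make_entry()): a negative
value means "not yet computed", and 0/1 memoize the result of the
expensive predicate so it runs at most once per entry:

	static int slow_is_mmio(unsigned long pfn)
	{
		(void)pfn;
		return 0;	/* stand-in for the expensive query */
	}

	static int cached_is_mmio(unsigned long pfn, int *cache)
	{
		if (*cache < 0)
			*cache = slow_is_mmio(pfn);	/* slow path, at most once */
		return *cache;
	}

	static unsigned long make_entry(unsigned long pfn)
	{
		int is_host_mmio = -1;	/* reset for every new entry */
		unsigned long entry = 0;

		if (cached_is_mmio(pfn, &is_host_mmio))		/* first use: slow */
			entry |= 1;
		if (!cached_is_mmio(pfn, &is_host_mmio))	/* second use: cached */
			entry |= 2;

		return entry;
	}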

Tested-by: Pawan Gupta
Link: https://lore.kernel.org/r/20250523011756.3243624-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/spte.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index f262c380f40e..3f16c91aa042 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,19 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
+{
+	/*
+	 * Determining if a PFN is host MMIO is relatively expensive. Cache the
+	 * result locally (in the sole caller) to avoid doing the full query
+	 * multiple times when creating a single SPTE.
+	 */
+	if (*is_host_mmio < 0)
+		*is_host_mmio = __kvm_is_mmio_pfn(pfn);
+
+	return *is_host_mmio;
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +175,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
+	int is_host_mmio = -1;
 	bool wrprot = false;
 
 	/*
@@ -210,14 +224,14 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= PT_PAGE_SIZE_MASK;
 
 	if (kvm_x86_ops.get_mt_mask)
-		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
-
+		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+						  kvm_is_mmio_pfn(pfn, &is_host_mmio));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
-	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+	if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
 		spte |= shadow_me_value;
 
 	spte |= (u64)pfn << PAGE_SHIFT;

From 83ebe715748314331f9639de2220d02debfe926d Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 22 May 2025 18:17:54 -0700
Subject: [PATCH 3/3] KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps
 MMIO into the guest

Enforce the MMIO Stale Data mitigation if KVM has ever mapped host MMIO
into the VM, not if the VM has an assigned device.  VFIO is but one of
many ways to map host MMIO into a KVM guest, and even within VFIO,
formally attaching a device to a VM via KVM_DEV_VFIO_FILE_ADD is
entirely optional.

Track whether or not the guest can access host MMIO on a per-MMU basis,
i.e. based on whether or not the vCPU has a mapping to host MMIO.  For
simplicity, track MMIO mappings in "special" roots (those without a
kvm_mmu_page) at the VM level, as only Intel CPUs are vulnerable, and
so only legacy 32-bit shadow paging is affected, i.e. lack of precise
tracking is a complete non-issue.

Make the per-MMU and per-VM flags sticky.  Detecting when *all* MMIO
mappings have been removed would be absurdly complex.  And in practice,
removing MMIO from a guest will be done by deleting the associated
memslot, which by default will force KVM to re-allocate all roots.
Special roots will forever be mitigated, but as above, the affected
scenarios are not expected to be performance sensitive.

Use a VMX_RUN flag to communicate the need for a buffers flush to
vmx_vcpu_enter_exit() so that kvm_vcpu_can_access_host_mmio() and all
its dependencies don't need to be marked __always_inline, e.g. so that
KASAN doesn't trigger a noinstr violation.
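
To make that split concrete, a simplified sketch of the intended flow
(hypothetical names, not the actual KVM/VMX code): the root walk
happens in instrumentable code when the run flags are computed, and the
noinstr-like entry path only tests a pre-computed bit:

	#define RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	(1u << 2)

	static unsigned int compute_run_flags(int guest_can_access_host_mmio)
	{
		unsigned int flags = 0;

		/* Instrumentable context: safe to walk roots and call helpers. */
		if (guest_can_access_host_mmio)
			flags |= RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;

		return flags;
	}

	static void enter_guest(unsigned int flags)
	{
		/* noinstr-like context: nothing but a bit test is needed. */
		if (flags & RUN_CLEAR_CPU_BUFFERS_FOR_MMIO) {
			/* mds_clear_cpu_buffers() would be invoked here */
		}
	}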

Cc: Pawan Gupta
Cc: Borislav Petkov
Fixes: 8cb861e9e3c9 ("x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data")
Tested-by: Pawan Gupta
Link: https://lore.kernel.org/r/20250523011756.3243624-4-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/mmu/mmu_internal.h |  3 +++
 arch/x86/kvm/mmu/spte.c         | 21 +++++++++++++++++++++
 arch/x86/kvm/mmu/spte.h         | 10 ++++++++++
 arch/x86/kvm/vmx/run_flags.h    | 10 ++++++----
 arch/x86/kvm/vmx/vmx.c          |  8 +++++++-
 6 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdb..6a172c7630f3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1451,6 +1451,7 @@ struct kvm_arch {
 	bool x2apic_format;
 	bool x2apic_broadcast_quirk_disabled;
 
+	bool has_mapped_host_mmio;
 	bool guest_can_read_msr_platform_info;
 	bool exception_payload_enabled;
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index db8f33e4de62..65f3c89d7c5d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
 		int root_count;
 		refcount_t tdp_mmu_root_count;
 	};
+
+	bool has_mapped_host_mmio;
+
 	union {
 		/* These two members aren't used for TDP MMU */
 		struct {
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 3f16c91aa042..df31039b5d63 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -138,6 +138,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
 	return *is_host_mmio;
 }
 
+static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+	if (root)
+		WRITE_ONCE(root->has_mapped_host_mmio, true);
+	else
+		WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
+
+	/*
+	 * Force vCPUs to exit and flush CPU buffers if the vCPU is using the
+	 * affected root(s).
+	 */
+	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -276,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
 	}
 
+	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	    !kvm_vcpu_can_access_host_mmio(vcpu) &&
+	    kvm_is_mmio_pfn(pfn, &is_host_mmio))
+		kvm_track_host_mmio_mapping(vcpu);
+
 	*new_spte = spte;
 	return wrprot;
 }
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1e94f081bdaf..3133f066927e 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
 	return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
 }
 
+static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+	if (root)
+		return READ_ONCE(root->has_mapped_host_mmio);
+
+	return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
+}
+
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
 	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 6a9bfdfbb6e5..2f20fb170def 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,10 +2,12 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME_SHIFT		0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT	1
+#define VMX_RUN_VMRESUME_SHIFT				0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT			1
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT	2
 
-#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_VMRESUME			BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL			BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4953846cb30d..3025b11007fd 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -75,6 +75,8 @@
 #include "vmx_onhyperv.h"
 #include "posted_intr.h"
 
+#include "mmu/spte.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
 MODULE_LICENSE("GPL");
@@ -963,6 +965,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
+	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
+		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
+
 	return flags;
 }
 
@@ -7290,7 +7296,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
 	else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
-		 kvm_arch_has_assigned_device(vcpu->kvm))
+		 (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
 		mds_clear_cpu_buffers();
 
 	vmx_disable_fb_clear(vmx);