Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM: x86/mmu: Add a dedicated flag to track if A/D bits are globally enabled
Add a dedicated flag to track if KVM has enabled A/D bits at the module
level, instead of inferring the state based on whether or not the MMU's
shadow_accessed_mask is non-zero.

This will allow defining and using shadow_accessed_mask even when A/D
bits aren't used by hardware.

Link: https://lore.kernel.org/r/20241011021051.1557902-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
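In short, the patch swaps an inferred predicate for a dedicated module-wide
flag. A condensed before/after view, assembled from the hunks below
(surrounding code elided):

/* Before: the helper in spte.h inferred A/D state from the accessed mask. */
static inline bool kvm_ad_enabled(void)
{
        return !!shadow_accessed_mask;
}

/* After: a dedicated flag in spte.c, set once during module setup.
 * Decoupling the two lets shadow_accessed_mask stay defined even when
 * hardware A/D bits are unused; callers now test the flag directly:
 */
bool __read_mostly kvm_ad_enabled;

        role.ad_disabled = !kvm_ad_enabled;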
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -3357,7 +3357,7 @@ static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault
         * by setting the Writable bit, which can be done out of mmu_lock.
         */
        if (!fault->present)
-               return !kvm_ad_enabled();
+               return !kvm_ad_enabled;
 
        /*
         * Note, instruction fetches and writes are mutually exclusive, ignore
@@ -3492,7 +3492,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                 * uses A/D bits for non-nested MMUs. Thus, if A/D bits are
                 * enabled, the SPTE can't be an access-tracked SPTE.
                 */
-               if (unlikely(!kvm_ad_enabled()) && is_access_track_spte(spte))
+               if (unlikely(!kvm_ad_enabled) && is_access_track_spte(spte))
                        new_spte = restore_acc_track_spte(new_spte);
 
                /*
@@ -5469,7 +5469,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
        role.efer_nx = true;
        role.smm = cpu_role.base.smm;
        role.guest_mode = cpu_role.base.guest_mode;
-       role.ad_disabled = !kvm_ad_enabled();
+       role.ad_disabled = !kvm_ad_enabled;
        role.level = kvm_mmu_get_tdp_level(vcpu);
        role.direct = true;
        role.has_4_byte_gpte = false;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
@@ -24,6 +24,8 @@ static bool __ro_after_init allow_mmio_caching;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
 EXPORT_SYMBOL_GPL(enable_mmio_caching);
 
+bool __read_mostly kvm_ad_enabled;
+
 u64 __read_mostly shadow_host_writable_mask;
 u64 __read_mostly shadow_mmu_writable_mask;
 u64 __read_mostly shadow_nx_mask;
@@ -414,6 +416,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
 
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 {
+       kvm_ad_enabled = has_ad_bits;
+
        shadow_user_mask = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
@@ -447,6 +451,8 @@ void kvm_mmu_reset_all_pte_masks(void)
        u8 low_phys_bits;
        u64 mask;
 
+       kvm_ad_enabled = true;
+
        /*
         * If the CPU has 46 or less physical address bits, then set an
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
@@ -167,6 +167,15 @@ static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));
 #define SHADOW_NONPRESENT_VALUE 0ULL
 #endif
 
+
+/*
+ * True if A/D bits are supported in hardware and are enabled by KVM. When
+ * enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can disable
+ * A/D bits in EPTP12, SP and SPTE variants are needed to handle the scenario
+ * where KVM is using A/D bits for L1, but not L2.
+ */
+extern bool __read_mostly kvm_ad_enabled;
+
 extern u64 __read_mostly shadow_host_writable_mask;
 extern u64 __read_mostly shadow_mmu_writable_mask;
 extern u64 __read_mostly shadow_nx_mask;
@@ -285,17 +294,6 @@ static inline bool is_ept_ve_possible(u64 spte)
               (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
 }
 
-/*
- * Returns true if A/D bits are supported in hardware and are enabled by KVM.
- * When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
- * disable A/D bits in EPTP12, SP and SPTE variants are needed to handle the
- * scenario where KVM is using A/D bits for L1, but not L2.
- */
-static inline bool kvm_ad_enabled(void)
-{
-       return !!shadow_accessed_mask;
-}
-
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
 {
        return sp->role.ad_disabled;
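Note that the global flag deliberately coexists with the per-shadow-page state
left visible at the end of the hunk above: because L1 can disable A/D bits in
EPTP12, kvm_ad_enabled being true does not mean every shadow page runs with A/D
bits. A minimal sketch of the distinction, using names from the surrounding
context (the local variables and the sp pointer are hypothetical):

/* Module-wide: did KVM enable A/D bits at all? (the new flag) */
bool ad_globally_enabled = kvm_ad_enabled;

/* Per shadow page: a given MMU may still have A/D disabled, e.g. an L2
 * MMU whose EPTP12 turns A/D bits off (pre-existing helper, shown in
 * the hunk above). */
bool ad_off_for_this_sp = sp_ad_disabled(sp);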
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1075,7 +1075,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
                           struct kvm_mmu_page *sp, bool shared)
 {
-       u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
+       u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
        int ret = 0;
 
        if (shared) {
@@ -1491,7 +1491,7 @@ static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
         * from level, so it is valid to key off any shadow page to determine if
         * write protection is needed for an entire tree.
         */
-       return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
+       return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled;
 }
 
 static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
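For orientation, the flag's life cycle after this patch, as a sketch; the EPT
call below is an assumption based on the kvm_mmu_set_ept_masks() signature
patched above, not a call site shown in this diff:

/* Common setup: legacy/shadow paging always has A/D bits, so
 * kvm_mmu_reset_all_pte_masks() defaults the flag to true. */
kvm_ad_enabled = true;

/* EPT setup then overrides the default based on hardware support
 * (hypothetical caller, illustrating the hook patched above): */
kvm_mmu_set_ept_masks(/*has_ad_bits=*/false, /*has_exec_only=*/true);
/* -> kvm_ad_enabled = false, shadow_accessed_mask = 0 */

Readers such as tdp_mmu_need_write_protect() above now consult the flag instead
of inferring the state from shadow_accessed_mask, which, per the changelog, can
later be defined and used even when hardware A/D bits aren't in play.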