mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 04:29:11 -04:00
KVM: x86/mmu: Don't force flush if SPTE update clears Accessed bit

Don't force a TLB flush if mmu_spte_update() clears the Accessed bit, as
access tracking tolerates false negatives, as evidenced by the mmu_notifier
hooks that explicitly test and age SPTEs without doing a TLB flush.

In practice, this is very nearly a nop. spte_write_protect() and
spte_clear_dirty() never clear the Accessed bit. make_spte() always sets the
Accessed bit for !prefetch scenarios. FNAME(sync_spte) only sets the SPTE if
the protection bits are changing, i.e. if a flush will be needed regardless
of the Accessed bits. And FNAME(pte_prefetch) sets the SPTE if and only if
the old SPTE is !PRESENT.

That leaves kvm_arch_async_page_ready() as the one path that will generate a
!ACCESSED SPTE *and* overwrite a PRESENT SPTE. And that's very arguably a
bug, as clobbering a valid SPTE in that case is nonsensical.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Link: https://lore.kernel.org/r/20241011021051.1557902-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
@@ -521,36 +521,24 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
|
||||
* not whether or not SPTEs were modified, i.e. only the write-tracking case
|
||||
* needs to flush at the time the SPTEs is modified, before dropping mmu_lock.
|
||||
*
|
||||
* Remote TLBs also need to be flushed if the Dirty bit is cleared, as false
|
||||
* negatives are not acceptable, e.g. if KVM is using D-bit based PML on VMX.
|
||||
*
|
||||
* Don't flush if the Accessed bit is cleared, as access tracking tolerates
|
||||
* false negatives, and the one path that does care about TLB flushes,
|
||||
* kvm_mmu_notifier_clear_flush_young(), uses mmu_spte_update_no_track().
|
||||
*
|
||||
* Returns true if the TLB needs to be flushed
|
||||
*/
|
||||
/*
 * Update an SPTE and return true if a remote TLB flush is needed.
 *
 * A flush is required when write access is being removed (the mmu-writable
 * flag is cleared) or when the Dirty bit is cleared, as false negatives are
 * not acceptable for either, e.g. if KVM is using D-bit based PML on VMX.
 *
 * Don't flush if only the Accessed bit is cleared: access tracking tolerates
 * false negatives, as evidenced by the mmu_notifier aging hooks that test
 * and age SPTEs without flushing.
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	/* No flush is ever needed when installing a previously non-present SPTE. */
	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the SPTE outside of mmu_lock is safe, as the update is
	 * always done atomically, see the comments in
	 * spte_has_volatile_bits().
	 */
	return (is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte)) ||
	       (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte));
}
|
||||
|
||||
/*
|
||||
|
||||
Reference in New Issue
Block a user