mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-07 13:57:54 -04:00
KVM: x86/mmu: Try "unprotect for retry" iff there are indirect SPs
Try to unprotect shadow pages if and only if indirect_shadow_pages is non-zero, i.e. iff there is at least one protected such shadow page. Pre-checking indirect_shadow_pages avoids taking mmu_lock for write when the gfn is write-protected by a third party, i.e. not for KVM shadow paging, and in the *extremely* unlikely case that a different task has already unprotected the last shadow page. Link: https://lore.kernel.org/r/20240831001538.336683-10-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
@@ -2718,6 +2718,17 @@ bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
	gpa_t gpa = cr2_or_gpa;
	bool r;

	/*
	 * Bail early if there aren't any write-protected shadow pages to avoid
	 * unnecessarily taking mmu_lock lock, e.g. if the gfn is write-tracked
	 * by a third party. Reading indirect_shadow_pages without holding
	 * mmu_lock is safe, as this is purely an optimization, i.e. a false
	 * positive is benign, and a false negative will simply result in KVM
	 * skipping the unprotect+retry path, which is also an optimization.
	 */
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return false;

	if (!vcpu->arch.mmu->root_role.direct)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
Reference in New Issue
Block a user