KVM: LoongArch: Mark "struct page" pfns accessed only in "slow" page fault path

Mark pages accessed only in the slow path, before dropping mmu_lock when
faulting in guest memory, so that LoongArch can convert to
kvm_release_faultin_page() without tripping its lockdep assertion on
mmu_lock being held.
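
For context, a minimal sketch of the end state this prepares for. The
wrapper name faultin_done_sketch() is hypothetical and the body is not the
verbatim LoongArch code; kvm_release_faultin_page(), kvm_set_pte() and
mmu_lock are the real players named above:

/*
 * Sketch only: kvm_release_faultin_page() marks the page accessed (and
 * dirty, when @dirty is true) and asserts via lockdep that mmu_lock is
 * held, so every "struct page" update must happen before the lock is
 * dropped.
 */
static void faultin_done_sketch(struct kvm *kvm, struct page *page,
				kvm_pte_t *ptep, kvm_pte_t new_pte,
				bool writable)
{
	spin_lock(&kvm->mmu_lock);

	kvm_set_pte(ptep, new_pte);	/* install the mapping */

	/* Accessed/dirty updates, while mmu_lock is still held. */
	kvm_release_faultin_page(kvm, page, false, writable);

	spin_unlock(&kvm->mmu_lock);
}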

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-67-seanjc@google.com>
Author:    Sean Christopherson
Date:      2024-10-10 11:24:08 -07:00
Committer: Paolo Bonzini
Parent:    0fe133a33e
Commit:    4a2bc01b7a

arch/loongarch/kvm/mmu.c

@@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 {
 	int ret = 0;
-	kvm_pfn_t pfn = 0;
 	kvm_pte_t *ptep, changed, new;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *slot;
-	struct page *page;
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 
 	/* Track access to pages marked old */
 	new = kvm_pte_mkyoung(*ptep);
-	/* call kvm_set_pfn_accessed() after unlock */
-
 	if (write && !kvm_pte_dirty(new)) {
 		if (!kvm_pte_write(new)) {
 			ret = -EFAULT;
@@ -595,23 +591,11 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	}
 
 	changed = new ^ (*ptep);
-	if (changed) {
+	if (changed)
 		kvm_set_pte(ptep, new);
-		pfn = kvm_pte_pfn(new);
-		page = kvm_pfn_to_refcounted_page(pfn);
-		if (page)
-			get_page(page);
-	}
+
 	spin_unlock(&kvm->mmu_lock);
 
-	if (changed) {
-		if (kvm_pte_young(changed))
-			kvm_set_pfn_accessed(pfn);
-
-		if (page)
-			put_page(page);
-	}
-
 	if (kvm_pte_dirty(changed))
 		mark_page_dirty(kvm, gfn);
 
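
Assembled from the hunks above, the tail of kvm_map_page_fast() now reads
(surviving lines only, nothing new):

	changed = new ^ (*ptep);
	if (changed)
		kvm_set_pte(ptep, new);

	spin_unlock(&kvm->mmu_lock);

	if (kvm_pte_dirty(changed))
		mark_page_dirty(kvm, gfn);

Because the fast path no longer touches the struct page after the unlock,
it also no longer needs the get_page()/put_page() pair that kept the
page's refcount elevated across the unlocked region; that accounts for
the bulk of the deleted lines.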