KVM: x86/mmu: Add common helper to handle prefetching SPTEs

Deduplicate the prefetching code for indirect and direct MMUs.  The core
logic is the same; the only difference is that indirect MMUs need to
prefetch SPTEs one-at-a-time, as contiguous guest virtual addresses aren't
guaranteed to yield contiguous guest physical addresses.
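
As illustration (condensed from the diff below, not new code), the direct
path batches a contiguous range of SPTEs in a single call, while the
shadow-paging path resolves each guest PTE separately and prefetches one
SPTE per call:

  /* Direct MMU: the gfns backing [start, end) are contiguous, so batch them. */
  return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);

  /* Indirect MMU: each gpte is translated on its own, so prefetch one SPTE. */
  return kvm_mmu_prefetch_sptes(vcpu, gfn, spte, 1, pte_access);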

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-40-seanjc@google.com>
---
 2 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c

@@ -2950,32 +2950,41 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 	return ret;
 }
 
-static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *sp,
-				    u64 *start, u64 *end)
+static bool kvm_mmu_prefetch_sptes(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *sptep,
+				   int nr_pages, unsigned int access)
 {
 	struct page *pages[PTE_PREFETCH_NUM];
 	struct kvm_memory_slot *slot;
-	unsigned int access = sp->role.access;
-	int i, ret;
-	gfn_t gfn;
+	int i;
+
+	if (WARN_ON_ONCE(nr_pages > PTE_PREFETCH_NUM))
+		return false;
 
-	gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
 	if (!slot)
-		return -1;
+		return false;
 
-	ret = kvm_prefetch_pages(slot, gfn, pages, end - start);
-	if (ret <= 0)
-		return -1;
+	nr_pages = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
+	if (nr_pages <= 0)
+		return false;
 
-	for (i = 0; i < ret; i++, gfn++, start++) {
-		mmu_set_spte(vcpu, slot, start, access, gfn,
+	for (i = 0; i < nr_pages; i++, gfn++, sptep++) {
+		mmu_set_spte(vcpu, slot, sptep, access, gfn,
 			     page_to_pfn(pages[i]), NULL);
 		kvm_release_page_clean(pages[i]);
 	}
 
-	return 0;
+	return true;
+}
+
+static bool direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
+				     struct kvm_mmu_page *sp,
+				     u64 *start, u64 *end)
+{
+	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
+	unsigned int access = sp->role.access;
+
+	return kvm_mmu_prefetch_sptes(vcpu, gfn, start, end - start, access);
 }
 
 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
@@ -2993,8 +3002,9 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
 		if (is_shadow_present_pte(*spte) || spte == sptep) {
 			if (!start)
 				continue;
-			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
+			if (!direct_pte_prefetch_many(vcpu, sp, start, spte))
 				return;
+
 			start = NULL;
 		} else if (!start)
 			start = spte;

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h

@@ -533,9 +533,7 @@ static bool
 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		     u64 *spte, pt_element_t gpte)
 {
-	struct kvm_memory_slot *slot;
 	unsigned pte_access;
-	struct page *page;
 	gfn_t gfn;
 
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
@@ -545,16 +543,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
-	if (!slot)
-		return false;
-
-	if (kvm_prefetch_pages(slot, gfn, &page, 1) != 1)
-		return false;
-
-	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, page_to_pfn(page), NULL);
-	kvm_release_page_clean(page);
-	return true;
+	return kvm_mmu_prefetch_sptes(vcpu, gfn, spte, 1, pte_access);
 }
 
 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,