KVM: x86/mmu: Detect write #PF to shadow pages during FNAME(fetch) walk
Move the detection of write #PF to shadow pages, i.e. a fault on a write
to a page table that is being shadowed by KVM and that is used to
translate the write itself, from FNAME(is_self_change_mapping) to
FNAME(fetch). There is no need to detect the self-referential write
before kvm_faultin_pfn(), as KVM does not consume
EMULTYPE_WRITE_PF_TO_SP for accesses that resolve to "error or no-slot"
pfns, i.e. KVM doesn't allow retrying MMIO accesses or writes to
read-only memslots.

Detecting the EMULTYPE_WRITE_PF_TO_SP scenario in FNAME(fetch) will
allow dropping FNAME(is_self_change_mapping) entirely, as the hugepage
interaction can be deferred to kvm_mmu_hugepage_adjust().

Cc: Huang Hang <hhuang@linux.alibaba.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20221213125538.81209-1-jiangshanlai@gmail.com
[sean: split to separate patch, write changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230202182817.407394-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
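In effect, the relocated check is a single comparison performed at each level
of the shadow-page walk: if the faulting write targets the same gfn as one of
the guest page-table frames used to translate that write, the fault is flagged
so the emulator can cope with it (EMULTYPE_WRITE_PF_TO_SP) rather than
re-faulting forever. The standalone C sketch below illustrates the test; the
fault_info type and helper name are hypothetical stand-ins, whereas the real
code operates on struct kvm_page_fault inside FNAME(fetch), as in the diff
below.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Hypothetical stand-in for the relevant kvm_page_fault fields. */
    struct fault_info {
            gfn_t gfn;                          /* gfn targeted by the faulting access */
            bool write;                         /* the fault is a write */
            bool write_fault_to_shadow_pgtable; /* output: self-referential write */
    };

    /*
     * Per-level check during the shadow-page walk: table_gfn is the guest
     * frame holding the page table being shadowed at this level.  A write
     * that lands in a frame used to translate itself is flagged.
     */
    void detect_write_to_own_pgtable(struct fault_info *fault, gfn_t table_gfn)
    {
            if (fault->write && table_gfn == fault->gfn)
                    fault->write_fault_to_shadow_pgtable = true;
    }

    int main(void)
    {
            struct fault_info fault = { .gfn = 0x1000, .write = true };

            /* Walk levels whose page tables live in gfns 0x3000, 0x2000, 0x1000. */
            gfn_t table_gfns[] = { 0x3000, 0x2000, 0x1000 };
            for (int i = 0; i < 3; i++)
                    detect_write_to_own_pgtable(&fault, table_gfns[i]);

            printf("self-referential write: %s\n",
                   fault.write_fault_to_shadow_pgtable ? "yes" : "no");
            return 0;
    }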
commit 39fda5d873
parent 258d985f6e
committed by Paolo Bonzini
@@ -685,6 +685,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 		if (sp != ERR_PTR(-EEXIST))
 			link_shadow_page(vcpu, it.sptep, sp);
+
+		if (fault->write && table_gfn == fault->gfn)
+			fault->write_fault_to_shadow_pgtable = true;
 	}
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
@@ -741,17 +744,13 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
  * created when kvm establishes shadow page table that stop kvm using large
  * page size. Do it early can avoid unnecessary #PF and emulation.
  *
- * @write_fault_to_shadow_pgtable will return true if the fault gfn is
- * currently used as its page table.
- *
  * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
  * since the PDPT is always shadowed, that means, we can not use large page
  * size to map the gfn which is used as PDPT.
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, bool user_fault,
-			      bool *write_fault_to_shadow_pgtable)
+			      struct guest_walker *walker, bool user_fault)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
@@ -765,7 +764,6 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
 
 		self_changed |= !(gfn & mask);
-		*write_fault_to_shadow_pgtable |= !gfn;
 	}
 
 	return self_changed;
@@ -826,7 +824,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return r;
 
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
-	      &walker, fault->user, &fault->write_fault_to_shadow_pgtable);
+	      &walker, fault->user);
 
 	if (is_self_change_mapping)
 		fault->max_level = PG_LEVEL_4K;