mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 11:40:19 -04:00
KVM: x86/mmu: Move the check in FNAME(sync_page) as kvm_sync_page_check()
Prepare to check the mmu->sync_page pointer before calling it.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-3-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Commit: 90e444702a (parent: 753b43c9d1)
Committed by: Sean Christopherson
@@ -1914,10 +1914,51 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
|
||||
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
|
||||
if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
|
||||
|
||||
static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
{
|
||||
union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
|
||||
|
||||
/*
|
||||
* Ignore various flags when verifying that it's safe to sync a shadow
|
||||
* page using the current MMU context.
|
||||
*
|
||||
* - level: not part of the overall MMU role and will never match as the MMU's
|
||||
* level tracks the root level
|
||||
* - access: updated based on the new guest PTE
|
||||
* - quadrant: not part of the overall MMU role (similar to level)
|
||||
*/
|
||||
const union kvm_mmu_page_role sync_role_ign = {
|
||||
.level = 0xf,
|
||||
.access = 0x7,
|
||||
.quadrant = 0x3,
|
||||
.passthrough = 0x1,
|
||||
};
|
||||
|
||||
/*
|
||||
* Direct pages can never be unsync, and KVM should never attempt to
|
||||
* sync a shadow page for a different MMU context, e.g. if the role
|
||||
* differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
|
||||
* reserved bits checks will be wrong, etc...
|
||||
*/
|
||||
if (WARN_ON_ONCE(sp->role.direct ||
|
||||
(sp->role.word ^ root_role.word) & ~sync_role_ign.word))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
{
|
||||
if (!kvm_sync_page_check(vcpu, sp))
|
||||
return -1;
|
||||
|
||||
return vcpu->arch.mmu->sync_page(vcpu, sp);
|
||||
}
|
||||
|
||||
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|
||||
struct list_head *invalid_list)
|
||||
{
|
||||
int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
|
||||
int ret = __kvm_sync_page(vcpu, sp);
|
||||
|
||||
if (ret < 0)
|
||||
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
|
||||
|
||||
@@ -943,38 +943,11 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
||||
*/
|
||||
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
|
||||
{
|
||||
union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
|
||||
int i;
|
||||
bool host_writable;
|
||||
gpa_t first_pte_gpa;
|
||||
bool flush = false;
|
||||
|
||||
/*
|
||||
* Ignore various flags when verifying that it's safe to sync a shadow
|
||||
* page using the current MMU context.
|
||||
*
|
||||
* - level: not part of the overall MMU role and will never match as the MMU's
|
||||
* level tracks the root level
|
||||
* - access: updated based on the new guest PTE
|
||||
* - quadrant: not part of the overall MMU role (similar to level)
|
||||
*/
|
||||
const union kvm_mmu_page_role sync_role_ign = {
|
||||
.level = 0xf,
|
||||
.access = 0x7,
|
||||
.quadrant = 0x3,
|
||||
.passthrough = 0x1,
|
||||
};
|
||||
|
||||
/*
|
||||
* Direct pages can never be unsync, and KVM should never attempt to
|
||||
* sync a shadow page for a different MMU context, e.g. if the role
|
||||
* differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
|
||||
* reserved bits checks will be wrong, etc...
|
||||
*/
|
||||
if (WARN_ON_ONCE(sp->role.direct ||
|
||||
(sp->role.word ^ root_role.word) & ~sync_role_ign.word))
|
||||
return -1;
|
||||
|
||||
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
|
||||
|
||||
for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
|
||||
|
||||
Reference in New Issue
Block a user