KVM: x86/mmu: pull computation of kvm_mmu_role_regs to kvm_init_mmu
The init_kvm_*mmu functions, with the exception of shadow NPT, do not need to know the full values of CR0/CR4/EFER; they only need to know the bits that make up the "role". This cleanup, however, will take quite a few incremental steps. As a start, pull the common computation of the struct kvm_mmu_role_regs into their caller: all of them extract the struct from the vcpu as the very first step.

Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
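The shape of the change, reduced to a standalone toy sketch (all types and helpers below are simplified stand-ins, not the kernel's real definitions): before the patch, each init_kvm_*mmu function derived the register snapshot itself; after it, kvm_init_mmu derives the snapshot once and hands each callee a const pointer.

#include <stdio.h>

/* Toy stand-ins for the kernel types; names are illustrative only. */
struct kvm_mmu_role_regs {
	unsigned long cr0;
	unsigned long cr4;
	unsigned long long efer;
};

struct kvm_vcpu {
	unsigned long cr0;
	unsigned long cr4;
	unsigned long long efer;
};

static struct kvm_mmu_role_regs vcpu_to_role_regs(const struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0 = vcpu->cr0,
		.cr4 = vcpu->cr4,
		.efer = vcpu->efer,
	};
	return regs;
}

/* Before: each init function derived the snapshot on its own. */
static void init_mmu_before(const struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);

	printf("before: cr0=%#lx\n", regs.cr0);
}

/* After: the caller derives it once and passes a const pointer down. */
static void init_mmu_after(const struct kvm_vcpu *vcpu,
			   const struct kvm_mmu_role_regs *regs)
{
	(void)vcpu; /* the vcpu is still passed, as in the real patch */
	printf("after:  cr0=%#lx\n", regs->cr0);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .cr0 = 0x80050033, .cr4 = 0x720, .efer = 0xd01 };
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(&vcpu);

	init_mmu_before(&vcpu);        /* duplicated computation in the callee */
	init_mmu_after(&vcpu, &regs);  /* computation hoisted into the caller */
	return 0;
}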
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4821,12 +4821,12 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	return role;
 }
 
-static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -4840,7 +4840,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = role_regs_to_root_level(&regs);
+	context->root_level = role_regs_to_root_level(regs);
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -5009,12 +5009,12 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-	kvm_init_shadow_mmu(vcpu, &regs);
+	kvm_init_shadow_mmu(vcpu, regs);
 
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5038,10 +5038,10 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 	return role;
 }
 
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+				const struct kvm_mmu_role_regs *regs)
 {
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
+	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -5081,12 +5081,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+
 	if (mmu_is_nested(vcpu))
-		init_kvm_nested_mmu(vcpu);
+		init_kvm_nested_mmu(vcpu, &regs);
 	else if (tdp_enabled)
-		init_kvm_tdp_mmu(vcpu);
+		init_kvm_tdp_mmu(vcpu, &regs);
 	else
-		init_kvm_softmmu(vcpu);
+		init_kvm_softmmu(vcpu, &regs);
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
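A side benefit of making the snapshot an explicit parameter, already visible in kvm_init_shadow_mmu taking regs: the same init routine can be driven by whichever register set the caller chooses, which is what the shadow-NPT exception mentioned in the message is about (it initializes the MMU from register values other than the vCPU's current ones). A standalone toy illustration, with invented names throughout:

#include <stdio.h>

/* Invented snapshot type mirroring the shape of kvm_mmu_role_regs. */
struct role_regs {
	unsigned long cr0;
	unsigned long cr4;
	unsigned long long efer;
};

/* One init routine, reusable with whichever snapshot the caller passes. */
static void init_shadow(const struct role_regs *regs)
{
	printf("init with cr0=%#lx cr4=%#lx efer=%#llx\n",
	       regs->cr0, regs->cr4, regs->efer);
}

int main(void)
{
	/* The vCPU's current registers vs. an alternate view a nested
	 * setup would supply instead (values are arbitrary). */
	struct role_regs current = { .cr0 = 0x80050033, .cr4 = 0x720, .efer = 0xd01 };
	struct role_regs l1_view = { .cr0 = 0x80000011, .cr4 = 0x020, .efer = 0x500 };

	init_shadow(&current);
	init_shadow(&l1_view);
	return 0;
}

Passing a const pointer rather than the struct by value also documents that the callees only read the snapshot, never recompute or modify it.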