Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM: x86: Introduce kvm_x86_call() to simplify static calls of kvm_x86_ops
Introduce kvm_x86_call() to streamline the usage of static calls of kvm_x86_ops. The current implementation of these calls is verbose and can lead to alignment challenges, making the code susceptible to exceeding the "80 columns per single line of code" limit defined in the coding-style document. Another issue with the existing implementation is that spelling out the kvm_x86_ prefix at every static_call() site hinders code readability and navigation. kvm_x86_call() is added to improve code readability and maintainability while adhering to the coding style guidelines.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-3-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
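To illustrate the pattern, here is a minimal sketch of the conversion this commit performs. The macro and the get_cpl hook are taken from the diff below; the surrounding lines are simplified for illustration and are not verbatim kernel code.

        /* Wrapper added by this commit: the kvm_x86_ prefix is spelled once, here. */
        #define kvm_x86_call(func) static_call(kvm_x86_##func)

        /* Before: every call site repeats the kvm_x86_ prefix. */
        cpl = static_call(kvm_x86_get_cpl)(vcpu);

        /* After: call sites name only the hook, which shortens lines and makes
         * grepping for the hook name (e.g. "get_cpl") straightforward. */
        cpl = kvm_x86_call(get_cpl)(vcpu);
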
@@ -1874,6 +1874,8 @@ extern bool __read_mostly allow_smaller_maxphyaddr;
 extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;

+#define kvm_x86_call(func) static_call(kvm_x86_##func)
+
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
@@ -1897,7 +1899,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
 	if (kvm_x86_ops.flush_remote_tlbs &&
-	    !static_call(kvm_x86_flush_remote_tlbs)(kvm))
+	    !kvm_x86_call(flush_remote_tlbs)(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -1910,7 +1912,7 @@ static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
 	if (!kvm_x86_ops.flush_remote_tlbs_range)
 		return -EOPNOTSUPP;

-	return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
+	return kvm_x86_call(flush_remote_tlbs_range)(kvm, gfn, nr_pages);
 }
 #endif /* CONFIG_HYPERV */

@@ -2309,12 +2311,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)

 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	static_call(kvm_x86_vcpu_blocking)(vcpu);
+	kvm_x86_call(vcpu_blocking)(vcpu);
 }

 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	static_call(kvm_x86_vcpu_unblocking)(vcpu);
+	kvm_x86_call(vcpu_unblocking)(vcpu);
 }

 static inline int kvm_cpu_get_apicid(int mps_cpu)
@@ -400,7 +400,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 						    vcpu->arch.cpuid_nent));

 	/* Invoke the vendor callback only after the above state is updated. */
-	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
+	kvm_x86_call(vcpu_after_set_cpuid)(vcpu);

 	/*
 	 * Except for the MMU, which needs to do its thing any vendor specific
@@ -1417,7 +1417,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		}

 		/* vmcall/vmmcall */
-		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
+		kvm_x86_call(patch_hypercall)(vcpu, instructions + i);
 		i += 3;

 		/* ret */
@@ -1986,7 +1986,7 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 		 */
 		gva = entries[i] & PAGE_MASK;
 		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
-			static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
+			kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);

 		++vcpu->stat.tlb_flush;
 	}
@@ -2527,7 +2527,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	 * hypercall generates UD from non zero cpl and real mode
 	 * per HYPER-V spec
 	 */
-	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
+	if (kvm_x86_call(get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -157,7 +157,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	__kvm_migrate_apic_timer(vcpu);
 	__kvm_migrate_pit_timer(vcpu);
-	static_call(kvm_x86_migrate_timers)(vcpu);
+	kvm_x86_call(migrate_timers)(vcpu);
 }

 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
@@ -98,7 +98,7 @@ static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg
 		return 0;

 	if (!kvm_register_is_available(vcpu, reg))
-		static_call(kvm_x86_cache_reg)(vcpu, reg);
+		kvm_x86_call(cache_reg)(vcpu, reg);

 	return vcpu->arch.regs[reg];
 }
@@ -138,7 +138,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	might_sleep();  /* on svm */

 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);
+		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -153,7 +153,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
-		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
+		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }

@@ -175,7 +175,7 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
-		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
+		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }

@@ -190,7 +190,7 @@ static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
+		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }

@@ -738,8 +738,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(apic->apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
-						       apic_find_highest_irr(apic));
+		kvm_x86_call(hwapic_irr_update)(apic->vcpu,
+						apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
@@ -765,7 +765,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(vec);
+		kvm_x86_call(hwapic_isr_update)(vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -810,7 +810,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -946,7 +946,7 @@ static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
 	if (kvm_x86_ops.sync_pir_to_irr)
-		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
+		highest_irr = kvm_x86_call(sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
@@ -1338,8 +1338,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 					     apic->regs + APIC_TMR);
 		}

-		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
-						       trig_mode, vector);
+		kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
+						trig_mode, vector);
 		break;

 	case APIC_DM_REMRD:
@@ -2105,7 +2105,7 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
 {
 	WARN_ON(preemptible());
 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
-	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
+	kvm_x86_call(cancel_hv_timer)(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
 }

@@ -2122,7 +2122,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
 	if (!ktimer->tscdeadline)
 		return false;

-	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
+	if (kvm_x86_call(set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
 		return false;

 	ktimer->hv_timer_in_use = true;
@@ -2577,7 +2577,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)

 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
+		kvm_x86_call(set_virtual_apic_mode)(vcpu);
 	}

 	apic->base_address = apic->vcpu->arch.apic_base &
@@ -2687,7 +2687,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u64 msr_val;
 	int i;

-	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+	kvm_x86_call(apicv_pre_state_restore)(vcpu);

 	if (!init_event) {
 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -2742,9 +2742,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (apic->apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
-		static_call(kvm_x86_hwapic_isr_update)(-1);
+		kvm_x86_call(apicv_post_state_restore)(vcpu);
+		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
+		kvm_x86_call(hwapic_isr_update)(-1);
 	}

 	vcpu->arch.apic_arb_prio = 0;
@@ -2840,7 +2840,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic = apic;

 	if (kvm_x86_ops.alloc_apic_backing_page)
-		apic->regs = static_call(kvm_x86_alloc_apic_backing_page)(vcpu);
+		apic->regs = kvm_x86_call(alloc_apic_backing_page)(vcpu);
 	else
 		apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 	if (!apic->regs) {
@@ -3019,7 +3019,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int r;

-	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
+	kvm_x86_call(apicv_pre_state_restore)(vcpu);

 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
 	/* set SPIV separately to get count of SW disabled APICs right */
@@ -3046,9 +3046,10 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
 	kvm_apic_update_apicv(vcpu);
 	if (apic->apicv_active) {
-		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(apicv_post_state_restore)(vcpu);
+		kvm_x86_call(hwapic_irr_update)(vcpu,
+						apic_find_highest_irr(apic));
+		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
@@ -3336,7 +3337,8 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 		/* evaluate pending_events before reading the vector */
 		smp_rmb();
 		sipi_vector = apic->sipi_vector;
-		static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
+		kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
+						       sipi_vector);
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 }
@@ -235,7 +235,7 @@ static inline bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
 static inline bool kvm_apic_init_sipi_allowed(struct kvm_vcpu *vcpu)
 {
 	return !is_smm(vcpu) &&
-	       !static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
+	       !kvm_x86_call(apic_init_signal_blocked)(vcpu);
 }

 static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
@@ -138,8 +138,8 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(root_hpa))
 		return;

-	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
-					  vcpu->arch.mmu->root_role.level);
+	kvm_x86_call(load_mmu_pgd)(vcpu, root_hpa,
+				   vcpu->arch.mmu->root_role.level);
 }

 static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
@@ -174,7 +174,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 {
 	/* strip nested paging fault error codes */
 	unsigned int pfec = access;
-	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+	unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);

 	/*
 	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
@@ -4331,7 +4331,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
 	if (max_level == PG_LEVEL_4K)
 		return PG_LEVEL_4K;

-	req_max_level = static_call(kvm_x86_private_max_mapping_level)(kvm, pfn);
+	req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
 	if (req_max_level)
 		max_level = min(max_level, req_max_level);

@@ -5741,7 +5741,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	 * stale entries. Flushing on alloc also allows KVM to skip the TLB
 	 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
 	 */
-	static_call(kvm_x86_flush_tlb_current)(vcpu);
+	kvm_x86_call(flush_tlb_current)(vcpu);
 out:
 	return r;
 }
@@ -6113,7 +6113,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		if (is_noncanonical_address(addr, vcpu))
 			return;

-		static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
+		kvm_x86_call(flush_tlb_gva)(vcpu, addr);
 	}

 	if (!mmu->sync_spte)
@@ -210,8 +210,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= PT_PAGE_SIZE_MASK;

 	if (shadow_memtype_mask)
-		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
-							 kvm_is_mmio_pfn(pfn));
+		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+						  kvm_is_mmio_pfn(pfn));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
@@ -596,7 +596,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 		return 1;

 	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
-	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
+	    (kvm_x86_call(get_cpl)(vcpu) != 0) &&
 	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
 		return 1;

@@ -857,7 +857,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 	if (select_os == select_user)
 		return select_os;

-	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
+	return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os :
+							 select_user;
 }

 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
@@ -200,11 +200,11 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
 	enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
 	enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);

-	static_call(kvm_x86_get_gdt)(vcpu, &dt);
+	kvm_x86_call(get_gdt)(vcpu, &dt);
 	smram->gdtr.base = dt.address;
 	smram->gdtr.limit = dt.size;

-	static_call(kvm_x86_get_idt)(vcpu, &dt);
+	kvm_x86_call(get_idt)(vcpu, &dt);
 	smram->idtr.base = dt.address;
 	smram->idtr.limit = dt.size;

@@ -220,7 +220,7 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
 	smram->smm_revision = 0x00020000;
 	smram->smbase = vcpu->arch.smbase;

-	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
+	smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
 }

 #ifdef CONFIG_X86_64
@@ -250,13 +250,13 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,

 	enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);

-	static_call(kvm_x86_get_idt)(vcpu, &dt);
+	kvm_x86_call(get_idt)(vcpu, &dt);
 	smram->idtr.limit = dt.size;
 	smram->idtr.base = dt.address;

 	enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);

-	static_call(kvm_x86_get_gdt)(vcpu, &dt);
+	kvm_x86_call(get_gdt)(vcpu, &dt);
 	smram->gdtr.limit = dt.size;
 	smram->gdtr.base = dt.address;

@@ -267,7 +267,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
 	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
 	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);

-	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
+	smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
 }
 #endif

@@ -297,7 +297,7 @@ void enter_smm(struct kvm_vcpu *vcpu)
 	 * Kill the VM in the unlikely case of failure, because the VM
 	 * can be in undefined state in this case.
 	 */
-	if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
+	if (kvm_x86_call(enter_smm)(vcpu, &smram))
 		goto error;

 	kvm_smm_changed(vcpu, true);
@@ -305,24 +305,24 @@ void enter_smm(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
 		goto error;

-	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
+	if (kvm_x86_call(get_nmi_mask)(vcpu))
 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
 	else
-		static_call(kvm_x86_set_nmi_mask)(vcpu, true);
+		kvm_x86_call(set_nmi_mask)(vcpu, true);

 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	kvm_rip_write(vcpu, 0x8000);

-	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+	kvm_x86_call(set_interrupt_shadow)(vcpu, 0);

 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
-	static_call(kvm_x86_set_cr0)(vcpu, cr0);
+	kvm_x86_call(set_cr0)(vcpu, cr0);

-	static_call(kvm_x86_set_cr4)(vcpu, 0);
+	kvm_x86_call(set_cr4)(vcpu, 0);

 	/* Undocumented: IDT limit is set to zero on entry to SMM. */
 	dt.address = dt.size = 0;
-	static_call(kvm_x86_set_idt)(vcpu, &dt);
+	kvm_x86_call(set_idt)(vcpu, &dt);

 	if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
 		goto error;
@@ -354,7 +354,7 @@ void enter_smm(struct kvm_vcpu *vcpu)

 #ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		if (static_call(kvm_x86_set_efer)(vcpu, 0))
+		if (kvm_x86_call(set_efer)(vcpu, 0))
 			goto error;
 #endif

@@ -479,11 +479,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,

 	dt.address = smstate->gdtr.base;
 	dt.size = smstate->gdtr.limit;
-	static_call(kvm_x86_set_gdt)(vcpu, &dt);
+	kvm_x86_call(set_gdt)(vcpu, &dt);

 	dt.address = smstate->idtr.base;
 	dt.size = smstate->idtr.limit;
-	static_call(kvm_x86_set_idt)(vcpu, &dt);
+	kvm_x86_call(set_idt)(vcpu, &dt);

 	rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
 	rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
@@ -501,7 +501,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 	if (r != X86EMUL_CONTINUE)
 		return r;

-	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+	kvm_x86_call(set_interrupt_shadow)(vcpu, 0);
 	ctxt->interruptibility = (u8)smstate->int_shadow;

 	return r;
@@ -535,13 +535,13 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,

 	dt.size = smstate->idtr.limit;
 	dt.address = smstate->idtr.base;
-	static_call(kvm_x86_set_idt)(vcpu, &dt);
+	kvm_x86_call(set_idt)(vcpu, &dt);

 	rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR);

 	dt.size = smstate->gdtr.limit;
 	dt.address = smstate->gdtr.base;
-	static_call(kvm_x86_set_gdt)(vcpu, &dt);
+	kvm_x86_call(set_gdt)(vcpu, &dt);

 	r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4);
 	if (r != X86EMUL_CONTINUE)
@@ -554,7 +554,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 	rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
 	rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);

-	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+	kvm_x86_call(set_interrupt_shadow)(vcpu, 0);
 	ctxt->interruptibility = (u8)smstate->int_shadow;

 	return X86EMUL_CONTINUE;
@@ -576,7 +576,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;

 	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
-		static_call(kvm_x86_set_nmi_mask)(vcpu, false);
+		kvm_x86_call(set_nmi_mask)(vcpu, false);

 	kvm_smm_changed(vcpu, false);

@@ -628,7 +628,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 	 * state (e.g. enter guest mode) before loading state from the SMM
 	 * state-save area.
 	 */
-	if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
+	if (kvm_x86_call(leave_smm)(vcpu, &smram))
 		return X86EMUL_UNHANDLEABLE;

 #ifdef CONFIG_X86_64
@@ -314,12 +314,12 @@ TRACE_EVENT(name, \
 		__entry->guest_rip = kvm_rip_read(vcpu); \
 		__entry->isa = isa; \
 		__entry->vcpu_id = vcpu->vcpu_id; \
-		static_call(kvm_x86_get_exit_info)(vcpu, \
-						   &__entry->exit_reason, \
-						   &__entry->info1, \
-						   &__entry->info2, \
-						   &__entry->intr_info, \
-						   &__entry->error_code); \
+		kvm_x86_call(get_exit_info)(vcpu, \
+					    &__entry->exit_reason, \
+					    &__entry->info1, \
+					    &__entry->info2, \
+					    &__entry->intr_info, \
+					    &__entry->error_code); \
 	), \
 \
 	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \
@@ -828,7 +828,8 @@ TRACE_EVENT(kvm_emulate_insn,
 		),

 	TP_fast_assign(
-		__entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS);
+		__entry->csbase = kvm_x86_call(get_segment_base)(vcpu,
+								 VCPU_SREG_CS);
 		__entry->len = vcpu->arch.emulate_ctxt->fetch.ptr
 			       - vcpu->arch.emulate_ctxt->fetch.data;
 		__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
[One file's diff was suppressed by the viewer because it is too large to display.]
@@ -173,7 +173,7 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)

 	if (!is_long_mode(vcpu))
 		return false;
-	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
+	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
 	return cs_l;
 }

@@ -1270,7 +1270,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
 		instructions[0] = 0xb8;

 		/* vmcall / vmmcall */
-		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);
+		kvm_x86_call(patch_hypercall)(vcpu, instructions + 5);

 		/* ret */
 		instructions[8] = 0xc3;
@@ -1650,7 +1650,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 		params[5] = (u64)kvm_r9_read(vcpu);
 	}
 #endif
-	cpl = static_call(kvm_x86_get_cpl)(vcpu);
+	cpl = kvm_x86_call(get_cpl)(vcpu);
 	trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
 				params[3], params[4], params[5]);
