mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-13 05:58:24 -04:00
KVM: TDX: Add support for find pending IRQ in a protected local APIC
Add a flag and a hook to KVM's local APIC management to support determining whether or not a TDX guest has a pending IRQ. For TDX vCPUs, the virtual APIC page is owned by the TDX module and cannot be accessed by KVM. As a result, registers that are virtualized by the CPU, e.g. PPR, cannot be read or written by KVM. To deliver interrupts for TDX guests, KVM must send an IRQ to the CPU on the posted interrupt notification vector. And to determine if a TDX vCPU has a pending interrupt, KVM must check if there is an outstanding notification. Return "no interrupt" in kvm_apic_has_interrupt() if the guest APIC is protected to short-circuit the various other flows that try to pull an IRQ out of the vAPIC; the only valid operation is querying _if_ an IRQ is pending, as KVM can't do anything based on _which_ IRQ is pending. Intentionally omit sanity checks from other flows, e.g. PPR update, so as not to degrade non-TDX guests with unnecessary checks. A well-behaved KVM and userspace will never reach those flows for TDX guests, but reaching them is not fatal if something does go awry. For TD exits not due to a HLT TDCALL, skip checking RVI pending in tdx_protected_apic_has_interrupt(). Except for the guest being stupid (e.g., a non-HLT TDCALL in an interrupt shadow), it's not even possible to have an interrupt in RVI that is fully unmasked. There are no CPU flows that modify RVI in the middle of instruction execution. I.e., if RVI is non-zero, then either the interrupt has been pending since before the TD exit, or the instruction that caused the TD exit is in an STI/SS shadow. KVM doesn't care about STI/SS shadows outside of the HALTED case. And if the interrupt was pending before the TD exit, then it _must_ be blocked, otherwise the interrupt would have been serviced at the instruction boundary. For the HLT TDCALL case, it will be handled in a future patch when HLT TDCALL is supported.
Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com> Message-ID: <20250222014757.897978-2-binbin.wu@linux.intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
committed by
Paolo Bonzini
parent
bb723bebde
commit
90cfe144c8
@@ -116,6 +116,7 @@ KVM_X86_OP_OPTIONAL(pi_start_assignment)
|
||||
KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
|
||||
KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
|
||||
KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
|
||||
KVM_X86_OP_OPTIONAL(protected_apic_has_interrupt)
|
||||
KVM_X86_OP_OPTIONAL(set_hv_timer)
|
||||
KVM_X86_OP_OPTIONAL(cancel_hv_timer)
|
||||
KVM_X86_OP(setup_mce)
|
||||
|
||||
@@ -1842,6 +1842,7 @@ struct kvm_x86_ops {
|
||||
void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
|
||||
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
|
||||
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
|
||||
bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu);
|
||||
|
||||
int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
|
||||
bool *expired);
|
||||
|
||||
@@ -100,6 +100,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
|
||||
if (kvm_cpu_has_extint(v))
|
||||
return 1;
|
||||
|
||||
if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
|
||||
return kvm_x86_call(protected_apic_has_interrupt)(v);
|
||||
|
||||
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
|
||||
|
||||
@@ -2967,6 +2967,9 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
||||
if (!kvm_apic_present(vcpu))
|
||||
return -1;
|
||||
|
||||
if (apic->guest_apic_protected)
|
||||
return -1;
|
||||
|
||||
__apic_update_ppr(apic, &ppr);
|
||||
return apic_has_interrupt_for_ppr(apic, ppr);
|
||||
}
|
||||
|
||||
@@ -65,6 +65,8 @@ struct kvm_lapic {
|
||||
bool sw_enabled;
|
||||
bool irr_pending;
|
||||
bool lvt0_in_nmi_mode;
|
||||
/* Select registers in the vAPIC cannot be read/written. */
|
||||
bool guest_apic_protected;
|
||||
/* Number of bits set in ISR. */
|
||||
s16 isr_count;
|
||||
/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
|
||||
|
||||
@@ -62,6 +62,7 @@ static __init int vt_hardware_setup(void)
|
||||
vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
|
||||
vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
|
||||
vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
|
||||
vt_x86_ops.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -649,6 +649,7 @@ int tdx_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
return -EINVAL;
|
||||
|
||||
fpstate_set_confidential(&vcpu->arch.guest_fpu);
|
||||
vcpu->arch.apic->guest_apic_protected = true;
|
||||
|
||||
vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
|
||||
|
||||
@@ -695,6 +696,11 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return pi_has_pending_interrupt(vcpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Compared to vmx_prepare_switch_to_guest(), there is not much to do
|
||||
* as SEAMCALL/SEAMRET calls take care of most of save and restore.
|
||||
|
||||
@@ -135,6 +135,7 @@ int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
|
||||
fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
|
||||
void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
|
||||
void tdx_vcpu_put(struct kvm_vcpu *vcpu);
|
||||
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
|
||||
int tdx_handle_exit(struct kvm_vcpu *vcpu,
|
||||
enum exit_fastpath_completion fastpath);
|
||||
void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
|
||||
@@ -172,6 +173,7 @@ static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediat
|
||||
}
|
||||
static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
|
||||
static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
|
||||
static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
|
||||
static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
|
||||
enum exit_fastpath_completion fastpath) { return 0; }
|
||||
static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
|
||||
|
||||
Reference in New Issue
Block a user