mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-14 18:25:48 -04:00
KVM: SVM: Fold avic_set_pi_irte_mode() into its sole caller
Fold avic_set_pi_irte_mode() into avic_refresh_apicv_exec_ctrl() in
anticipation of moving the __avic_vcpu_{load,put}() calls into the
critical section, and because having a one-off helper with a name that's
easily confused with avic_pi_update_irte() is unnecessary.
No functional change intended.
Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-59-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
@@ -729,34 +729,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
|
||||
avic_handle_ldr_update(vcpu);
|
||||
}
|
||||
|
||||
/*
 * Toggle guest mode on every IOMMU interrupt-remapping table entry that
 * targets @vcpu, so posted interrupts are (de)activated to match AVIC state.
 *
 * @vcpu:     the vCPU whose IRTEs should be updated
 * @activate: true to switch the entries into guest (posted) mode targeting
 *            this vCPU's current physical APIC ID, false to revert them to
 *            legacy remapped mode
 *
 * The walk is done under ir_list_lock with IRQs disabled so that entries
 * cannot be added/removed concurrently by irq-bypass (de)registration.
 */
static void avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
	int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
	unsigned long flags;
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_kernel_irqfd *irqfd;

	/*
	 * Walk the per-vCPU ir_list and update every existing interrupt
	 * remapping table entry targeting this vCPU.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (!list_empty(&svm->ir_list)) {
		list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
			void *data = irqfd->irq_bypass_data;

			/* Failure here indicates a KVM/IOMMU state mismatch. */
			if (activate)
				WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
			else
				WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
		}
	}

	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
|
||||
|
||||
static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
|
||||
@@ -991,6 +963,10 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
|
||||
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
bool activated = kvm_vcpu_apicv_active(vcpu);
|
||||
int apic_id = kvm_cpu_get_apicid(vcpu->cpu);
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
struct kvm_kernel_irqfd *irqfd;
|
||||
unsigned long flags;
|
||||
|
||||
if (!enable_apicv)
|
||||
return;
|
||||
@@ -1002,7 +978,25 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
|
||||
else
|
||||
avic_vcpu_put(vcpu);
|
||||
|
||||
avic_set_pi_irte_mode(vcpu, activated);
|
||||
/*
|
||||
* Here, we go through the per-vcpu ir_list to update all existing
|
||||
* interrupt remapping table entry targeting this vcpu.
|
||||
*/
|
||||
spin_lock_irqsave(&svm->ir_list_lock, flags);
|
||||
|
||||
if (list_empty(&svm->ir_list))
|
||||
goto out;
|
||||
|
||||
list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
|
||||
void *data = irqfd->irq_bypass_data;
|
||||
|
||||
if (activated)
|
||||
WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, apic_id));
|
||||
else
|
||||
WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data));
|
||||
}
|
||||
out:
|
||||
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
|
||||
}
|
||||
|
||||
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
|
||||
|
||||
Reference in New Issue
Block a user