diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b6acb519fea5..340ae2150f91 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3167,26 +3167,44 @@ const struct iommu_ops amd_iommu_ops = {
 static struct irq_chip amd_ir_chip;
 static DEFINE_SPINLOCK(iommu_table_lock);
 
+static int iommu_flush_dev_irt(struct pci_dev *unused, u16 devid, void *data)
+{
+	int ret;
+	struct iommu_cmd cmd;
+	struct amd_iommu *iommu = data;
+
+	build_inv_irt(&cmd, devid);
+	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	return ret;
+}
+
 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 {
 	int ret;
 	u64 data;
 	unsigned long flags;
-	struct iommu_cmd cmd, cmd2;
+	struct iommu_cmd cmd;
+	struct pci_dev *pdev = NULL;
+	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
 
 	if (iommu->irtcachedis_enabled)
 		return;
 
-	build_inv_irt(&cmd, devid);
+	if (dev_data && dev_data->dev && dev_is_pci(dev_data->dev))
+		pdev = to_pci_dev(dev_data->dev);
 
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 
 	data = get_cmdsem_val(iommu);
-	build_completion_wait(&cmd2, iommu, data);
+	build_completion_wait(&cmd, iommu, data);
-	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	if (pdev)
+		ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
+	else
+		ret = iommu_flush_dev_irt(NULL, devid, iommu);
 	if (ret)
 		goto out_err;
-	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+
+	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		goto out_err;
 	raw_spin_unlock_irqrestore(&iommu->lock, flags);