Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
iommu: Replace per-group resetting_domain with per-gdev blocked flag

The core tracks the device resetting state with a per-group resetting_domain,
while a reset is actually per group-device (gdev). Such a mismatch can lead to
confusion and makes the per-gdev handling requirements hard to untangle.

Shuai found that cxl_reset_bus_function() calls pci_reset_bus_function()
internally, so both end up calling pci_dev_reset_iommu_prepare/done(), and the
fix for that requires the core to track the reset state at the group_device
level as well.

Introduce a 'blocked' flag in struct group_device, so that a multi-device
group can track concurrent device resets independently.

Since the reset routine is per gdev, it cannot simply clear
group->resetting_domain without iterating over the device list to ensure that
no other device is still being reset. Simplify this by replacing
resetting_domain with a 'recovery_cnt' counter in struct iommu_group.

No functional change intended, but this is essential groundwork for the
following bug fixes.

Fixes: c279e83953 ("iommu: Introduce pci_dev_reset_iommu_prepare/done()")
Cc: stable@vger.kernel.org
Reported-by: Shuai Xue <xueshuai@linux.alibaba.com>
Closes: https://lore.kernel.org/all/absKsk7qQOwzhpzv@Asurada-Nvidia/
Reviewed-by: Shuai Xue <xueshuai@linux.alibaba.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
commit b296ca1fb4
parent 834ab85aa9
committed by Joerg Roedel
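Before reading the hunks, a stand-alone sketch of the bookkeeping change described in the log may help. It is illustrative only (ordinary userspace C with invented model_* names, not kernel code): a per-device flag plus a per-group counter lets two devices of the same group be reset concurrently, which a single per-group pointer cannot express.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the new bookkeeping: one flag per group device plus a
 * counter per group; the real code updates both under group->mutex. */
struct model_gdev  { bool blocked; };
struct model_group { unsigned int recovery_cnt; };

static void model_prepare(struct model_group *g, struct model_gdev *d)
{
        assert(!d->blocked);            /* re-entry per device is still rejected */
        d->blocked = true;
        g->recovery_cnt++;              /* group-wide attaches stay rejected */
}

static void model_done(struct model_group *g, struct model_gdev *d)
{
        assert(d->blocked && g->recovery_cnt > 0);
        d->blocked = false;
        g->recovery_cnt--;
}

int main(void)
{
        struct model_group g = { 0 };
        struct model_gdev a = { 0 }, b = { 0 };

        model_prepare(&g, &a);          /* device A enters reset */
        model_prepare(&g, &b);          /* device B too: fine with a counter,
                                         * impossible with one group pointer */
        model_done(&g, &a);             /* A finishes ... */
        assert(b.blocked && g.recovery_cnt == 1);   /* ... B stays protected */
        model_done(&g, &b);
        assert(g.recovery_cnt == 0);
        printf("concurrent per-device resets tracked independently\n");
        return 0;
}

The asserts stand in for the WARN_ON() checks visible in the hunks below.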
@@ -62,14 +62,14 @@ struct iommu_group {
         int id;
         struct iommu_domain *default_domain;
         struct iommu_domain *blocking_domain;
-        /*
-         * During a group device reset, @resetting_domain points to the physical
-         * domain, while @domain points to the attached domain before the reset.
-         */
-        struct iommu_domain *resetting_domain;
         struct iommu_domain *domain;
         struct list_head entry;
         unsigned int owner_cnt;
+        /*
+         * Number of devices in the group undergoing or awaiting recovery.
+         * If non-zero, concurrent domain attachments are rejected.
+         */
+        unsigned int recovery_cnt;
         void *owner;
 };
 
@@ -77,12 +77,32 @@ struct group_device {
         struct list_head list;
         struct device *dev;
         char *name;
+        /*
+         * Device is blocked for a pending recovery while its group->domain is
+         * retained. This can happen when:
+         * - Device is undergoing a reset
+         */
+        bool blocked;
 };
 
 /* Iterate over each struct group_device in a struct iommu_group */
 #define for_each_group_device(group, pos) \
         list_for_each_entry(pos, &(group)->devices, list)
 
+static struct group_device *__dev_to_gdev(struct device *dev)
+{
+        struct iommu_group *group = dev->iommu_group;
+        struct group_device *gdev;
+
+        lockdep_assert_held(&group->mutex);
+
+        for_each_group_device(group, gdev) {
+                if (gdev->dev == dev)
+                        return gdev;
+        }
+        return NULL;
+}
+
 struct iommu_group_attribute {
         struct attribute attr;
         ssize_t (*show)(struct iommu_group *group, char *buf);
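Taken together, the two new fields are meant to stay consistent: under group->mutex, recovery_cnt should equal the number of group devices whose blocked flag is set, since the prepare/done pair updates both in lockstep. A hypothetical debug helper (not part of the patch) could state that invariant like this:

static void assert_recovery_state(struct iommu_group *group)
{
        struct group_device *gdev;
        unsigned int blocked = 0;

        lockdep_assert_held(&group->mutex);

        /* Count the devices currently parked for recovery. */
        for_each_group_device(group, gdev)
                if (gdev->blocked)
                        blocked++;

        WARN_ON(blocked != group->recovery_cnt);
}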
@@ -2196,6 +2216,8 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
 
 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 {
+        struct group_device *gdev;
+
         /*
          * This is called on the dma mapping fast path so avoid locking. This is
          * racy, but we have an expectation that the driver will setup its DMAs
@@ -2206,14 +2228,18 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 
         guard(mutex)(&dev->iommu_group->mutex);
 
+        gdev = __dev_to_gdev(dev);
+        if (WARN_ON(!gdev))
+                return -ENODEV;
+
         /*
-         * This is a concurrent attach during a device reset. Reject it until
+         * This is a concurrent attach during device recovery. Reject it until
          * pci_dev_reset_iommu_done() attaches the device to group->domain.
          *
          * Note that this might fail the iommu_dma_map(). But there's nothing
          * more we can do here.
          */
-        if (dev->iommu_group->resetting_domain)
+        if (gdev->blocked)
                 return -EBUSY;
         return __iommu_attach_device(domain, dev, NULL);
 }
@@ -2270,19 +2296,24 @@ EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
 struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev)
 {
         struct iommu_group *group = dev->iommu_group;
+        struct group_device *gdev;
 
         lockdep_assert_held(&group->mutex);
 
+        gdev = __dev_to_gdev(dev);
+        if (WARN_ON(!gdev))
+                return NULL;
+
         /*
          * Driver handles the low-level __iommu_attach_device(), including the
          * one invoked by pci_dev_reset_iommu_done() re-attaching the device to
          * the cached group->domain. In this case, the driver must get the old
-         * domain from group->resetting_domain rather than group->domain. This
+         * domain from group->blocking_domain rather than group->domain. This
          * prevents it from re-attaching the device from group->domain (old) to
          * group->domain (new).
          */
-        if (group->resetting_domain)
-                return group->resetting_domain;
+        if (gdev->blocked)
+                return group->blocking_domain;
 
         return group->domain;
 }
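As the comment in the hunk above explains, a driver handling the low-level attach must not mistake the retained group->domain for what is actually live on the hardware. A hypothetical driver-side consumer (example_* names are invented; the attach_dev signature follows struct iommu_ops) might use the helper like this:

static int example_attach_dev(struct iommu_domain *new_domain,
                              struct device *dev)
{
        /* What is really programmed right now: group->blocking_domain while
         * the device is blocked for recovery, group->domain otherwise. */
        struct iommu_domain *cur = iommu_driver_get_domain_for_dev(dev);

        if (cur == new_domain)
                return 0;               /* nothing to reprogram */

        /* ... tear down the translation for 'cur', then install 'new_domain' ... */
        return 0;
}

Because the helper reports the blocking domain for a blocked device, the re-attach issued by pci_dev_reset_iommu_done() is handled as a real blocking_domain to group->domain transition rather than a no-op group->domain to group->domain change.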
@@ -2441,10 +2472,10 @@ static int __iommu_group_set_domain_internal(struct iommu_group *group,
                 return -EINVAL;
 
         /*
-         * This is a concurrent attach during a device reset. Reject it until
+         * This is a concurrent attach during device recovery. Reject it until
          * pci_dev_reset_iommu_done() attaches the device to group->domain.
          */
-        if (group->resetting_domain)
+        if (group->recovery_cnt)
                 return -EBUSY;
 
         /*
@@ -3615,10 +3646,10 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
         mutex_lock(&group->mutex);
 
         /*
-         * This is a concurrent attach during a device reset. Reject it until
+         * This is a concurrent attach during device recovery. Reject it until
          * pci_dev_reset_iommu_done() attaches the device to group->domain.
         */
-        if (group->resetting_domain) {
+        if (group->recovery_cnt) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
@@ -3708,10 +3739,10 @@ int iommu_replace_device_pasid(struct iommu_domain *domain,
         mutex_lock(&group->mutex);
 
         /*
-         * This is a concurrent attach during a device reset. Reject it until
+         * This is a concurrent attach during device recovery. Reject it until
          * pci_dev_reset_iommu_done() attaches the device to group->domain.
          */
-        if (group->resetting_domain) {
+        if (group->recovery_cnt) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
@@ -3982,12 +4013,12 @@ EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
  * routine wants to block any IOMMU activity: translation and ATS invalidation.
  *
  * This function attaches the device's RID/PASID(s) the group->blocking_domain,
- * setting the group->resetting_domain. This allows the IOMMU driver pausing any
+ * incrementing the group->recovery_cnt, to allow the IOMMU driver pausing any
  * IOMMU activity while leaving the group->domain pointer intact. Later when the
  * reset is finished, pci_dev_reset_iommu_done() can restore everything.
  *
  * Caller must use pci_dev_reset_iommu_prepare() with pci_dev_reset_iommu_done()
- * before/after the core-level reset routine, to unset the resetting_domain.
+ * before/after the core-level reset routine, to decrement the recovery_cnt.
  *
  * Return: 0 on success or negative error code if the preparation failed.
  *
@@ -4000,6 +4031,7 @@ EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
 int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
 {
         struct iommu_group *group = pdev->dev.iommu_group;
+        struct group_device *gdev;
         unsigned long pasid;
         void *entry;
         int ret;
@@ -4009,8 +4041,12 @@ int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
 
         guard(mutex)(&group->mutex);
 
+        gdev = __dev_to_gdev(&pdev->dev);
+        if (WARN_ON(!gdev))
+                return -ENODEV;
+
         /* Re-entry is not allowed */
-        if (WARN_ON(group->resetting_domain))
+        if (WARN_ON(gdev->blocked))
                 return -EBUSY;
 
         ret = __iommu_group_alloc_blocking_domain(group);
@@ -4025,6 +4061,13 @@ int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
                 return ret;
         }
 
+        /*
+         * Update gdev->blocked upon the domain change, as it is used to return
+         * the correct domain in iommu_driver_get_domain_for_dev() that might be
+         * called in a set_dev_pasid callback function.
+         */
+        gdev->blocked = true;
+
         /*
          * Stage PASID domains at blocking_domain while retaining pasid_array.
          *
@@ -4035,7 +4078,7 @@ int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
                 iommu_remove_dev_pasid(&pdev->dev, pasid,
                                        pasid_array_entry_to_domain(entry));
 
-        group->resetting_domain = group->blocking_domain;
+        group->recovery_cnt++;
         return ret;
 }
 EXPORT_SYMBOL_GPL(pci_dev_reset_iommu_prepare);
@@ -4057,6 +4100,7 @@ EXPORT_SYMBOL_GPL(pci_dev_reset_iommu_prepare);
 void pci_dev_reset_iommu_done(struct pci_dev *pdev)
 {
         struct iommu_group *group = pdev->dev.iommu_group;
+        struct group_device *gdev;
         unsigned long pasid;
         void *entry;
 
@@ -4065,11 +4109,13 @@ void pci_dev_reset_iommu_done(struct pci_dev *pdev)
 
         guard(mutex)(&group->mutex);
 
-        /* pci_dev_reset_iommu_prepare() was bypassed for the device */
-        if (!group->resetting_domain)
+        gdev = __dev_to_gdev(&pdev->dev);
+        if (WARN_ON(!gdev))
                 return;
-
+        if (!gdev->blocked)
+                return;
+
         /* pci_dev_reset_iommu_prepare() was not successfully called */
         if (WARN_ON(!group->blocking_domain))
                 return;
 
@@ -4084,6 +4130,13 @@ void pci_dev_reset_iommu_done(struct pci_dev *pdev)
                                                 group->blocking_domain));
         }
 
+        /*
+         * Update gdev->blocked upon the domain change, as it is used to return
+         * the correct domain in iommu_driver_get_domain_for_dev() that might be
+         * called in a set_dev_pasid callback function.
+         */
+        gdev->blocked = false;
+
         /*
          * Re-attach PASID domains back to the domains retained in pasid_array.
          *
@@ -4095,7 +4148,8 @@ void pci_dev_reset_iommu_done(struct pci_dev *pdev)
                                 pasid_array_entry_to_domain(entry), group, pasid,
                                 group->blocking_domain));
 
-        group->resetting_domain = NULL;
+        if (!WARN_ON(group->recovery_cnt == 0))
+                group->recovery_cnt--;
 }
 EXPORT_SYMBOL_GPL(pci_dev_reset_iommu_done);
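Per the kernel-doc above, the two exports are meant to bracket the core-level reset routine. A hedged caller sketch (example_* names are invented; the real call sites sit in the PCI core's reset paths):

static int example_do_hardware_reset(struct pci_dev *pdev);    /* placeholder */

static int example_reset_one_function(struct pci_dev *pdev)
{
        int ret;

        /* Park the RID/PASIDs on the blocking domain: gdev->blocked = true,
         * group->recovery_cnt++, while group->domain stays untouched. */
        ret = pci_dev_reset_iommu_prepare(pdev);
        if (ret)
                return ret;

        ret = example_do_hardware_reset(pdev);

        /* Re-attach to the retained group->domain: blocked = false,
         * recovery_cnt--, paired with prepare() even if the reset failed. */
        pci_dev_reset_iommu_done(pdev);
        return ret;
}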