iommu/vt-d: Split paging_domain_compatible()

Make first/second stage specific functions that follow the same pattern
as intel_iommu_domain_alloc_first/second_stage() for computing
EOPNOTSUPP. This makes the code easier to understand: if we couldn't
have created a domain with these parameters on this IOMMU instance,
then we are certainly not compatible with it.
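
For reference, the allocation-side helpers that the new checks mirror
gate on the same capability bits but fail the allocation with
EOPNOTSUPP. A condensed sketch of that shape (paraphrased for
illustration, not the exact tree contents):

	static struct iommu_domain *
	intel_iommu_domain_alloc_first_stage(struct device *dev,
					     struct intel_iommu *iommu,
					     u32 flags)
	{
		if (flags & ~IOMMU_HWPT_ALLOC_PASID)
			return ERR_PTR(-EOPNOTSUPP);

		/* Only SL is available in legacy mode */
		if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
			return ERR_PTR(-EOPNOTSUPP);
		...
	}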

Check superpage support directly against the per-stage cap bits and the
pgsize_bitmap.
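
Concretely, the second-stage superpage field of the capability
register (read via cap_super_page_val()) is a bitmap in which bit 0
advertises 2M pages and bit 1 advertises 1G pages, so each cap bit is
paired with the matching pgsize_bitmap bit, as in the hunk below:

	unsigned int sslps = cap_super_page_val(iommu->cap);

	/* bit 0: 2M second-stage superpages */
	if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
		return -EINVAL;
	/* bit 1: 1G second-stage superpages */
	if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
		return -EINVAL;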

Add a note that force_snooping is read without locking: the locking
needs to cover both the compatibility check and the addition of the
device to the domain's device list.
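
A hypothetical sketch of what the properly locked version could look
like (not part of this patch; force_snooping is flipped by
intel_iommu_enforce_cache_coherency(), and the list_add() here stands
in for the attach path's insertion into dmar_domain->devices):

	unsigned long flags;

	/*
	 * Hold dmar_domain->lock across both the check and the device
	 * list insertion so a concurrent force_snooping enable cannot
	 * race in between.
	 */
	spin_lock_irqsave(&dmar_domain->lock, flags);
	if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) {
		spin_unlock_irqrestore(&dmar_domain->lock, flags);
		return -EINVAL;
	}
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);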

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/7-v3-dbbe6f7e7ae3+124ffe-vtd_prep_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20250714045028.958850-10-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 85cfaacc99
parent 0fa6f08934
Author:    Jason Gunthorpe
Date:      2025-07-14 12:50:26 +08:00
Committer: Will Deacon

@@ -3431,33 +3431,75 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
 	kfree(dmar_domain);
 }
 
+static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
+						struct intel_iommu *iommu)
+{
+	if (WARN_ON(dmar_domain->domain.dirty_ops ||
+		    dmar_domain->nested_parent))
+		return -EINVAL;
+
+	/* Only SL is available in legacy mode */
+	if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+		return -EINVAL;
+
+	/* Same page size support */
+	if (!cap_fl1gp_support(iommu->cap) &&
+	    (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+		return -EINVAL;
+	return 0;
+}
+
+static int
+paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
+				      struct intel_iommu *iommu)
+{
+	unsigned int sslps = cap_super_page_val(iommu->cap);
+
+	if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu))
+		return -EINVAL;
+	if (dmar_domain->nested_parent && !nested_supported(iommu))
+		return -EINVAL;
+
+	/* Legacy mode always supports second stage */
+	if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
+		return -EINVAL;
+
+	/* Same page size support */
+	if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
+		return -EINVAL;
+	if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
+		return -EINVAL;
+	return 0;
+}
+
 int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct intel_iommu *iommu = info->iommu;
+	int ret = -EINVAL;
 	int addr_width;
 
 	if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EPERM;
 
+	if (intel_domain_is_fs_paging(dmar_domain))
+		ret = paging_domain_compatible_first_stage(dmar_domain, iommu);
+	else if (intel_domain_is_ss_paging(dmar_domain))
+		ret = paging_domain_compatible_second_stage(dmar_domain, iommu);
+	else if (WARN_ON(true))
+		ret = -EINVAL;
+	if (ret)
+		return ret;
+
+	/*
+	 * FIXME this is locked wrong, it needs to be under the
+	 * dmar_domain->lock
+	 */
 	if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
 		return -EINVAL;
-
 	if (domain->dirty_ops && !ssads_supported(iommu))
 		return -EINVAL;
 
-	if (dmar_domain->iommu_coherency !=
-	    iommu_paging_structure_coherency(iommu))
-		return -EINVAL;
-
-	if (dmar_domain->iommu_superpage !=
-	    iommu_superpage_capability(iommu, dmar_domain->use_first_level))
-		return -EINVAL;
-
-	if (dmar_domain->use_first_level &&
-	    (!sm_supported(iommu) || !ecap_flts(iommu->ecap)))
-		return -EINVAL;
-
 	/* check if this iommu agaw is sufficient for max mapped address */
 	addr_width = agaw_to_width(iommu->agaw);