mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-13 17:39:23 -04:00
iommu/arm-smmu-v3: Report IOMMU_CAP_ENFORCE_CACHE_COHERENCY for CANWBS
HW with CANWBS is always cache coherent and ignores PCI No Snoop requests as well. This meets the requirement for IOMMU_CAP_ENFORCE_CACHE_COHERENCY, so let's return it. Implement the enforce_cache_coherency() op to reject attaching devices that don't have CANWBS. Reviewed-by: Nicolin Chen <nicolinc@nvidia.com> Reviewed-by: Mostafa Saleh <smostafa@google.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com> Reviewed-by: Donald Dutile <ddutile@redhat.com> Tested-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/4-v4-9e99b76f3518+3a8-smmuv3_nesting_jgg@nvidia.com Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
committed by
Will Deacon
parent
807404d66f
commit
e89573cf4a
@@ -2293,6 +2293,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
|
||||
case IOMMU_CAP_CACHE_COHERENCY:
|
||||
/* Assume that a coherent TCU implies coherent TBUs */
|
||||
return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
|
||||
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
|
||||
return arm_smmu_master_canwbs(master);
|
||||
case IOMMU_CAP_NOEXEC:
|
||||
case IOMMU_CAP_DEFERRED_FLUSH:
|
||||
return true;
|
||||
@@ -2303,6 +2305,26 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * iommu_domain_ops->enforce_cache_coherency implementation.
 *
 * Returns true only if every master currently attached to this domain has
 * CANWBS (per the commit description: such HW is always cache coherent and
 * ignores PCI No-Snoop, meeting IOMMU_CAP_ENFORCE_CACHE_COHERENCY).
 *
 * The result is latched into smmu_domain->enforce_cache_coherency while
 * devices_lock is held, so a concurrent attach (which checks the flag under
 * the same lock — see the arm_smmu_attach_prepare hunk in this patch) cannot
 * slip a non-CANWBS master into a domain that just reported enforcement.
 */
static bool arm_smmu_enforce_cache_coherency(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_domain *master_domain;
	unsigned long flags;
	bool ret = true;

	/* Walk all attached masters; one non-CANWBS master vetoes enforcement. */
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		if (!arm_smmu_master_canwbs(master_domain->master)) {
			ret = false;
			break;
		}
	}
	/* Must be set before the lock is dropped to close the attach race. */
	smmu_domain->enforce_cache_coherency = ret;
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
	return ret;
}
|
||||
|
||||
struct arm_smmu_domain *arm_smmu_domain_alloc(void)
|
||||
{
|
||||
struct arm_smmu_domain *smmu_domain;
|
||||
@@ -2731,6 +2753,14 @@ static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
|
||||
* one of them.
|
||||
*/
|
||||
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
|
||||
if (smmu_domain->enforce_cache_coherency &&
|
||||
!arm_smmu_master_canwbs(master)) {
|
||||
spin_unlock_irqrestore(&smmu_domain->devices_lock,
|
||||
flags);
|
||||
kfree(master_domain);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (state->ats_enabled)
|
||||
atomic_inc(&smmu_domain->nr_ats_masters);
|
||||
list_add(&master_domain->devices_elm, &smmu_domain->devices);
|
||||
@@ -3493,6 +3523,7 @@ static struct iommu_ops arm_smmu_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.default_domain_ops = &(const struct iommu_domain_ops) {
|
||||
.attach_dev = arm_smmu_attach_dev,
|
||||
.enforce_cache_coherency = arm_smmu_enforce_cache_coherency,
|
||||
.set_dev_pasid = arm_smmu_s1_set_dev_pasid,
|
||||
.map_pages = arm_smmu_map_pages,
|
||||
.unmap_pages = arm_smmu_unmap_pages,
|
||||
|
||||
@@ -811,6 +811,7 @@ struct arm_smmu_domain {
|
||||
/* List of struct arm_smmu_master_domain */
|
||||
struct list_head devices;
|
||||
spinlock_t devices_lock;
|
||||
bool enforce_cache_coherency : 1;
|
||||
|
||||
struct mmu_notifier mmu_notifier;
|
||||
};
|
||||
@@ -893,6 +894,12 @@ int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
|
||||
int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_cmdq *cmdq);
|
||||
|
||||
/*
 * True when firmware marked this master's PCI root complex as CANWBS,
 * i.e. always cache coherent and ignoring PCI No-Snoop requests (per the
 * commit description). The non-zero flags-mask result converts implicitly
 * to bool.
 */
static inline bool arm_smmu_master_canwbs(struct arm_smmu_master *master)
{
	return dev_iommu_fwspec_get(master->dev)->flags &
	       IOMMU_FWSPEC_PCI_RC_CANWBS;
}
|
||||
|
||||
#ifdef CONFIG_ARM_SMMU_V3_SVA
|
||||
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
|
||||
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
|
||||
|
||||
Reference in New Issue
Block a user