Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (last synced 2026-04-13 12:02:27 -04:00).
iommu/vt-d: Remove intel_svm_dev
The intel_svm_dev data structure used in the sva implementation for the Intel IOMMU driver stores information about a device attached to an SVA domain. It is a duplicate of dev_pasid_info that serves the same purpose. Replace intel_svm_dev with dev_pasid_info and clean up the use of intel_svm_dev. Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Link: https://lore.kernel.org/r/20240416080656.60968-11-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
@@ -4387,11 +4387,8 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
* notification. Before consolidating that code into iommu core, let
|
||||
* the intel sva code handle it.
|
||||
*/
|
||||
if (domain->type == IOMMU_DOMAIN_SVA) {
|
||||
intel_svm_remove_dev_pasid(dev, pasid);
|
||||
cache_tag_unassign_domain(dmar_domain, dev, pasid);
|
||||
goto out_tear_down;
|
||||
}
|
||||
if (domain->type == IOMMU_DOMAIN_SVA)
|
||||
intel_svm_remove_dev_pasid(domain);
|
||||
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
|
||||
|
||||
@@ -649,6 +649,7 @@ struct dmar_domain {
|
||||
struct list_head s2_link;
|
||||
};
|
||||
};
|
||||
struct intel_svm *svm;
|
||||
|
||||
struct iommu_domain domain; /* generic domain data structure for
|
||||
iommu core */
|
||||
@@ -1149,23 +1150,13 @@ int intel_svm_finish_prq(struct intel_iommu *iommu);
|
||||
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
|
||||
struct iommu_page_response *msg);
|
||||
struct iommu_domain *intel_svm_domain_alloc(void);
|
||||
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
|
||||
void intel_svm_remove_dev_pasid(struct iommu_domain *domain);
|
||||
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
|
||||
|
||||
struct intel_svm_dev {
|
||||
struct list_head list;
|
||||
struct rcu_head rcu;
|
||||
struct device *dev;
|
||||
struct intel_iommu *iommu;
|
||||
u16 did;
|
||||
u16 sid, qdep;
|
||||
};
|
||||
|
||||
struct intel_svm {
|
||||
struct mmu_notifier notifier;
|
||||
struct mm_struct *mm;
|
||||
u32 pasid;
|
||||
struct list_head devs;
|
||||
struct dmar_domain *domain;
|
||||
};
|
||||
#else
|
||||
@@ -1176,7 +1167,7 @@ static inline struct iommu_domain *intel_svm_domain_alloc(void)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
static inline void intel_svm_remove_dev_pasid(struct iommu_domain *domain)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -43,23 +43,6 @@ static void *pasid_private_find(ioasid_t pasid)
|
||||
return xa_load(&pasid_private_array, pasid);
|
||||
}
|
||||
|
||||
static struct intel_svm_dev *
|
||||
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
|
||||
{
|
||||
struct intel_svm_dev *sdev = NULL, *t;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(t, &svm->devs, list) {
|
||||
if (t->dev == dev) {
|
||||
sdev = t;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return sdev;
|
||||
}
|
||||
|
||||
int intel_svm_enable_prq(struct intel_iommu *iommu)
|
||||
{
|
||||
struct iopf_queue *iopfq;
|
||||
@@ -192,7 +175,10 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
|
||||
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
{
|
||||
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
||||
struct intel_svm_dev *sdev;
|
||||
struct dmar_domain *domain = svm->domain;
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct device_domain_info *info;
|
||||
unsigned long flags;
|
||||
|
||||
/* This might end up being called from exit_mmap(), *before* the page
|
||||
* tables are cleared. And __mmu_notifier_release() will delete us from
|
||||
@@ -206,11 +192,13 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
* page) so that we end up taking a fault that the hardware really
|
||||
* *has* to handle gracefully without affecting other processes.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list)
|
||||
intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
|
||||
svm->pasid, true);
|
||||
rcu_read_unlock();
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
|
||||
info = dev_iommu_priv_get(dev_pasid->dev);
|
||||
intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
|
||||
dev_pasid->pasid, true);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
|
||||
}
|
||||
|
||||
@@ -219,47 +207,17 @@ static const struct mmu_notifier_ops intel_mmuops = {
|
||||
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
|
||||
};
|
||||
|
||||
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
|
||||
struct intel_svm **rsvm,
|
||||
struct intel_svm_dev **rsdev)
|
||||
{
|
||||
struct intel_svm_dev *sdev = NULL;
|
||||
struct intel_svm *svm;
|
||||
|
||||
if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
svm = pasid_private_find(pasid);
|
||||
if (IS_ERR(svm))
|
||||
return PTR_ERR(svm);
|
||||
|
||||
if (!svm)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* If we found svm for the PASID, there must be at least one device
|
||||
* bond.
|
||||
*/
|
||||
if (WARN_ON(list_empty(&svm->devs)))
|
||||
return -EINVAL;
|
||||
sdev = svm_lookup_device_by_dev(svm, dev);
|
||||
|
||||
out:
|
||||
*rsvm = svm;
|
||||
*rsdev = sdev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
struct mm_struct *mm = domain->mm;
|
||||
struct intel_svm_dev *sdev;
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct intel_svm *svm;
|
||||
unsigned long sflags;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
svm = pasid_private_find(pasid);
|
||||
@@ -270,7 +228,6 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
|
||||
svm->pasid = pasid;
|
||||
svm->mm = mm;
|
||||
INIT_LIST_HEAD_RCU(&svm->devs);
|
||||
|
||||
svm->notifier.ops = &intel_mmuops;
|
||||
svm->domain = to_dmar_domain(domain);
|
||||
@@ -288,25 +245,17 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
}
|
||||
}
|
||||
|
||||
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
|
||||
if (!sdev) {
|
||||
ret = -ENOMEM;
|
||||
dmar_domain->svm = svm;
|
||||
dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
|
||||
if (!dev_pasid)
|
||||
goto free_svm;
|
||||
}
|
||||
|
||||
sdev->dev = dev;
|
||||
sdev->iommu = iommu;
|
||||
sdev->did = FLPT_DEFAULT_DID;
|
||||
sdev->sid = PCI_DEVID(info->bus, info->devfn);
|
||||
if (info->ats_enabled) {
|
||||
sdev->qdep = info->ats_qdep;
|
||||
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
|
||||
sdev->qdep = 0;
|
||||
}
|
||||
dev_pasid->dev = dev;
|
||||
dev_pasid->pasid = pasid;
|
||||
|
||||
ret = cache_tag_assign_domain(to_dmar_domain(domain), dev, pasid);
|
||||
if (ret)
|
||||
goto free_sdev;
|
||||
goto free_dev_pasid;
|
||||
|
||||
/* Setup the pasid table: */
|
||||
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
|
||||
@@ -315,16 +264,18 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
if (ret)
|
||||
goto unassign_tag;
|
||||
|
||||
list_add_rcu(&sdev->list, &svm->devs);
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
|
||||
spin_unlock_irqrestore(&dmar_domain->lock, flags);
|
||||
|
||||
return 0;
|
||||
|
||||
unassign_tag:
|
||||
cache_tag_unassign_domain(to_dmar_domain(domain), dev, pasid);
|
||||
free_sdev:
|
||||
kfree(sdev);
|
||||
free_dev_pasid:
|
||||
kfree(dev_pasid);
|
||||
free_svm:
|
||||
if (list_empty(&svm->devs)) {
|
||||
if (list_empty(&dmar_domain->dev_pasids)) {
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
pasid_private_remove(pasid);
|
||||
kfree(svm);
|
||||
@@ -333,26 +284,17 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
|
||||
void intel_svm_remove_dev_pasid(struct iommu_domain *domain)
|
||||
{
|
||||
struct intel_svm_dev *sdev;
|
||||
struct intel_svm *svm;
|
||||
struct mm_struct *mm;
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
struct intel_svm *svm = dmar_domain->svm;
|
||||
struct mm_struct *mm = domain->mm;
|
||||
|
||||
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
|
||||
return;
|
||||
mm = svm->mm;
|
||||
|
||||
if (sdev) {
|
||||
list_del_rcu(&sdev->list);
|
||||
kfree_rcu(sdev, rcu);
|
||||
|
||||
if (list_empty(&svm->devs)) {
|
||||
if (svm->notifier.ops)
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
pasid_private_remove(svm->pasid);
|
||||
kfree(svm);
|
||||
}
|
||||
if (list_empty(&dmar_domain->dev_pasids)) {
|
||||
if (svm->notifier.ops)
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
pasid_private_remove(svm->pasid);
|
||||
kfree(svm);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -686,8 +628,10 @@ struct iommu_domain *intel_svm_domain_alloc(void)
|
||||
return NULL;
|
||||
domain->domain.ops = &intel_svm_domain_ops;
|
||||
domain->use_first_level = true;
|
||||
INIT_LIST_HEAD(&domain->dev_pasids);
|
||||
INIT_LIST_HEAD(&domain->cache_tags);
|
||||
spin_lock_init(&domain->cache_lock);
|
||||
spin_lock_init(&domain->lock);
|
||||
|
||||
return &domain->domain;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user