Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'core' into x86/vt-d
@@ -133,7 +133,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 	 * or equal to the system's PAGE_SIZE, with a preference if
 	 * both are equal.
 	 */
-	pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+	pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
 	if (pgsize_bitmap & PAGE_SIZE) {
 		tdev->iommu.pgshift = PAGE_SHIFT;
 	} else {
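The nouveau hunk above is the consumer side of this series' core change: struct iommu_domain now carries its own pgsize_bitmap, so users stop dereferencing domain->ops for it. A minimal sketch of the consumer pattern (pick_pgshift() is a hypothetical helper, not the nouveau code, and it assumes a non-empty bitmap):

static unsigned int pick_pgshift(struct iommu_domain *domain)
{
	unsigned long bitmap = domain->pgsize_bitmap;	/* was domain->ops->pgsize_bitmap */

	if (bitmap & PAGE_SIZE)		/* prefer a match with the CPU page size */
		return PAGE_SHIFT;
	return __fls(bitmap);		/* otherwise the largest page the IOMMU offers */
}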
@@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)

 extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-					 struct device *dev);
+extern bool amd_iommu_is_attach_deferred(struct device *dev);
 extern int __init add_special_device(u8 type, u8 id, u16 *devid,
				     bool cmd_line);
@@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
	list_add_tail(&region->list, head);
 }

-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-				  struct device *dev)
+bool amd_iommu_is_attach_deferred(struct device *dev)
 {
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
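The signature change above recurs throughout the series: is_attach_deferred() becomes a device-scoped callback and loses its domain argument. A hedged sketch of the new shape (foo_* names and the defer_attach field are placeholders, not from this diff):

static bool foo_is_attach_deferred(struct device *dev)
{
	/* Per-device private data, installed at probe_device() time. */
	struct foo_dev_data *data = dev_iommu_priv_get(dev);

	return data && data->defer_attach;
}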
@@ -2269,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
 const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
-	.domain_free = amd_iommu_domain_free,
-	.attach_dev = amd_iommu_attach_device,
-	.detach_dev = amd_iommu_detach_device,
-	.map = amd_iommu_map,
-	.iotlb_sync_map = amd_iommu_iotlb_sync_map,
-	.unmap = amd_iommu_unmap,
-	.iova_to_phys = amd_iommu_iova_to_phys,
	.probe_device = amd_iommu_probe_device,
	.release_device = amd_iommu_release_device,
	.probe_finalize = amd_iommu_probe_finalize,
@@ -2284,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = {
	.put_resv_regions = generic_iommu_put_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
-	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
-	.iotlb_sync = amd_iommu_iotlb_sync,
	.def_domain_type = amd_iommu_def_domain_type,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = amd_iommu_attach_device,
+		.detach_dev = amd_iommu_detach_device,
+		.map = amd_iommu_map,
+		.unmap = amd_iommu_unmap,
+		.iotlb_sync_map = amd_iommu_iotlb_sync_map,
+		.iova_to_phys = amd_iommu_iova_to_phys,
+		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+		.iotlb_sync = amd_iommu_iotlb_sync,
+		.free = amd_iommu_domain_free,
+	}
 };

 /*****************************************************************************
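Every ops-table hunk in this merge follows the same pattern: callbacks that act on a domain (attach/detach, map/unmap, TLB maintenance, free) move out of struct iommu_ops into a struct iommu_domain_ops reached via .default_domain_ops, while device- and driver-scoped callbacks stay behind. A minimal sketch of the resulting shape (foo_* is an illustrative driver, not taken from this diff):

static const struct iommu_ops foo_iommu_ops = {
	/* device/driver scope stays in iommu_ops */
	.domain_alloc = foo_domain_alloc,
	.probe_device = foo_probe_device,
	.release_device = foo_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		/* domain scope moves here; .free replaces the old .domain_free */
		.attach_dev = foo_attach_dev,
		.map = foo_map,
		.unmap = foo_unmap,
		.iova_to_phys = foo_iova_to_phys,
		.free = foo_domain_free,
	}
};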
@@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
	ret = NOTIFY_DONE;

	/* In kdump kernel pci dev is not initialized yet -> send INVALID */
-	if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
+	if (amd_iommu_is_attach_deferred(&pdev->dev)) {
		amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out;
@@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,

 static const struct iommu_ops apple_dart_iommu_ops = {
	.domain_alloc = apple_dart_domain_alloc,
-	.domain_free = apple_dart_domain_free,
-	.attach_dev = apple_dart_attach_dev,
-	.detach_dev = apple_dart_detach_dev,
-	.map_pages = apple_dart_map_pages,
-	.unmap_pages = apple_dart_unmap_pages,
-	.flush_iotlb_all = apple_dart_flush_iotlb_all,
-	.iotlb_sync = apple_dart_iotlb_sync,
-	.iotlb_sync_map = apple_dart_iotlb_sync_map,
-	.iova_to_phys = apple_dart_iova_to_phys,
	.probe_device = apple_dart_probe_device,
	.release_device = apple_dart_release_device,
	.device_group = apple_dart_device_group,
@@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
	.get_resv_regions = apple_dart_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = apple_dart_attach_dev,
+		.detach_dev = apple_dart_detach_dev,
+		.map_pages = apple_dart_map_pages,
+		.unmap_pages = apple_dart_unmap_pages,
+		.flush_iotlb_all = apple_dart_flush_iotlb_all,
+		.iotlb_sync = apple_dart_iotlb_sync,
+		.iotlb_sync_map = apple_dart_iotlb_sync_map,
+		.iova_to_phys = apple_dart_iova_to_phys,
+		.free = apple_dart_domain_free,
+	}
 };

 static irqreturn_t apple_dart_irq(int irq, void *dev)
@@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
 static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
-	.domain_free = arm_smmu_domain_free,
-	.attach_dev = arm_smmu_attach_dev,
-	.map_pages = arm_smmu_map_pages,
-	.unmap_pages = arm_smmu_unmap_pages,
-	.flush_iotlb_all = arm_smmu_flush_iotlb_all,
-	.iotlb_sync = arm_smmu_iotlb_sync,
-	.iova_to_phys = arm_smmu_iova_to_phys,
	.probe_device = arm_smmu_probe_device,
	.release_device = arm_smmu_release_device,
	.device_group = arm_smmu_device_group,
-	.enable_nesting = arm_smmu_enable_nesting,
	.of_xlate = arm_smmu_of_xlate,
	.get_resv_regions = arm_smmu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = {
	.page_response = arm_smmu_page_response,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = arm_smmu_attach_dev,
+		.map_pages = arm_smmu_map_pages,
+		.unmap_pages = arm_smmu_unmap_pages,
+		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
+		.iotlb_sync = arm_smmu_iotlb_sync,
+		.iova_to_phys = arm_smmu_iova_to_phys,
+		.enable_nesting = arm_smmu_enable_nesting,
+		.free = arm_smmu_domain_free,
+	}
 };

 /* Probing and initialisation functions */
@@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
 static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
-	.domain_free = arm_smmu_domain_free,
-	.attach_dev = arm_smmu_attach_dev,
-	.map_pages = arm_smmu_map_pages,
-	.unmap_pages = arm_smmu_unmap_pages,
-	.flush_iotlb_all = arm_smmu_flush_iotlb_all,
-	.iotlb_sync = arm_smmu_iotlb_sync,
-	.iova_to_phys = arm_smmu_iova_to_phys,
	.probe_device = arm_smmu_probe_device,
	.release_device = arm_smmu_release_device,
	.probe_finalize = arm_smmu_probe_finalize,
	.device_group = arm_smmu_device_group,
-	.enable_nesting = arm_smmu_enable_nesting,
-	.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
	.of_xlate = arm_smmu_of_xlate,
	.get_resv_regions = arm_smmu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.def_domain_type = arm_smmu_def_domain_type,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = arm_smmu_attach_dev,
+		.map_pages = arm_smmu_map_pages,
+		.unmap_pages = arm_smmu_unmap_pages,
+		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
+		.iotlb_sync = arm_smmu_iotlb_sync,
+		.iova_to_phys = arm_smmu_iova_to_phys,
+		.enable_nesting = arm_smmu_enable_nesting,
+		.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
+		.free = arm_smmu_domain_free,
+	}
 };

 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 static const struct iommu_ops qcom_iommu_ops = {
	.capable = qcom_iommu_capable,
	.domain_alloc = qcom_iommu_domain_alloc,
-	.domain_free = qcom_iommu_domain_free,
-	.attach_dev = qcom_iommu_attach_dev,
-	.detach_dev = qcom_iommu_detach_dev,
-	.map = qcom_iommu_map,
-	.unmap = qcom_iommu_unmap,
-	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
-	.iotlb_sync = qcom_iommu_iotlb_sync,
-	.iova_to_phys = qcom_iommu_iova_to_phys,
	.probe_device = qcom_iommu_probe_device,
	.release_device = qcom_iommu_release_device,
	.device_group = generic_device_group,
	.of_xlate = qcom_iommu_of_xlate,
	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = qcom_iommu_attach_dev,
+		.detach_dev = qcom_iommu_detach_dev,
+		.map = qcom_iommu_map,
+		.unmap = qcom_iommu_unmap,
+		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
+		.iotlb_sync = qcom_iommu_iotlb_sync,
+		.iova_to_phys = qcom_iommu_iova_to_phys,
+		.free = qcom_iommu_domain_free,
+	}
 };

 static int qcom_iommu_sec_ptbl_init(struct device *dev)
@@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
+	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;
@@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
+	ret = iova_domain_init_rcaches(iovad);
+	if (ret)
+		return ret;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
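The dma-iommu hunk tracks another core-branch change: rcache setup was split out of init_iova_domain(), so callers now initialize the per-CPU range caches explicitly and must handle allocation failure. Sketch of the calling sequence (granule and starting-pfn values are illustrative):

	init_iova_domain(iovad, SZ_4K, 1);		/* granule, starting pfn */
	ret = iova_domain_init_rcaches(iovad);		/* can fail with -ENOMEM */
	if (ret)
		return ret;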
@@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev,

 static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
-	.domain_free = exynos_iommu_domain_free,
-	.attach_dev = exynos_iommu_attach_device,
-	.detach_dev = exynos_iommu_detach_device,
-	.map = exynos_iommu_map,
-	.unmap = exynos_iommu_unmap,
-	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = generic_device_group,
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = exynos_iommu_attach_device,
+		.detach_dev = exynos_iommu_detach_device,
+		.map = exynos_iommu_map,
+		.unmap = exynos_iommu_unmap,
+		.iova_to_phys = exynos_iommu_iova_to_phys,
+		.free = exynos_iommu_domain_free,
+	}
 };

 static int __init exynos_iommu_init(void)
@@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev)
 static const struct iommu_ops fsl_pamu_ops = {
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
-	.domain_free = fsl_pamu_domain_free,
-	.attach_dev = fsl_pamu_attach_device,
-	.detach_dev = fsl_pamu_detach_device,
-	.iova_to_phys = fsl_pamu_iova_to_phys,
	.probe_device = fsl_pamu_probe_device,
	.release_device = fsl_pamu_release_device,
	.device_group = fsl_pamu_device_group,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = fsl_pamu_attach_device,
+		.detach_dev = fsl_pamu_detach_device,
+		.iova_to_phys = fsl_pamu_iova_to_phys,
+		.free = fsl_pamu_domain_free,
+	}
 };

 int __init pamu_domain_init(void)
@@ -351,8 +351,7 @@ static int show_device_domain_translation(struct device *dev, void *data)
	if (!domain)
		return 0;

-	seq_printf(m, "Device %s with pasid %d @0x%llx\n",
-		   dev_name(dev), domain->default_pasid,
+	seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
		   (u64)virt_to_phys(domain->pgd));
	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
@@ -1573,18 +1573,6 @@ static void domain_update_iotlb(struct dmar_domain *domain)
			break;
		}

-	if (!has_iotlb_device) {
-		struct subdev_domain_info *sinfo;
-
-		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
-			info = get_domain_info(sinfo->pdev);
-			if (info && info->ats_enabled) {
-				has_iotlb_device = true;
-				break;
-			}
-		}
-	}
-
	domain->has_iotlb_device = has_iotlb_device;
 }
@@ -1682,7 +1670,6 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 {
	unsigned long flags;
	struct device_domain_info *info;
-	struct subdev_domain_info *sinfo;

	if (!domain->has_iotlb_device)
		return;
@@ -1691,27 +1678,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
	list_for_each_entry(info, &domain->devices, link)
		__iommu_flush_dev_iotlb(info, addr, mask);

-	list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
-		info = get_domain_info(sinfo->pdev);
-		__iommu_flush_dev_iotlb(info, addr, mask);
-	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
 }

-static void domain_flush_piotlb(struct intel_iommu *iommu,
-				struct dmar_domain *domain,
-				u64 addr, unsigned long npages, bool ih)
-{
-	u16 did = domain->iommu_did[iommu->seq_id];
-
-	if (domain->default_pasid)
-		qi_flush_piotlb(iommu, did, domain->default_pasid,
-				addr, npages, ih);
-
-	if (!list_empty(&domain->devices))
-		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
-}
-
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   unsigned long pfn, unsigned int pages,
@@ -1727,7 +1696,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
		ih = 1 << 6;

	if (domain_use_first_level(domain)) {
-		domain_flush_piotlb(iommu, domain, addr, pages, ih);
+		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
	} else {
		/*
		 * Fallback to domain selective flush if no PSI support or
@@ -1776,7 +1745,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
		u16 did = dmar_domain->iommu_did[iommu->seq_id];

		if (domain_use_first_level(dmar_domain))
-			domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
+			qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
		else
			iommu->flush.flush_iotlb(iommu, did, 0, 0,
						 DMA_TLB_DSI_FLUSH);
@@ -1983,7 +1952,6 @@ static struct dmar_domain *alloc_domain(unsigned int type)
		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);
-	INIT_LIST_HEAD(&domain->subdevices);

	return domain;
 }
@@ -2676,8 +2644,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
	info->domain = domain;
	info->iommu = iommu;
	info->pasid_table = NULL;
-	info->auxd_enabled = 0;
-	INIT_LIST_HEAD(&info->subdevices);

	if (dev && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4637,183 +4603,6 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
	domain_exit(to_dmar_domain(domain));
 }

-/*
- * Check whether a @domain could be attached to the @dev through the
- * aux-domain attach/detach APIs.
- */
-static inline bool
-is_aux_domain(struct device *dev, struct iommu_domain *domain)
-{
-	struct device_domain_info *info = get_domain_info(dev);
-
-	return info && info->auxd_enabled &&
-			domain->type == IOMMU_DOMAIN_UNMANAGED;
-}
-
-static inline struct subdev_domain_info *
-lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
-{
-	struct subdev_domain_info *sinfo;
-
-	if (!list_empty(&domain->subdevices)) {
-		list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
-			if (sinfo->pdev == dev)
-				return sinfo;
-		}
-	}
-
-	return NULL;
-}
-
-static int auxiliary_link_device(struct dmar_domain *domain,
-				 struct device *dev)
-{
-	struct device_domain_info *info = get_domain_info(dev);
-	struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
-
-	assert_spin_locked(&device_domain_lock);
-	if (WARN_ON(!info))
-		return -EINVAL;
-
-	if (!sinfo) {
-		sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
-		if (!sinfo)
-			return -ENOMEM;
-		sinfo->domain = domain;
-		sinfo->pdev = dev;
-		list_add(&sinfo->link_phys, &info->subdevices);
-		list_add(&sinfo->link_domain, &domain->subdevices);
-	}
-
-	return ++sinfo->users;
-}
-
-static int auxiliary_unlink_device(struct dmar_domain *domain,
-				   struct device *dev)
-{
-	struct device_domain_info *info = get_domain_info(dev);
-	struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
-	int ret;
-
-	assert_spin_locked(&device_domain_lock);
-	if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
-		return -EINVAL;
-
-	ret = --sinfo->users;
-	if (!ret) {
-		list_del(&sinfo->link_phys);
-		list_del(&sinfo->link_domain);
-		kfree(sinfo);
-	}
-
-	return ret;
-}
-
-static int aux_domain_add_dev(struct dmar_domain *domain,
-			      struct device *dev)
-{
-	int ret;
-	unsigned long flags;
-	struct intel_iommu *iommu;
-
-	iommu = device_to_iommu(dev, NULL, NULL);
-	if (!iommu)
-		return -ENODEV;
-
-	if (domain->default_pasid <= 0) {
-		u32 pasid;
-
-		/* No private data needed for the default pasid */
-		pasid = ioasid_alloc(NULL, PASID_MIN,
-				     pci_max_pasids(to_pci_dev(dev)) - 1,
-				     NULL);
-		if (pasid == INVALID_IOASID) {
-			pr_err("Can't allocate default pasid\n");
-			return -ENODEV;
-		}
-		domain->default_pasid = pasid;
-	}
-
-	spin_lock_irqsave(&device_domain_lock, flags);
-	ret = auxiliary_link_device(domain, dev);
-	if (ret <= 0)
-		goto link_failed;
-
-	/*
-	 * Subdevices from the same physical device can be attached to the
-	 * same domain. For such cases, only the first subdevice attachment
-	 * needs to go through the full steps in this function. So if ret >
-	 * 1, just goto out.
-	 */
-	if (ret > 1)
-		goto out;
-
-	/*
-	 * iommu->lock must be held to attach domain to iommu and setup the
-	 * pasid entry for second level translation.
-	 */
-	spin_lock(&iommu->lock);
-	ret = domain_attach_iommu(domain, iommu);
-	if (ret)
-		goto attach_failed;
-
-	/* Setup the PASID entry for mediated devices: */
-	if (domain_use_first_level(domain))
-		ret = domain_setup_first_level(iommu, domain, dev,
-					       domain->default_pasid);
-	else
-		ret = intel_pasid_setup_second_level(iommu, domain, dev,
-						     domain->default_pasid);
-	if (ret)
-		goto table_failed;
-
-	spin_unlock(&iommu->lock);
-out:
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	return 0;
-
-table_failed:
-	domain_detach_iommu(domain, iommu);
-attach_failed:
-	spin_unlock(&iommu->lock);
-	auxiliary_unlink_device(domain, dev);
-link_failed:
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-	if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
-		ioasid_put(domain->default_pasid);
-
-	return ret;
-}
-
-static void aux_domain_remove_dev(struct dmar_domain *domain,
-				  struct device *dev)
-{
-	struct device_domain_info *info;
-	struct intel_iommu *iommu;
-	unsigned long flags;
-
-	if (!is_aux_domain(dev, &domain->domain))
-		return;
-
-	spin_lock_irqsave(&device_domain_lock, flags);
-	info = get_domain_info(dev);
-	iommu = info->iommu;
-
-	if (!auxiliary_unlink_device(domain, dev)) {
-		spin_lock(&iommu->lock);
-		intel_pasid_tear_down_entry(iommu, dev,
-					    domain->default_pasid, false);
-		domain_detach_iommu(domain, iommu);
-		spin_unlock(&iommu->lock);
-	}
-
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
-		ioasid_put(domain->default_pasid);
-}
-
 static int prepare_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev)
 {
@@ -4825,13 +4614,6 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
	if (!iommu)
		return -ENODEV;

-	if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) &&
-	    !ecap_nest(iommu->ecap)) {
-		dev_err(dev, "%s: iommu not support nested translation\n",
-			iommu->name);
-		return -EINVAL;
-	}
-
	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
@@ -4873,9 +4655,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
		return -EPERM;
	}

-	if (is_aux_domain(dev, domain))
-		return -EPERM;
-
	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;
@@ -4892,212 +4671,12 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
	return domain_add_dev_info(to_dmar_domain(domain), dev);
 }

-static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
-					 struct device *dev)
-{
-	int ret;
-
-	if (!is_aux_domain(dev, domain))
-		return -EPERM;
-
-	ret = prepare_domain_attach_device(domain, dev);
-	if (ret)
-		return ret;
-
-	return aux_domain_add_dev(to_dmar_domain(domain), dev);
-}
-
 static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
 {
	dmar_remove_one_dev_info(dev);
 }

-static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
-					  struct device *dev)
-{
-	aux_domain_remove_dev(to_dmar_domain(domain), dev);
-}
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-/*
- * 2D array for converting and sanitizing IOMMU generic TLB granularity to
- * VT-d granularity. Invalidation is typically included in the unmap operation
- * as a result of DMA or VFIO unmap. However, for assigned devices guest
- * owns the first level page tables. Invalidations of translation caches in the
- * guest are trapped and passed down to the host.
- *
- * vIOMMU in the guest will only expose first level page tables, therefore
- * we do not support IOTLB granularity for request without PASID (second level).
- *
- * For example, to find the VT-d granularity encoding for IOTLB
- * type and page selective granularity within PASID:
- * X: indexed by iommu cache type
- * Y: indexed by enum iommu_inv_granularity
- * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
- */
-
-static const int
-inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
-	/*
-	 * PASID based IOTLB invalidation: PASID selective (per PASID),
-	 * page selective (address granularity)
-	 */
-	{-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
-	/* PASID based dev TLBs */
-	{-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
-	/* PASID cache */
-	{-EINVAL, -EINVAL, -EINVAL}
-};
-
-static inline int to_vtd_granularity(int type, int granu)
-{
-	return inv_type_granu_table[type][granu];
-}
-
-static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
-{
-	u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
-
-	/* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc.
-	 * IOMMU cache invalidate API passes granu_size in bytes, and number of
-	 * granu size in contiguous memory.
-	 */
-	return order_base_2(nr_pages);
-}
-
-static int
-intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
-			   struct iommu_cache_invalidate_info *inv_info)
-{
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-	struct device_domain_info *info;
-	struct intel_iommu *iommu;
-	unsigned long flags;
-	int cache_type;
-	u8 bus, devfn;
-	u16 did, sid;
-	int ret = 0;
-	u64 size = 0;
-
-	if (!inv_info || !dmar_domain)
-		return -EINVAL;
-
-	if (!dev || !dev_is_pci(dev))
-		return -ENODEV;
-
-	iommu = device_to_iommu(dev, &bus, &devfn);
-	if (!iommu)
-		return -ENODEV;
-
-	if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
-		return -EINVAL;
-
-	spin_lock_irqsave(&device_domain_lock, flags);
-	spin_lock(&iommu->lock);
-	info = get_domain_info(dev);
-	if (!info) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-	did = dmar_domain->iommu_did[iommu->seq_id];
-	sid = PCI_DEVID(bus, devfn);
-
-	/* Size is only valid in address selective invalidation */
-	if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
-		size = to_vtd_size(inv_info->granu.addr_info.granule_size,
-				   inv_info->granu.addr_info.nb_granules);
-
-	for_each_set_bit(cache_type,
-			 (unsigned long *)&inv_info->cache,
-			 IOMMU_CACHE_INV_TYPE_NR) {
-		int granu = 0;
-		u64 pasid = 0;
-		u64 addr = 0;
-
-		granu = to_vtd_granularity(cache_type, inv_info->granularity);
-		if (granu == -EINVAL) {
-			pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
-					   cache_type, inv_info->granularity);
-			break;
-		}
-
-		/*
-		 * PASID is stored in different locations based on the
-		 * granularity.
-		 */
-		if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
-		    (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
-			pasid = inv_info->granu.pasid_info.pasid;
-		else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
-			 (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
-			pasid = inv_info->granu.addr_info.pasid;
-
-		switch (BIT(cache_type)) {
-		case IOMMU_CACHE_INV_TYPE_IOTLB:
-			/* HW will ignore LSB bits based on address mask */
-			if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
-			    size &&
-			    (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
-				pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
-						   inv_info->granu.addr_info.addr, size);
-			}
-
-			/*
-			 * If granu is PASID-selective, address is ignored.
-			 * We use npages = -1 to indicate that.
-			 */
-			qi_flush_piotlb(iommu, did, pasid,
-					mm_to_dma_pfn(inv_info->granu.addr_info.addr),
-					(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
-					inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
-
-			if (!info->ats_enabled)
-				break;
-			/*
-			 * Always flush device IOTLB if ATS is enabled. vIOMMU
-			 * in the guest may assume IOTLB flush is inclusive,
-			 * which is more efficient.
-			 */
-			fallthrough;
-		case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
-			/*
-			 * PASID based device TLB invalidation does not support
-			 * IOMMU_INV_GRANU_PASID granularity but only supports
-			 * IOMMU_INV_GRANU_ADDR.
-			 * The equivalent of that is we set the size to be the
-			 * entire range of 64 bit. User only provides PASID info
-			 * without address info. So we set addr to 0.
-			 */
-			if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
-				size = 64 - VTD_PAGE_SHIFT;
-				addr = 0;
-			} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
-				addr = inv_info->granu.addr_info.addr;
-			}
-
-			if (info->ats_enabled)
-				qi_flush_dev_iotlb_pasid(iommu, sid,
-						info->pfsid, pasid,
-						info->ats_qdep, addr,
-						size);
-			else
-				pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
-			break;
-		default:
-			dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
-					    cache_type);
-			ret = -EINVAL;
-		}
-	}
-out_unlock:
-	spin_unlock(&iommu->lock);
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	return ret;
-}
-#endif
-
 static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot, gfp_t gfp)
@@ -5391,46 +4970,6 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
	return generic_device_group(dev);
 }

-static int intel_iommu_enable_auxd(struct device *dev)
-{
-	struct device_domain_info *info;
-	struct intel_iommu *iommu;
-	unsigned long flags;
-	int ret;
-
-	iommu = device_to_iommu(dev, NULL, NULL);
-	if (!iommu || dmar_disabled)
-		return -EINVAL;
-
-	if (!sm_supported(iommu) || !pasid_supported(iommu))
-		return -EINVAL;
-
-	ret = intel_iommu_enable_pasid(iommu, dev);
-	if (ret)
-		return -ENODEV;
-
-	spin_lock_irqsave(&device_domain_lock, flags);
-	info = get_domain_info(dev);
-	info->auxd_enabled = 1;
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	return 0;
-}
-
-static int intel_iommu_disable_auxd(struct device *dev)
-{
-	struct device_domain_info *info;
-	unsigned long flags;
-
-	spin_lock_irqsave(&device_domain_lock, flags);
-	info = get_domain_info(dev);
-	if (!WARN_ON(!info))
-		info->auxd_enabled = 0;
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
-	return 0;
-}
-
 static int intel_iommu_enable_sva(struct device *dev)
 {
	struct device_domain_info *info = get_domain_info(dev);
@@ -5487,9 +5026,6 @@ static int
 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
 {
	switch (feat) {
-	case IOMMU_DEV_FEAT_AUX:
-		return intel_iommu_enable_auxd(dev);
-
	case IOMMU_DEV_FEAT_IOPF:
		return intel_iommu_enable_iopf(dev);

@@ -5505,9 +5041,6 @@ static int
 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 {
	switch (feat) {
-	case IOMMU_DEV_FEAT_AUX:
-		return intel_iommu_disable_auxd(dev);
-
	case IOMMU_DEV_FEAT_IOPF:
		return 0;
@@ -5519,50 +5052,11 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
	}
 }

-static bool
-intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
-{
-	struct device_domain_info *info = get_domain_info(dev);
-
-	if (feat == IOMMU_DEV_FEAT_AUX)
-		return scalable_mode_support() && info && info->auxd_enabled;
-
-	return false;
-}
-
-static int
-intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
-{
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-
-	return dmar_domain->default_pasid > 0 ?
-			dmar_domain->default_pasid : -EINVAL;
-}
-
-static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
-					   struct device *dev)
+static bool intel_iommu_is_attach_deferred(struct device *dev)
 {
	return attach_deferred(dev);
 }

 static int
 intel_iommu_enable_nesting(struct iommu_domain *domain)
 {
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long flags;
	int ret = -ENODEV;

	spin_lock_irqsave(&device_domain_lock, flags);
	if (list_empty(&dmar_domain->devices)) {
		dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
		dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
		ret = 0;
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
 }

 /*
  * Check that the device does not live on an external facing PCI port that is
  * marked as untrusted. Such devices should not be able to apply quirks and
@@ -5598,40 +5092,34 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 const struct iommu_ops intel_iommu_ops = {
	.capable = intel_iommu_capable,
	.domain_alloc = intel_iommu_domain_alloc,
-	.domain_free = intel_iommu_domain_free,
-	.enable_nesting = intel_iommu_enable_nesting,
-	.attach_dev = intel_iommu_attach_device,
-	.detach_dev = intel_iommu_detach_device,
-	.aux_attach_dev = intel_iommu_aux_attach_device,
-	.aux_detach_dev = intel_iommu_aux_detach_device,
-	.aux_get_pasid = intel_iommu_aux_get_pasid,
-	.map_pages = intel_iommu_map_pages,
-	.unmap_pages = intel_iommu_unmap_pages,
-	.iotlb_sync_map = intel_iommu_iotlb_sync_map,
-	.flush_iotlb_all = intel_flush_iotlb_all,
-	.iotlb_sync = intel_iommu_tlb_sync,
-	.iova_to_phys = intel_iommu_iova_to_phys,
	.probe_device = intel_iommu_probe_device,
	.probe_finalize = intel_iommu_probe_finalize,
	.release_device = intel_iommu_release_device,
	.get_resv_regions = intel_iommu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.device_group = intel_iommu_device_group,
-	.dev_feat_enabled = intel_iommu_dev_feat_enabled,
	.dev_enable_feat = intel_iommu_dev_enable_feat,
	.dev_disable_feat = intel_iommu_dev_disable_feat,
	.is_attach_deferred = intel_iommu_is_attach_deferred,
	.def_domain_type = device_def_domain_type,
	.pgsize_bitmap = SZ_4K,
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	.cache_invalidate = intel_iommu_sva_invalidate,
-	.sva_bind_gpasid = intel_svm_bind_gpasid,
-	.sva_unbind_gpasid = intel_svm_unbind_gpasid,
	.sva_bind = intel_svm_bind,
	.sva_unbind = intel_svm_unbind,
	.sva_get_pasid = intel_svm_get_pasid,
	.page_response = intel_svm_page_response,
 #endif
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = intel_iommu_attach_device,
+		.detach_dev = intel_iommu_detach_device,
+		.map_pages = intel_iommu_map_pages,
+		.unmap_pages = intel_iommu_unmap_pages,
+		.iotlb_sync_map = intel_iommu_iotlb_sync_map,
+		.flush_iotlb_all = intel_flush_iotlb_all,
+		.iotlb_sync = intel_iommu_tlb_sync,
+		.iova_to_phys = intel_iommu_iova_to_phys,
+		.free = intel_iommu_domain_free,
+	}
 };

 static void quirk_iommu_igfx(struct pci_dev *dev)
@@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,

	return 0;
 }

-static int
-intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
-			    struct iommu_gpasid_bind_data_vtd *pasid_data)
-{
-	/*
-	 * Not all guest PASID table entry fields are passed down during bind,
-	 * here we only set up the ones that are dependent on guest settings.
-	 * Execution related bits such as NXE, SMEP are not supported.
-	 * Other fields, such as snoop related, are set based on host needs
-	 * regardless of guest settings.
-	 */
-	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
-		if (!ecap_srs(iommu->ecap)) {
-			pr_err_ratelimited("No supervisor request support on %s\n",
-					   iommu->name);
-			return -EINVAL;
-		}
-		pasid_set_sre(pte);
-		/* Enable write protect WP if guest requested */
-		if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
-			pasid_set_wpe(pte);
-	}
-
-	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
-		if (!ecap_eafs(iommu->ecap)) {
-			pr_err_ratelimited("No extended access flag support on %s\n",
-					   iommu->name);
-			return -EINVAL;
-		}
-		pasid_set_eafe(pte);
-	}
-
-	/*
-	 * Memory type is only applicable to devices inside processor coherent
-	 * domain. Will add MTS support once coherent devices are available.
-	 */
-	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
-		pr_warn_ratelimited("No memory type support %s\n",
-				    iommu->name);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
- * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
- * This could be used for guest shared virtual address. In this case, the
- * first level page tables are used for GVA-GPA translation in the guest,
- * second level page tables are used for GPA-HPA translation.
- *
- * @iommu:      IOMMU which the device belong to
- * @dev:        Device to be set up for translation
- * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
- * @pasid:      PASID to be programmed in the device PASID table
- * @pasid_data: Additional PASID info from the guest bind request
- * @domain:     Domain info for setting up second level page tables
- * @addr_width: Address width of the first level (guest)
- */
-int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
-			     pgd_t *gpgd, u32 pasid,
-			     struct iommu_gpasid_bind_data_vtd *pasid_data,
-			     struct dmar_domain *domain, int addr_width)
-{
-	struct pasid_entry *pte;
-	struct dma_pte *pgd;
-	int ret = 0;
-	u64 pgd_val;
-	int agaw;
-	u16 did;
-
-	if (!ecap_nest(iommu->ecap)) {
-		pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
-				   iommu->name);
-		return -EINVAL;
-	}
-
-	if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
-		pr_err_ratelimited("Domain is not in nesting mode, %x\n",
-				   domain->flags);
-		return -EINVAL;
-	}
-
-	pte = intel_pasid_get_entry(dev, pasid);
-	if (WARN_ON(!pte))
-		return -EINVAL;
-
-	/*
-	 * Caller must ensure PASID entry is not in use, i.e. not bind the
-	 * same PASID to the same device twice.
-	 */
-	if (pasid_pte_is_present(pte))
-		return -EBUSY;
-
-	pasid_clear_entry(pte);
-
-	/* Sanity checking performed by caller to make sure address
-	 * width matching in two dimensions:
-	 * 1. CPU vs. IOMMU
-	 * 2. Guest vs. Host.
-	 */
-	switch (addr_width) {
-#ifdef CONFIG_X86
-	case ADDR_WIDTH_5LEVEL:
-		if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
-		    !cap_5lp_support(iommu->cap)) {
-			dev_err_ratelimited(dev,
-					    "5-level paging not supported\n");
-			return -EINVAL;
-		}
-
-		pasid_set_flpm(pte, 1);
-		break;
-#endif
-	case ADDR_WIDTH_4LEVEL:
-		pasid_set_flpm(pte, 0);
-		break;
-	default:
-		dev_err_ratelimited(dev, "Invalid guest address width %d\n",
-				    addr_width);
-		return -EINVAL;
-	}
-
-	/* First level PGD is in GPA, must be supported by the second level */
-	if ((uintptr_t)gpgd > domain->max_addr) {
-		dev_err_ratelimited(dev,
-				    "Guest PGD %lx not supported, max %llx\n",
-				    (uintptr_t)gpgd, domain->max_addr);
-		return -EINVAL;
-	}
-	pasid_set_flptr(pte, (uintptr_t)gpgd);
-
-	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
-	if (ret)
-		return ret;
-
-	/* Setup the second level based on the given domain */
-	pgd = domain->pgd;
-
-	agaw = iommu_skip_agaw(domain, iommu, &pgd);
-	if (agaw < 0) {
-		dev_err_ratelimited(dev, "Invalid domain page table\n");
-		return -EINVAL;
-	}
-	pgd_val = virt_to_phys(pgd);
-	pasid_set_slptr(pte, pgd_val);
-	pasid_set_fault_enable(pte);
-
-	did = domain->iommu_did[iommu->seq_id];
-	pasid_set_domain_id(pte, did);
-
-	pasid_set_address_width(pte, agaw);
-	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
-
-	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
-	pasid_set_present(pte);
-	pasid_flush_caches(iommu, pte, pasid, did);
-
-	return ret;
-}
@@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid);
-int intel_pasid_setup_nested(struct intel_iommu *iommu,
-			     struct device *dev, pgd_t *pgd, u32 pasid,
-			     struct iommu_gpasid_bind_data_vtd *pasid_data,
-			     struct dmar_domain *domain, int addr_width);
 void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
				 struct device *dev, u32 pasid,
				 bool fault_ignore);
@@ -318,193 +318,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
	return 0;
 }

-int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
-			  struct iommu_gpasid_bind_data *data)
-{
-	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
-	struct intel_svm_dev *sdev = NULL;
-	struct dmar_domain *dmar_domain;
-	struct device_domain_info *info;
-	struct intel_svm *svm = NULL;
-	unsigned long iflags;
-	int ret = 0;
-
-	if (WARN_ON(!iommu) || !data)
-		return -EINVAL;
-
-	if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
-		return -EINVAL;
-
-	/* IOMMU core ensures argsz is more than the start of the union */
-	if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
-		return -EINVAL;
-
-	/* Make sure no undefined flags are used in vendor data */
-	if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
-		return -EINVAL;
-
-	if (!dev_is_pci(dev))
-		return -ENOTSUPP;
-
-	/* VT-d supports devices with full 20 bit PASIDs only */
-	if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
-		return -EINVAL;
-
-	/*
-	 * We only check host PASID range, we have no knowledge to check
-	 * guest PASID range.
-	 */
-	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
-		return -EINVAL;
-
-	info = get_domain_info(dev);
-	if (!info)
-		return -EINVAL;
-
-	dmar_domain = to_dmar_domain(domain);
-
-	mutex_lock(&pasid_mutex);
-	ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
-	if (ret)
-		goto out;
-
-	if (sdev) {
-		/*
-		 * Do not allow multiple bindings of the same device-PASID since
-		 * there is only one SL page tables per PASID. We may revisit
-		 * once sharing PGD across domains are supported.
-		 */
-		dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
-				     svm->pasid);
-		ret = -EBUSY;
-		goto out;
-	}
-
-	if (!svm) {
-		/* We come here when PASID has never been bond to a device. */
-		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
-		if (!svm) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		/* REVISIT: upper layer/VFIO can track host process that bind
-		 * the PASID. ioasid_set = mm might be sufficient for vfio to
-		 * check pasid VMM ownership. We can drop the following line
-		 * once VFIO and IOASID set check is in place.
-		 */
-		svm->mm = get_task_mm(current);
-		svm->pasid = data->hpasid;
-		if (data->flags & IOMMU_SVA_GPASID_VAL) {
-			svm->gpasid = data->gpasid;
-			svm->flags |= SVM_FLAG_GUEST_PASID;
-		}
-		pasid_private_add(data->hpasid, svm);
-		INIT_LIST_HEAD_RCU(&svm->devs);
-		mmput(svm->mm);
-	}
-	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
-	if (!sdev) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sdev->dev = dev;
-	sdev->sid = PCI_DEVID(info->bus, info->devfn);
-	sdev->iommu = iommu;
-
-	/* Only count users if device has aux domains */
-	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
-		sdev->users = 1;
-
-	/* Set up device context entry for PASID if not enabled already */
-	ret = intel_iommu_enable_pasid(iommu, sdev->dev);
-	if (ret) {
-		dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
-		kfree(sdev);
-		goto out;
-	}
-
-	/*
-	 * PASID table is per device for better security. Therefore, for
-	 * each bind of a new device even with an existing PASID, we need to
-	 * call the nested mode setup function here.
-	 */
-	spin_lock_irqsave(&iommu->lock, iflags);
-	ret = intel_pasid_setup_nested(iommu, dev,
-				       (pgd_t *)(uintptr_t)data->gpgd,
-				       data->hpasid, &data->vendor.vtd, dmar_domain,
-				       data->addr_width);
-	spin_unlock_irqrestore(&iommu->lock, iflags);
-	if (ret) {
-		dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
-				    data->hpasid, ret);
-		/*
-		 * PASID entry should be in cleared state if nested mode
-		 * set up failed. So we only need to clear IOASID tracking
-		 * data such that free call will succeed.
-		 */
-		kfree(sdev);
-		goto out;
-	}
-
-	svm->flags |= SVM_FLAG_GUEST_MODE;
-
-	init_rcu_head(&sdev->rcu);
-	list_add_rcu(&sdev->list, &svm->devs);
-out:
-	if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
-		pasid_private_remove(data->hpasid);
-		kfree(svm);
-	}
-
-	mutex_unlock(&pasid_mutex);
-	return ret;
-}
-
-int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
-{
-	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
-	struct intel_svm_dev *sdev;
-	struct intel_svm *svm;
-	int ret;
-
-	if (WARN_ON(!iommu))
-		return -EINVAL;
-
-	mutex_lock(&pasid_mutex);
-	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
-	if (ret)
-		goto out;
-
-	if (sdev) {
-		if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
-			sdev->users--;
-		if (!sdev->users) {
-			list_del_rcu(&sdev->list);
-			intel_pasid_tear_down_entry(iommu, dev,
-						    svm->pasid, false);
-			intel_svm_drain_prq(dev, svm->pasid);
-			kfree_rcu(sdev, rcu);
-
-			if (list_empty(&svm->devs)) {
-				/*
-				 * We do not free the IOASID here in that
-				 * IOMMU driver did not allocate it.
-				 * Unlike native SVM, IOASID for guest use was
-				 * allocated prior to the bind call.
-				 * In any case, if the free call comes before
-				 * the unbind, IOMMU driver will get notified
-				 * and perform cleanup.
-				 */
-				pasid_private_remove(pasid);
-				kfree(svm);
-			}
-		}
-	}
-out:
-	mutex_unlock(&pasid_mutex);
-	return ret;
-}
-
 static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
				 unsigned int flags)
 {
@@ -1125,28 +938,6 @@ int intel_svm_page_response(struct device *dev,
		goto out;
	}

-	/*
-	 * For responses from userspace, need to make sure that the
-	 * pasid has been bound to its mm.
-	 */
-	if (svm->flags & SVM_FLAG_GUEST_MODE) {
-		struct mm_struct *mm;
-
-		mm = get_task_mm(current);
-		if (!mm) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (mm != svm->mm) {
-			ret = -ENODEV;
-			mmput(mm);
-			goto out;
-		}
-
-		mmput(mm);
-	}
-
	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
@@ -323,13 +323,14 @@ int iommu_probe_device(struct device *dev)

 void iommu_release_device(struct device *dev)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

+	ops = dev_iommu_ops(dev);
	ops->release_device(dev);

	iommu_group_remove_device(dev);
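iommu_release_device() is one of several core paths that switch from dev->bus->iommu_ops to the dev_iommu_ops() helper, which resolves the ops through the iommu_device the driver registered for this device. Roughly, the helper reduces to the following (the exact field path is recalled from the same series, so treat it as an approximation):

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Callers must guarantee the device was already claimed by an
	 * IOMMU driver, i.e. dev->iommu and dev->iommu->iommu_dev are valid.
	 */
	return dev->iommu->iommu_dev->ops;
}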
@@ -790,9 +791,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
	dma_addr_t start, end, addr;
	size_t map_size = 0;

-	if (domain->ops->apply_resv_region)
-		domain->ops->apply_resv_region(dev, domain, entry);
-
	start = ALIGN(entry->start, pg_size);
	end = ALIGN(entry->start + entry->length, pg_size);
static bool iommu_is_attach_deferred(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
static bool iommu_is_attach_deferred(struct device *dev)
|
||||
{
|
||||
if (domain->ops->is_attach_deferred)
|
||||
return domain->ops->is_attach_deferred(domain, dev);
|
||||
const struct iommu_ops *ops = dev_iommu_ops(dev);
|
||||
|
||||
if (ops->is_attach_deferred)
|
||||
return ops->is_attach_deferred(dev);
|
||||
|
||||
return false;
|
||||
}
|
||||
@@ -894,7 +893,7 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
|
||||
|
||||
mutex_lock(&group->mutex);
|
||||
list_add_tail(&device->list, &group->devices);
|
||||
if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
|
||||
if (group->domain && !iommu_is_attach_deferred(dev))
|
||||
ret = __iommu_attach_device(group->domain, dev);
|
||||
mutex_unlock(&group->mutex);
|
||||
if (ret)
|
||||
@@ -1255,10 +1254,10 @@ int iommu_page_response(struct device *dev,
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

-	if (!domain || !domain->ops->page_response)
+	if (!ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
@@ -1299,7 +1298,7 @@ int iommu_page_response(struct device *dev,
			msg->pasid = 0;
		}

-		ret = domain->ops->page_response(dev, evt, msg);
+		ret = ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
@@ -1524,7 +1523,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);

 static int iommu_get_def_domain_type(struct device *dev)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;
@@ -1583,7 +1582,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
 */
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_group *group;
	int ret;

@@ -1591,9 +1590,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
	if (group)
		return group;

-	if (!ops)
-		return ERR_PTR(-EINVAL);
-
	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);
|
||||
struct iommu_domain *domain = data;
|
||||
int ret = 0;
|
||||
|
||||
if (!iommu_is_attach_deferred(domain, dev))
|
||||
if (!iommu_is_attach_deferred(dev))
|
||||
ret = __iommu_attach_device(domain, dev);
|
||||
|
||||
return ret;
|
||||
@@ -1762,10 +1758,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
|
||||
|
||||
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
|
||||
{
|
||||
struct iommu_domain *domain = data;
|
||||
const struct iommu_ops *ops = dev_iommu_ops(dev);
|
||||
|
||||
if (domain->ops->probe_finalize)
|
||||
domain->ops->probe_finalize(dev);
|
||||
if (ops->probe_finalize)
|
||||
ops->probe_finalize(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1954,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
	if (!domain)
		return NULL;

-	domain->ops = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+	if (!domain->ops)
+		domain->ops = bus->iommu_ops->default_domain_ops;

	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
@@ -1975,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 void iommu_domain_free(struct iommu_domain *domain)
 {
	iommu_put_dma_cookie(domain);
-	domain->ops->domain_free(domain);
+	domain->ops->free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
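The allocation hunk above is where the split becomes visible to the core: domain_alloc() may install per-domain ops itself, and only when it does not does the core fall back to the bus-wide default_domain_ops. A hedged sketch of a driver exploiting that (foo_* names are hypothetical):

static struct iommu_domain *foo_domain_alloc(unsigned int type)
{
	struct foo_domain *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return NULL;
	if (type == IOMMU_DOMAIN_IDENTITY)
		fd->domain.ops = &foo_identity_domain_ops;	/* core then skips default_domain_ops */
	return &fd->domain;
}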
@@ -2023,228 +2020,16 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
|
||||
|
||||
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
|
||||
{
|
||||
const struct iommu_ops *ops = domain->ops;
|
||||
|
||||
if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
|
||||
if (iommu_is_attach_deferred(dev))
|
||||
return __iommu_attach_device(domain, dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Check flags and other user provided data for valid combinations. We also
 * make sure no reserved fields or unused flags are set. This is to ensure
 * not breaking userspace in the future when these fields or flags are used.
 */
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
	u32 mask;
	int i;

	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
		return -EINVAL;

	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
	if (info->cache & ~mask)
		return -EINVAL;

	if (info->granularity >= IOMMU_INV_GRANU_NR)
		return -EINVAL;

	switch (info->granularity) {
	case IOMMU_INV_GRANU_ADDR:
		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
			return -EINVAL;

		mask = IOMMU_INV_ADDR_FLAGS_PASID |
		       IOMMU_INV_ADDR_FLAGS_ARCHID |
		       IOMMU_INV_ADDR_FLAGS_LEAF;

		if (info->granu.addr_info.flags & ~mask)
			return -EINVAL;
		break;
	case IOMMU_INV_GRANU_PASID:
		mask = IOMMU_INV_PASID_FLAGS_PASID |
		       IOMMU_INV_PASID_FLAGS_ARCHID;
		if (info->granu.pasid_info.flags & ~mask)
			return -EINVAL;

		break;
	case IOMMU_INV_GRANU_DOMAIN:
		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(info->padding); i++) {
		if (info->padding[i])
			return -EINVAL;
	}

	return 0;
}

int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
				void __user *uinfo)
{
	struct iommu_cache_invalidate_info inv_info = { 0 };
	u32 minsz;
	int ret;

	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_cache_invalidate_info, granu);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(&inv_info, uinfo, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (inv_info.argsz < minsz)
		return -EINVAL;

	/* PASID and address granu require additional info beyond minsz */
	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
		return -EINVAL;

	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
		return -EINVAL;

	/*
	 * User might be using a newer UAPI header which has a larger data
	 * size, we shall support the existing flags within the current
	 * size. Copy the remaining user data _after_ minsz but not more
	 * than the current kernel supported size.
	 */
	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
		return -EFAULT;

	/* Now the argsz is validated, check the content */
	ret = iommu_check_cache_invl_data(&inv_info);
	if (ret)
		return ret;

	return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);

static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
	u64 mask;
	int i;

	if (data->version != IOMMU_GPASID_BIND_VERSION_1)
		return -EINVAL;

	/* Check the range of supported formats */
	if (data->format >= IOMMU_PASID_FORMAT_LAST)
		return -EINVAL;

	/* Check all flags */
	mask = IOMMU_SVA_GPASID_VAL;
	if (data->flags & ~mask)
		return -EINVAL;

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(data->padding); i++) {
		if (data->padding[i])
			return -EINVAL;
	}

	return 0;
}

static int iommu_sva_prepare_bind_data(void __user *udata,
				       struct iommu_gpasid_bind_data *data)
{
	u32 minsz;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_gpasid_bind_data, vendor);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(data, udata, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (data->argsz < minsz)
		return -EINVAL;
	/*
	 * User might be using a newer UAPI header, we shall let IOMMU vendor
	 * driver decide on what size it needs. Since the guest PASID bind data
	 * can be vendor specific, larger argsz could be the result of extension
	 * for one vendor but it should not affect another vendor.
	 * Copy the remaining user data _after_ minsz
	 */
	if (copy_from_user((void *)data + minsz, udata + minsz,
			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
		return -EFAULT;

	return iommu_check_bind_data(data);
}

int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			       void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
			    ioasid_t pasid)
{
	if (unlikely(!domain->ops->sva_unbind_gpasid))
		return -ENODEV;

	return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
				 void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(domain, dev))
	if (iommu_is_attach_deferred(dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
@@ -2458,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

@@ -2481,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
@@ -2541,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	int ret;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
@@ -2570,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
@@ -2583,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
@@ -2659,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents, int prot,
			      gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	const struct iommu_domain_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
@@ -2792,17 +2577,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops && ops->get_resv_regions)
	if (ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops && ops->put_resv_regions)
	if (ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

@@ -2959,8 +2744,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
@@ -2988,50 +2771,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
@@ -3053,9 +2792,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!ops || !ops->sva_bind)
	if (!ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
@@ -3096,9 +2835,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!ops || !ops->sva_unbind)
	if (!ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
@@ -3115,9 +2854,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev_iommu_ops(handle->dev);

	if (!ops || !ops->sva_get_pasid)
	if (!ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);

@@ -15,13 +15,14 @@
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

@@ -95,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
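With the closing brace gone, the cached32_node update and the max32_alloc_size reset are decoupled: any free below dma_32bit_pfn now reopens the 32-bit allocation-size estimate, even when cached32_node itself does not move.
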
@@ -488,6 +488,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);

static void iova_domain_free_rcaches(struct iova_domain *iovad)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
	free_iova_rcaches(iovad);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
@@ -497,9 +504,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
	free_iova_rcaches(iovad);
	if (iovad->rcaches)
		iova_domain_free_rcaches(iovad);

	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
@@ -608,6 +615,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 */

#define IOVA_MAG_SIZE 128
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_magazine {
	unsigned long size;
@@ -620,6 +628,13 @@ struct iova_cpu_rcache {
	struct iova_magazine *prev;
};

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,28 +708,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
int iova_domain_init_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;
	int i, ret;

	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
				 sizeof(struct iova_rcache),
				 GFP_KERNEL);
	if (!iovad->rcaches)
		return -ENOMEM;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		struct iova_cpu_rcache *cpu_rcache;
		struct iova_rcache *rcache;

		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
						     cache_line_size());
		if (!rcache->cpu_rcaches) {
			ret = -ENOMEM;
			goto out_err;
		}
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);

			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
				ret = -ENOMEM;
				goto out_err;
			}
		}
	}

	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					       &iovad->cpuhp_dead);
	if (ret)
		goto out_err;
	return 0;

out_err:
	free_iova_rcaches(iovad);
	return ret;
}
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);

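Callers that used to get the range caches for free from init_iova_domain() must now opt in with the fallible iova_domain_init_rcaches(). A minimal sketch of the new calling convention (foo_setup_iova is illustrative, not a kernel function):

static int foo_setup_iova(struct iova_domain *iovad, unsigned long start_pfn)
{
	int ret;

	init_iova_domain(iovad, PAGE_SIZE, start_pfn);

	/* rcache setup is now a separate step and can fail */
	ret = iova_domain_init_rcaches(iovad);
	if (ret) {
		put_iova_domain(iovad);	/* safe: rcaches is NULL on failure */
		return ret;
	}

	return 0;
}
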
/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@@ -831,7 +872,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@@ -849,6 +890,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		if (!rcache->cpu_rcaches)
			break;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
@@ -858,6 +901,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}

	kfree(iovad->rcaches);
	iovad->rcaches = NULL;
}

/*

@@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
@@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = {
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = ipmmu_attach_device,
		.detach_dev = ipmmu_detach_device,
		.map = ipmmu_map,
		.unmap = ipmmu_unmap,
		.flush_iotlb_all = ipmmu_flush_iotlb_all,
		.iotlb_sync = ipmmu_iotlb_sync,
		.iova_to_phys = ipmmu_iova_to_phys,
		.free = ipmmu_domain_free,
	}
};

/* -----------------------------------------------------------------------------

@@ -558,11 +558,6 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
@@ -672,27 +667,28 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care when the iommu client does a writel before
	 * kick starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.detach_dev = msm_iommu_detach_dev,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};

static int msm_iommu_probe(struct platform_device *pdev)

@@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc = mtk_iommu_domain_alloc,
	.domain_free = mtk_iommu_domain_free,
	.attach_dev = mtk_iommu_attach_device,
	.detach_dev = mtk_iommu_detach_device,
	.map = mtk_iommu_map,
	.unmap = mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync = mtk_iommu_iotlb_sync,
	.iotlb_sync_map = mtk_iommu_sync_map,
	.iova_to_phys = mtk_iommu_iova_to_phys,
	.probe_device = mtk_iommu_probe_device,
	.release_device = mtk_iommu_release_device,
	.device_group = mtk_iommu_device_group,
@@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = {
	.put_resv_regions = generic_iommu_put_resv_regions,
	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = mtk_iommu_attach_device,
		.detach_dev = mtk_iommu_detach_device,
		.map = mtk_iommu_map,
		.unmap = mtk_iommu_unmap,
		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
		.iotlb_sync = mtk_iommu_iotlb_sync,
		.iotlb_sync_map = mtk_iommu_sync_map,
		.iova_to_phys = mtk_iommu_iova_to_phys,
		.free = mtk_iommu_domain_free,
	}
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)

@@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc = mtk_iommu_domain_alloc,
	.domain_free = mtk_iommu_domain_free,
	.attach_dev = mtk_iommu_attach_device,
	.detach_dev = mtk_iommu_detach_device,
	.map = mtk_iommu_map,
	.unmap = mtk_iommu_unmap,
	.iova_to_phys = mtk_iommu_iova_to_phys,
	.probe_device = mtk_iommu_probe_device,
	.probe_finalize = mtk_iommu_probe_finalize,
	.release_device = mtk_iommu_release_device,
@@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = {
	.device_group = generic_device_group,
	.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = mtk_iommu_attach_device,
		.detach_dev = mtk_iommu_detach_device,
		.map = mtk_iommu_map,
		.unmap = mtk_iommu_unmap,
		.iova_to_phys = mtk_iommu_iova_to_phys,
		.free = mtk_iommu_domain_free,
	}
};

static const struct of_device_id mtk_iommu_of_ids[] = {

@@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc = omap_iommu_domain_alloc,
	.domain_free = omap_iommu_domain_free,
	.attach_dev = omap_iommu_attach_dev,
	.detach_dev = omap_iommu_detach_dev,
	.map = omap_iommu_map,
	.unmap = omap_iommu_unmap,
	.iova_to_phys = omap_iommu_iova_to_phys,
	.probe_device = omap_iommu_probe_device,
	.release_device = omap_iommu_release_device,
	.device_group = omap_iommu_device_group,
	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = omap_iommu_attach_dev,
		.detach_dev = omap_iommu_detach_dev,
		.map = omap_iommu_map,
		.unmap = omap_iommu_unmap,
		.iova_to_phys = omap_iommu_iova_to_phys,
		.free = omap_iommu_domain_free,
	}
};

static int __init omap_iommu_init(void)

@@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.probe_device = rk_iommu_probe_device,
	.release_device = rk_iommu_release_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.device_group = rk_iommu_device_group,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
	.of_xlate = rk_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = rk_iommu_attach_device,
		.detach_dev = rk_iommu_detach_device,
		.map = rk_iommu_map,
		.unmap = rk_iommu_unmap,
		.iova_to_phys = rk_iommu_iova_to_phys,
		.free = rk_iommu_domain_free,
	}
};

static int rk_iommu_probe(struct platform_device *pdev)

@@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.domain_free = s390_domain_free,
	.attach_dev = s390_iommu_attach_device,
	.detach_dev = s390_iommu_detach_device,
	.map = s390_iommu_map,
	.unmap = s390_iommu_unmap,
	.iova_to_phys = s390_iommu_iova_to_phys,
	.probe_device = s390_iommu_probe_device,
	.release_device = s390_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = S390_IOMMU_PGSIZES,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = s390_iommu_attach_device,
		.detach_dev = s390_iommu_detach_device,
		.map = s390_iommu_map,
		.unmap = s390_iommu_unmap,
		.iova_to_phys = s390_iommu_iova_to_phys,
		.free = s390_domain_free,
	}
};

static int __init s390_iommu_init(void)

@@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)

static const struct iommu_ops sprd_iommu_ops = {
	.domain_alloc = sprd_iommu_domain_alloc,
	.domain_free = sprd_iommu_domain_free,
	.attach_dev = sprd_iommu_attach_device,
	.detach_dev = sprd_iommu_detach_device,
	.map = sprd_iommu_map,
	.unmap = sprd_iommu_unmap,
	.iotlb_sync_map = sprd_iommu_sync_map,
	.iotlb_sync = sprd_iommu_sync,
	.iova_to_phys = sprd_iommu_iova_to_phys,
	.probe_device = sprd_iommu_probe_device,
	.release_device = sprd_iommu_release_device,
	.device_group = sprd_iommu_device_group,
	.of_xlate = sprd_iommu_of_xlate,
	.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = sprd_iommu_attach_device,
		.detach_dev = sprd_iommu_detach_device,
		.map = sprd_iommu_map,
		.unmap = sprd_iommu_unmap,
		.iotlb_sync_map = sprd_iommu_sync_map,
		.iotlb_sync = sprd_iommu_sync,
		.iova_to_phys = sprd_iommu_iova_to_phys,
		.free = sprd_iommu_domain_free,
	}
};

static const struct of_device_id sprd_iommu_of_match[] = {

@@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,

static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap = SZ_4K,
	.attach_dev = sun50i_iommu_attach_device,
	.detach_dev = sun50i_iommu_detach_device,
	.device_group = sun50i_iommu_device_group,
	.domain_alloc = sun50i_iommu_domain_alloc,
	.domain_free = sun50i_iommu_domain_free,
	.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
	.iotlb_sync = sun50i_iommu_iotlb_sync,
	.iova_to_phys = sun50i_iommu_iova_to_phys,
	.map = sun50i_iommu_map,
	.of_xlate = sun50i_iommu_of_xlate,
	.probe_device = sun50i_iommu_probe_device,
	.release_device = sun50i_iommu_release_device,
	.unmap = sun50i_iommu_unmap,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = sun50i_iommu_attach_device,
		.detach_dev = sun50i_iommu_detach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync = sun50i_iommu_iotlb_sync,
		.iova_to_phys = sun50i_iommu_iova_to_phys,
		.map = sun50i_iommu_map,
		.unmap = sun50i_iommu_unmap,
		.free = sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,

@@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
	return pte & GART_PAGE_MASK;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
@@ -276,21 +271,22 @@ static void gart_iommu_sync(struct iommu_domain *domain,
}

static const struct iommu_ops gart_iommu_ops = {
	.capable = gart_iommu_capable,
	.domain_alloc = gart_iommu_domain_alloc,
	.domain_free = gart_iommu_domain_free,
	.attach_dev = gart_iommu_attach_dev,
	.detach_dev = gart_iommu_detach_dev,
	.probe_device = gart_iommu_probe_device,
	.release_device = gart_iommu_release_device,
	.device_group = generic_device_group,
	.map = gart_iommu_map,
	.unmap = gart_iommu_unmap,
	.iova_to_phys = gart_iommu_iova_to_phys,
	.pgsize_bitmap = GART_IOMMU_PGSIZES,
	.of_xlate = gart_iommu_of_xlate,
	.iotlb_sync_map = gart_iommu_sync_map,
	.iotlb_sync = gart_iommu_sync,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = gart_iommu_attach_dev,
		.detach_dev = gart_iommu_detach_dev,
		.map = gart_iommu_map,
		.unmap = gart_iommu_unmap,
		.iova_to_phys = gart_iommu_iova_to_phys,
		.iotlb_sync_map = gart_iommu_sync_map,
		.iotlb_sync = gart_iommu_sync,
		.free = gart_iommu_domain_free,
	}
};

int tegra_gart_suspend(struct gart_device *gart)

@@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
	clear_bit(id, smmu->asids);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
@@ -967,19 +962,20 @@ static int tegra_smmu_of_xlate(struct device *dev,
}

static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = tegra_smmu_attach_dev,
		.detach_dev = tegra_smmu_detach_dev,
		.map = tegra_smmu_map,
		.unmap = tegra_smmu_unmap,
		.iova_to_phys = tegra_smmu_iova_to_phys,
		.free = tegra_smmu_domain_free,
	}
};

static void tegra_smmu_ahb_enable(void)

@@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)

static struct iommu_ops viommu_ops = {
	.domain_alloc = viommu_domain_alloc,
	.domain_free = viommu_domain_free,
	.attach_dev = viommu_attach_dev,
	.map = viommu_map,
	.unmap = viommu_unmap,
	.iova_to_phys = viommu_iova_to_phys,
	.iotlb_sync = viommu_iotlb_sync,
	.probe_device = viommu_probe_device,
	.probe_finalize = viommu_probe_finalize,
	.release_device = viommu_release_device,
@@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
	.put_resv_regions = generic_iommu_put_resv_regions,
	.of_xlate = viommu_of_xlate,
	.owner = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = viommu_attach_dev,
		.map = viommu_map,
		.unmap = viommu_unmap,
		.iova_to_phys = viommu_iova_to_phys,
		.iotlb_sync = viommu_iotlb_sync,
		.free = viommu_domain_free,
	}
};

static int viommu_init_vqs(struct viommu_dev *viommu)

@@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
	struct file *file;
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	int ret;

	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
	if (iova_limit <= bounce_size)
@@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
	spin_lock_init(&domain->iotlb_lock);
	init_iova_domain(&domain->stream_iovad,
			PAGE_SIZE, IOVA_START_PFN);
	ret = iova_domain_init_rcaches(&domain->stream_iovad);
	if (ret)
		goto err_iovad_stream;
	init_iova_domain(&domain->consistent_iovad,
			PAGE_SIZE, bounce_pfns);
	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
	if (ret)
		goto err_iovad_consistent;

	return domain;
err_iovad_consistent:
	put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
	fput(file);
err_file:
	vfree(domain->bounce_maps);
err_map:

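Each init_iova_domain() above is now paired with a fallible iova_domain_init_rcaches() call, and the unwind labels rely on put_iova_domain() releasing the range caches only when they were actually set up.
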
@@ -525,12 +525,6 @@ struct context_entry {
 */
#define DOMAIN_FLAG_USE_FIRST_LEVEL		BIT(1)

/*
 * Domain represents a virtual machine which demands iommu nested
 * translation mode support.
 */
#define DOMAIN_FLAG_NESTING_MODE		BIT(2)

struct dmar_domain {
	int	nid;			/* node id */

@@ -548,7 +542,6 @@ struct dmar_domain {
	u8 iommu_snooping: 1;		/* indicate snooping control feature */

	struct list_head devices;	/* all devices' list */
	struct list_head subdevices;	/* all subdevices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
@@ -563,11 +556,6 @@ struct dmar_domain {
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	u32		default_pasid;	/*
					 * The default pasid used for non-SVM
					 * traffic on mediated devices.
					 */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
@@ -620,21 +608,11 @@ struct intel_iommu {
	void *perf_statistic;
};

/* Per subdevice private data */
struct subdev_domain_info {
	struct list_head link_phys;	/* link to phys device siblings */
	struct list_head link_domain;	/* link to domain siblings */
	struct device *pdev;		/* physical device derived from */
	struct dmar_domain *domain;	/* aux-domain */
	int users;			/* user count */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;		/* link to domain siblings */
	struct list_head global;	/* link to global list */
	struct list_head table;		/* link to pasid table */
	struct list_head subdevices;	/* subdevices sibling */
	u32 segment;			/* PCI segment number */
	u8 bus;				/* PCI bus number */
	u8 devfn;			/* PCI devfn number */
@@ -645,7 +623,6 @@ struct device_domain_info {
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 auxd_enabled:1;		/* Multiple domains per device */
	u8 ats_qdep;
	struct device *dev;		/* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu;	/* IOMMU used by this device */
@@ -765,9 +742,6 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			  struct iommu_gpasid_bind_data *data);
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid);
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
				 void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
@@ -795,7 +769,6 @@ struct intel_svm {

	unsigned int flags;
	u32 pasid;
	int gpasid; /* In case that guest PASID is different from host PASID */
	struct list_head devs;
};
#else

@@ -25,17 +25,5 @@
 * do such IOTLB flushes automatically.
 */
#define SVM_FLAG_SUPERVISOR_MODE	BIT(0)
/*
 * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
 * processes. Compared to the host bind, the primary differences are:
 * 1. mm life cycle management
 * 2. fault reporting
 */
#define SVM_FLAG_GUEST_MODE		BIT(1)
/*
 * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
 * which requires guest and host PASID translation at both directions.
 */
#define SVM_FLAG_GUEST_PASID		BIT(2)

#endif /* __INTEL_SVM_H__ */

@@ -37,6 +37,7 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
@@ -88,7 +89,7 @@ struct iommu_domain_geometry {

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
@@ -144,7 +145,6 @@ struct iommu_resv_region {

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
@@ -157,7 +157,6 @@ struct iommu_resv_region {
 * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};
@@ -194,9 +193,75 @@ struct iommu_iotlb_gather {
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 * @detach_dev: detach an iommu domain from a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
@@ -207,111 +272,39 @@ struct iommu_iotlb_gather {
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @free: Release the domain after use.
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			       struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);

	int (*def_domain_type)(struct device *dev);

	unsigned long pgsize_bitmap;
	struct module *owner;
	void (*free)(struct iommu_domain *domain);
};

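Taken together, a driver conversion in this series collapses to the shape below: per-device and allocation entry points stay in iommu_ops, while everything that operates on an existing domain moves into a shared iommu_domain_ops instance. A condensed, hypothetical foo driver for illustration (the real conversions are in the hunks above):

static const struct iommu_ops foo_iommu_ops = {
	.capable = foo_capable,
	.domain_alloc = foo_domain_alloc,
	.probe_device = foo_probe_device,
	.release_device = foo_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = foo_attach_dev,
		.detach_dev = foo_detach_dev,
		.map = foo_map,
		.unmap = foo_unmap,
		.iova_to_phys = foo_iova_to_phys,
		.free = foo_domain_free,
	}
};
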
/**
@@ -403,6 +396,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

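dev_iommu_ops() is what lets the conversions above drop the dev->bus->iommu_ops lookups and the ops NULL checks; per the comment, it may only be called for a device that has already passed iommu_probe_device(). A hedged usage sketch (foo_enable_sva is hypothetical):

static int foo_enable_sva(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);	/* never NULL here */

	if (!ops->dev_enable_feat)
		return -ENODEV;

	return ops->dev_enable_feat(dev, IOMMU_DEV_FEAT_SVA);
}
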
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
@@ -421,14 +425,6 @@ extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
				       struct device *dev,
				       void __user *uinfo);

extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
				      struct device *dev, void __user *udata);
extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
					struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
@@ -672,9 +668,6 @@ void iommu_release_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
@@ -1019,23 +1012,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
@@ -1051,33 +1027,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
	return IOMMU_PASID_INVALID;
}

static inline int
iommu_uapi_cache_invalidate(struct iommu_domain *domain,
			    struct device *dev,
			    struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}

static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
					     struct device *dev, void __user *udata)
{
	return -ENODEV;
}

static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
					       struct device *dev, void __user *udata)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
					  struct device *dev,
					  ioasid_t pasid)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;

@@ -21,18 +21,8 @@ struct iova {
	unsigned long	pfn_lo;	/* Lowest allocated pfn */
};

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
struct iova_rcache;

/* holds all the iova translations for a domain */
struct iova_domain {
@@ -46,7 +36,7 @@ struct iova_domain {
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova	anchor;		/* rbtree lookup anchor */

	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
	struct iova_rcache	*rcaches;
	struct hlist_node	cpuhp_dead;
};

@@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn);
int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else

@@ -158,185 +158,4 @@ struct iommu_page_response {
	__u32	code;
};

/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
	IOMMU_INV_GRANU_DOMAIN,	/* domain-selective invalidation */
	IOMMU_INV_GRANU_PASID,	/* PASID-selective invalidation */
	IOMMU_INV_GRANU_ADDR,	/* page-selective invalidation */
	IOMMU_INV_GRANU_NR,	/* number of invalidation granularities */
};

/**
 * struct iommu_inv_addr_info - Address Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the address-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID and matching the address
 *   range.
 * - If the ARCHID bit is set, @archid is populated and the invalidation relates
 *   to cache entries tagged with this architecture-specific ID and matching
 *   the address range.
 * - Both PASID and ARCHID can be set as they may tag different caches.
 * - If neither PASID nor ARCHID is set, global addr invalidation applies.
 * - The LEAF flag indicates whether only the leaf PTE caching needs to be
 *   invalidated and other paging structure caches can be preserved.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 * @addr: first stage/level input address
 * @granule_size: page/block size of the mapping in bytes
 * @nb_granules: number of contiguous granules to be invalidated
 */
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID	(1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF	(1 << 2)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
	__u64	addr;
	__u64	granule_size;
	__u64	nb_granules;
};

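For the record (this whole UAPI block is being deleted), a sketch of how the comment above intended the structure to be filled — a PASID-tagged, leaf-only invalidation of nr_pages contiguous pages; pasid, iova and nr_pages are placeholders:

struct iommu_inv_addr_info info = {
	.flags		= IOMMU_INV_ADDR_FLAGS_PASID |
			  IOMMU_INV_ADDR_FLAGS_LEAF,
	.pasid		= pasid,		/* placeholder */
	.addr		= iova & PAGE_MASK,	/* placeholder */
	.granule_size	= PAGE_SIZE,
	.nb_granules	= nr_pages,		/* placeholder */
};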
/**
 * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the PASID-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID and matching the address
 *   range.
 * - If the ARCHID bit is set, @archid is populated and the invalidation
 *   relates to cache entries tagged with this architecture-specific ID and
 *   matching the address range.
 * - Both PASID and ARCHID can be set as they may tag different caches.
 * - At least one of PASID or ARCHID must be set.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 */
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID	(1 << 1)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
};

/**
 * struct iommu_cache_invalidate_info - First level/stage invalidation
 *     information
 * @argsz: User filled size of this data
 * @version: API version of this structure
 * @cache: bitfield used to select which caches to invalidate
 * @granularity: defines the lowest granularity used for the invalidation:
 *     domain > PASID > addr
 * @padding: reserved for future use (should be zero)
 * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
 * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
 *
 * Not all the combinations of cache/granularity are valid:
 *
 * +--------------+---------------+---------------+---------------+
 * | type /       |   DEV_IOTLB   |     IOTLB     |      PASID    |
 * | granularity  |               |               |      cache    |
 * +==============+===============+===============+===============+
 * | DOMAIN       |      N/A      |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | PASID        |       Y       |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | ADDR         |       Y       |       Y       |      N/A      |
 * +--------------+---------------+---------------+---------------+
 *
 * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
 * @version and @cache.
 *
 * If multiple cache types are invalidated simultaneously, they all
 * must support the used granularity.
 */
struct iommu_cache_invalidate_info {
	__u32	argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
	__u32	version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB	(1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB	(1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID	(1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR		(3)
	__u8	cache;
	__u8	granularity;
	__u8	padding[6];
	union {
		struct iommu_inv_pasid_info pasid_info;
		struct iommu_inv_addr_info addr_info;
	} granu;
};

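Likewise for reference, a sketch of a single-page, PASID-tagged IOTLB invalidation — a combination the table above marks valid (IOTLB column, ADDR granularity); pasid and iova are placeholders:

struct iommu_cache_invalidate_info inv = {
	.argsz		= sizeof(inv),
	.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
	.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
	.granularity	= IOMMU_INV_GRANU_ADDR,
	.granu.addr_info = {
		.flags		= IOMMU_INV_ADDR_FLAGS_PASID,
		.pasid		= pasid,		/* placeholder */
		.addr		= iova & PAGE_MASK,	/* placeholder */
		.granule_size	= PAGE_SIZE,
		.nb_granules	= 1,
	},
};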
/**
 * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
 * SVA binding.
 *
 * @flags:	VT-d PASID table entry attributes
 * @pat:	Page attribute table data to compute effective memory type
 * @emt:	Extended memory type
 *
 * Only guest vIOMMU selectable and effective options are passed down to
 * the host IOMMU.
 */
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE	(1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE	(1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD	(1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT	(1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE	(1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD		(1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_WPE	(1 << 6) /* Write protect enable */
#define IOMMU_SVA_VTD_GPASID_LAST	(1 << 7)
	__u64 flags;
	__u32 pat;
	__u32 emt;
};

#define IOMMU_SVA_VTD_GPASID_MTS_MASK	(IOMMU_SVA_VTD_GPASID_CD | \
					 IOMMU_SVA_VTD_GPASID_EMTE | \
					 IOMMU_SVA_VTD_GPASID_PCD | \
					 IOMMU_SVA_VTD_GPASID_PWT)

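The MTS mask groups the memory-type bits so a host driver can test them in one go; a hypothetical validity check (supported_mts is a made-up per-driver capability mask, not from this diff):

/* Reject memory-type attributes the host does not support. */
if (data->vtd.flags & IOMMU_SVA_VTD_GPASID_MTS_MASK & ~supported_mts)
	return -EINVAL;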
/**
 * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
 * @argsz:	User filled size of this data
 * @version:	Version of this data structure
 * @format:	PASID table entry format
 * @flags:	Additional information on guest bind request
 * @gpgd:	Guest page directory base of the guest mm to bind
 * @hpasid:	Process address space ID used for the guest mm in host IOMMU
 * @gpasid:	Process address space ID used for the guest mm in guest IOMMU
 * @addr_width:	Guest virtual address width
 * @padding:	Reserved for future use (should be zero)
 * @vtd:	Intel VT-d specific data
 *
 * Guest to host PASID mapping can be an identity or non-identity, where guest
 * has its own PASID space. For non-identity mapping, guest to host PASID lookup
 * is needed when the VM programs a guest PASID into an assigned device. The VMM
 * may trap such PASID programming and then request the host IOMMU driver to
 * convert the guest PASID to a host PASID based on this bind data.
 */
struct iommu_gpasid_bind_data {
	__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1	1
	__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD	1
#define IOMMU_PASID_FORMAT_LAST		2
	__u32 format;
	__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL	(1 << 0) /* guest PASID valid */
	__u64 flags;
	__u64 gpgd;
	__u64 hpasid;
	__u64 gpasid;
	__u8  padding[8];
	/* Vendor specific data */
	union {
		struct iommu_gpasid_bind_data_vtd vtd;
	} vendor;
};

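Finally, a sketch of the non-identity bind the comment describes — roughly what a VMM trapping guest PASID programming would have handed to the host driver (every value below is a placeholder):

struct iommu_gpasid_bind_data bind = {
	.argsz		= sizeof(bind),
	.version	= IOMMU_GPASID_BIND_VERSION_1,
	.format		= IOMMU_PASID_FORMAT_INTEL_VTD,
	.addr_width	= 48,				/* placeholder */
	.flags		= IOMMU_SVA_GPASID_VAL,		/* @gpasid is valid */
	.gpgd		= guest_pgd_gpa,		/* placeholder */
	.hpasid		= host_pasid,			/* placeholder */
	.gpasid		= guest_pasid,			/* placeholder */
	.vendor.vtd	= {
		.flags	= IOMMU_SVA_VTD_GPASID_SRE,	/* placeholder */
	},
};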
#endif /* _UAPI_IOMMU_H */