Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-16 07:51:31 -04:00)
iommu/vt-d: Pass size_order to qi_desc_piotlb() not npages
It doesn't make sense for the caller to compute mask, throw it away and then have qi_desc_piotlb() compute it again. Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/2-v1-f175e27af136+11647-iommupt_inv_vtd_jgg@nvidia.com Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
This commit is contained in:
  committed by: Joerg Roedel
  parent: b6fd468a05
  commit: e36ee89679
@@ -338,13 +338,11 @@ static void qi_batch_add_piotlb_all(struct intel_iommu *iommu, u16 did,
 }
 
static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
|
||||
u64 addr, unsigned long npages, bool ih,
|
||||
u64 addr, unsigned int size_order, bool ih,
|
||||
struct qi_batch *batch)
|
||||
{
|
||||
if (!npages)
|
||||
return;
|
||||
|
||||
qi_desc_piotlb(did, pasid, addr, npages, ih, &batch->descs[batch->index]);
|
||||
qi_desc_piotlb(did, pasid, addr, size_order, ih,
|
||||
&batch->descs[batch->index]);
|
||||
qi_batch_increment_index(iommu, batch);
|
||||
}
|
||||
|
||||
@@ -385,7 +383,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
 				tag->pasid, domain->qi_batch);
 		else
 			qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid,
-					    addr, pages, ih, domain->qi_batch);
+					    addr, mask, ih, domain->qi_batch);
 		return;
 	}
 
|
||||
|
||||
@@ -1087,19 +1087,16 @@ static inline void qi_desc_piotlb_all(u16 did, u32 pasid, struct qi_desc *desc)
 
/* Page-selective-within-PASID IOTLB invalidation */
|
||||
static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
|
||||
unsigned long npages, bool ih,
|
||||
unsigned int size_order, bool ih,
|
||||
struct qi_desc *desc)
|
||||
{
|
||||
int mask = ilog2(__roundup_pow_of_two(npages));
|
||||
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
|
||||
|
||||
if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
|
||||
addr = ALIGN_DOWN(addr, align);
|
||||
|
||||
/*
|
||||
* calculate_psi_aligned_address() must be used for addr and size_order
|
||||
*/
|
||||
desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
|
||||
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
|
||||
desc->qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(ih) |
|
||||
QI_EIOTLB_AM(mask);
|
||||
QI_EIOTLB_AM(size_order);
|
||||
}
 static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
 
Reference in New Issue
Block a user