mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-13 07:20:14 -04:00
iommu/vt-d: Use virt_to_phys()
If all the inlines are unwound, virt_to_dma_pfn() is simply:
return page_to_pfn(virt_to_page(p)) << (PAGE_SHIFT - VTD_PAGE_SHIFT);
Which can be re-arranged to:
(page_to_pfn(virt_to_page(p)) << PAGE_SHIFT) >> VTD_PAGE_SHIFT
The only caller is:
((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT)
re-arranged to:
((page_to_pfn(virt_to_page(tmp_page)) << PAGE_SHIFT) >> VTD_PAGE_SHIFT)
<< VTD_PAGE_SHIFT
Which simplifies to:
page_to_pfn(virt_to_page(tmp_page)) << PAGE_SHIFT
That is the same as virt_to_phys(tmp_page), so just remove all of this.
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/8-v3-e797f4dc6918+93057-iommu_pages_jgg@nvidia.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in: commit a8653e5cc2 (parent 9ce7603ad3), committed by Joerg Roedel
@@ -737,7 +737,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 				return NULL;
 
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+			pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
+				 DMA_PTE_WRITE;
 			if (domain->use_first_level)
 				pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
||||
@@ -953,25 +953,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
-   are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
-	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
-	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
-	return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
-	return page_to_dma_pfn(virt_to_page(p));
-}
-
 static inline void context_set_present(struct context_entry *context)
 {
 	context->lo |= 1;
||||
Reference in New Issue
Block a user