Merge tag 'iommu-fixes-v6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

 - iommupt: Fix an oops found by syzkaller in the new generic
   IO-page-table code (the ERR_PTR convention the fix uses is sketched
   right after this list).

 - AMD-Vi: Fix IO_PAGE_FAULTs in kdump kernels triggered by re-using
   domain IDs from the previous kernel (the ID-reservation idea is
   sketched after the shortlog below).
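The iommupt fix hinges on the kernel's ERR_PTR convention: a failed allocation is reported as an encoded errno pointer that callers test with IS_ERR(), instead of being left for someone to dereference. Below is a minimal, illustrative sketch of that convention using invented names (example_table, example_table_alloc, example_use_table); it is not the iommupt code, whose actual change is in the _table_alloc() hunk at the end of this diff.

#include <linux/err.h>
#include <linux/slab.h>

struct example_table {
        unsigned long entries[512];
};

static struct example_table *example_table_alloc(gfp_t gfp)
{
        struct example_table *tbl = kzalloc(sizeof(*tbl), gfp);

        /* Encode the errno in the pointer so a failed allocation can
         * never be dereferenced by a caller that checks IS_ERR(). */
        if (!tbl)
                return ERR_PTR(-ENOMEM);
        return tbl;
}

static int example_use_table(void)
{
        struct example_table *tbl = example_table_alloc(GFP_KERNEL);

        if (IS_ERR(tbl))
                return PTR_ERR(tbl);

        /* ... use the table ... */
        kfree(tbl);
        return 0;
}

In the sketch, kzalloc() merely stands in for the page allocation the real helper performs.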

* tag 'iommu-fixes-v6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  amd/iommu: Make protection domain ID functions non-static
  amd/iommu: Preserve domain ids inside the kdump kernel
  iommupt: Return ERR_PTR from _table_alloc()
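The kdump fix amounts to reserving, in the protection-domain ID allocator, every domain ID that the previous kernel's device table still references, so the kdump kernel never hands those IDs out to new domains, which is what triggered the IO_PAGE_FAULTs. Below is a minimal, illustrative sketch of that reservation idea under invented names (example_ids, example_id_reserve, example_preserve_old_ids); the real change is in the __reuse_device_table() hunk further down.

/* Illustrative only; not the kernel code. */
#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/types.h>

static DEFINE_IDA(example_ids);

/*
 * Reserve one specific ID: ida_alloc_range(ida, id, id, gfp) returns the
 * ID on success, -ENOSPC if it is already taken, or -ENOMEM on allocation
 * failure.
 */
static int example_id_reserve(u16 id, gfp_t gfp)
{
        return ida_alloc_range(&example_ids, id, id, gfp);
}

static int example_preserve_old_ids(const u16 *old_ids, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (!old_ids[i])
                        continue;
                /*
                 * -ENOSPC just means another device already reserved the
                 * same domain ID, so only -ENOMEM is treated as fatal,
                 * mirroring the check in the patch below.
                 */
                if (example_id_reserve(old_ids[i], GFP_KERNEL) == -ENOMEM)
                        return -ENOMEM;
        }
        return 0;
}

Treating -ENOSPC as success is deliberate: several devices can share one domain, so the same ID may be reserved more than once.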
Author: Linus Torvalds
Date:   2025-12-20 11:18:32 -08:00

4 files changed, 47 insertions(+), 12 deletions(-)


@@ -173,6 +173,11 @@ static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
 bool translation_pre_enabled(struct amd_iommu *iommu);
 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
+int amd_iommu_pdom_id_alloc(void);
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp);
+void amd_iommu_pdom_id_free(int id);
+void amd_iommu_pdom_id_destroy(void);
+
 #ifdef CONFIG_DMI
 void amd_iommu_apply_ivrs_quirks(void);
 #else


@@ -1136,8 +1136,11 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
 static bool __reuse_device_table(struct amd_iommu *iommu)
 {
        struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
-       u32 lo, hi, old_devtb_size;
+       struct dev_table_entry *old_dev_tbl_entry;
+       u32 lo, hi, old_devtb_size, devid;
        phys_addr_t old_devtb_phys;
+       u16 dom_id;
+       bool dte_v;
        u64 entry;
 
        /* Each IOMMU use separate device table with the same size */
@@ -1173,6 +1176,22 @@ static bool __reuse_device_table(struct amd_iommu *iommu)
                return false;
        }
 
+       for (devid = 0; devid <= pci_seg->last_bdf; devid++) {
+               old_dev_tbl_entry = &pci_seg->old_dev_tbl_cpy[devid];
+               dte_v = FIELD_GET(DTE_FLAG_V, old_dev_tbl_entry->data[0]);
+               dom_id = FIELD_GET(DEV_DOMID_MASK, old_dev_tbl_entry->data[1]);
+
+               if (!dte_v || !dom_id)
+                       continue;
+
+               /*
+                * ID reservation can fail with -ENOSPC when there
+                * are multiple devices present in the same domain,
+                * hence check only for -ENOMEM.
+                */
+               if (amd_iommu_pdom_id_reserve(dom_id, GFP_KERNEL) == -ENOMEM)
+                       return false;
+       }
+
        return true;
 }
@@ -3127,8 +3146,7 @@ static bool __init check_ioapic_information(void)
 static void __init free_dma_resources(void)
 {
-       ida_destroy(&pdom_ids);
+       amd_iommu_pdom_id_destroy();
        free_unity_maps();
 }


@@ -1811,17 +1811,26 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
  * contain.
  *
  ****************************************************************************/
 
-static int pdom_id_alloc(void)
+int amd_iommu_pdom_id_alloc(void)
 {
        return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
 }
 
-static void pdom_id_free(int id)
+int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
+{
+       return ida_alloc_range(&pdom_ids, id, id, gfp);
+}
+
+void amd_iommu_pdom_id_free(int id)
 {
        ida_free(&pdom_ids, id);
 }
 
+void amd_iommu_pdom_id_destroy(void)
+{
+       ida_destroy(&pdom_ids);
+}
+
 static void free_gcr3_tbl_level1(u64 *tbl)
 {
        u64 *ptr;
@@ -1864,7 +1873,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
        gcr3_info->glx = 0;
 
        /* Free per device domain ID */
-       pdom_id_free(gcr3_info->domid);
+       amd_iommu_pdom_id_free(gcr3_info->domid);
 
        iommu_free_pages(gcr3_info->gcr3_tbl);
        gcr3_info->gcr3_tbl = NULL;
@@ -1900,14 +1909,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
                return -EBUSY;
 
        /* Allocate per device domain ID */
-       domid = pdom_id_alloc();
+       domid = amd_iommu_pdom_id_alloc();
        if (domid <= 0)
                return -ENOSPC;
        gcr3_info->domid = domid;
 
        gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
        if (gcr3_info->gcr3_tbl == NULL) {
-               pdom_id_free(domid);
+               amd_iommu_pdom_id_free(domid);
                return -ENOMEM;
        }
@@ -2503,7 +2512,7 @@ struct protection_domain *protection_domain_alloc(void)
        if (!domain)
                return NULL;
 
-       domid = pdom_id_alloc();
+       domid = amd_iommu_pdom_id_alloc();
        if (domid <= 0) {
                kfree(domain);
                return NULL;
@@ -2802,7 +2811,7 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
        WARN_ON(!list_empty(&domain->dev_list));
 
        pt_iommu_deinit(&domain->iommu);
-       pdom_id_free(domain->id);
+       amd_iommu_pdom_id_free(domain->id);
        kfree(domain);
 }
@@ -2853,7 +2862,7 @@ void amd_iommu_init_identity_domain(void)
        domain->ops = &identity_domain_ops;
        domain->owner = &amd_iommu_ops;
 
-       identity_domain.id = pdom_id_alloc();
+       identity_domain.id = amd_iommu_pdom_id_alloc();
 
        protection_domain_init(&identity_domain);
 }


@@ -372,6 +372,9 @@ static inline struct pt_table_p *_table_alloc(struct pt_common *common,
        table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
                                              log2_to_int(lg2sz));
+       if (!table_mem)
+               return ERR_PTR(-ENOMEM);
+
        if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
            mode == ALLOC_NORMAL) {
                int ret = iommu_pages_start_incoherent(