vfio/pci: Set up BAR resources and maps in vfio_pci_core_enable()

Previously BAR resource requests and the corresponding pci_iomap()
were performed on-demand and without synchronisation, which was racy.
Rather than add synchronisation, it's simplest to address this by
doing both activities from vfio_pci_core_enable().

The resource allocation and/or pci_iomap() can still fail; their
status is tracked and existing calls to vfio_pci_core_setup_barmap()
will fail in a similar way to before.  This keeps the point of failure
as observed by userspace the same, i.e. failures to request/map unused
BARs are benign.

Fixes: 89e1f7d4c6 ("vfio: Add PCI device driver")
Signed-off-by: Matt Evans <mattev@meta.com>
Link: https://lore.kernel.org/r/20260511145829.2993601-2-mattev@meta.com
[ERR_PTR -> IOMEM_ERR_PTR per lkp report]
Signed-off-by: Alex Williamson <alex@shazbot.org>
This commit is contained in:
Matt Evans
2026-05-11 07:58:23 -07:00
committed by Alex Williamson
parent df733ddc26
commit 05f2a68b40
2 changed files with 43 additions and 20 deletions

View File

@@ -482,6 +482,40 @@ static int vfio_pci_core_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
/*
* Eager-request BAR resources, and iomap them. Soft failures are
* allowed, and consumers must check the barmap before use in order to
* give compatible user-visible behaviour with the previous on-demand
* allocation method.
*/
/* Eagerly request and iomap all standard BARs at enable time (see the
 * comment block above): failures are recorded as error pointers in
 * vdev->barmap[] rather than propagated, so userspace only observes an
 * error if it later touches a BAR that could not be set up. */
static void vfio_pci_core_map_bars(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
int bar = i + PCI_STD_RESOURCES;
/* Default sentinel: BAR absent / zero-sized. */
vdev->barmap[bar] = IOMEM_ERR_PTR(-ENODEV);
/* Skip unimplemented (zero-length) BARs. */
if (!pci_resource_len(pdev, i))
continue;
/* Soft failure: someone else owns the region. */
if (pci_request_selected_regions(pdev, 1 << bar, "vfio")) {
pci_dbg(pdev, "Failed to reserve region %d\n", bar);
vdev->barmap[bar] = IOMEM_ERR_PTR(-EBUSY);
continue;
}
vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
if (!vdev->barmap[bar]) {
pci_dbg(pdev, "Failed to iomap region %d\n", bar);
/* Undo the region request so state stays consistent. */
pci_release_selected_regions(pdev, 1 << bar);
vdev->barmap[bar] = IOMEM_ERR_PTR(-ENOMEM);
}
}
}
/*
* The pci-driver core runtime PM routines always save the device state
* before going into suspended state. If the device is going into low power
@@ -568,6 +602,7 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
vfio_pci_core_map_bars(vdev);
return 0;
@@ -648,7 +683,7 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar = i + PCI_STD_RESOURCES;
if (!vdev->barmap[bar])
if (IS_ERR_OR_NULL(vdev->barmap[bar]))
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
pci_release_selected_regions(pdev, 1 << bar);

View File

@@ -198,27 +198,15 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
}
EXPORT_SYMBOL_GPL(vfio_pci_core_do_io_rw);
/*
* The barmap is set up in vfio_pci_core_enable(). Callers use this
* function to check that the BAR resources are requested or that the
* pci_iomap() was done.
*/
/* NOTE(review): this hunk is a unified diff with the +/- markers
 * stripped. The declarations and the on-demand request/iomap sequence
 * below are the *removed* lines of the commit; only the IS_ERR() check
 * and the final return survive in the new implementation, which merely
 * validates the barmap populated by vfio_pci_core_enable(). Do not read
 * this as one coherent function body. */
int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
/* Removed by this commit: old on-demand path state. */
struct pci_dev *pdev = vdev->pdev;
int ret;
void __iomem *io;
/* Removed: old "already mapped" fast path. */
if (vdev->barmap[bar])
return 0;
/* Removed: old lazy region request + iomap (now done at enable). */
ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
if (ret)
return ret;
io = pci_iomap(pdev, bar, 0);
if (!io) {
pci_release_selected_regions(pdev, 1 << bar);
return -ENOMEM;
}
vdev->barmap[bar] = io;
/* Added: report any soft failure recorded at enable time. */
if (IS_ERR(vdev->barmap[bar]))
return PTR_ERR(vdev->barmap[bar]);
return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_setup_barmap);