Merge tag 'drm-misc-fixes-2026-05-15' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

bridge:
- imx8qxp-pxl2dpi: avoid ERR_PTR with device_node cleanup

gma500:
- oaktrail_lvds: fix i2c handling

loongson:
- use managed cleanup for connector polling

panfrost:
- handle results from reservation locking correctly

qaic:
- check for integer overflows in mmap logic

rocket:
- handle results from reservation locking correctly

ttm:
- avoid infinite loop in swap out
- avoid infinite loop in BO shrinking
- convert -EAGAIN from dmem_cgroup_try_charge to -ENOSPC

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patch.msgid.link/20260515070816.GA88575@2a02-2455-9062-2500-7dec-552d-233d-9fe0.dyn6.pyur.net
This commit is contained in:
Dave Airlie
2026-05-16 07:55:49 +10:00
11 changed files with 86 additions and 42 deletions

View File

@@ -606,8 +606,11 @@ static const struct vm_operations_struct drm_vm_ops = {
static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct qaic_bo *bo = to_qaic_bo(obj);
unsigned long remap_start;
unsigned long offset = 0;
unsigned long remap_end;
struct scatterlist *sg;
unsigned long length;
int ret = 0;
if (drm_gem_is_imported(obj))
@@ -615,11 +618,27 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
if (sg_page(sg)) {
/* if the sg entry is too large for the VMA, truncate it to fit */
if (check_add_overflow(vma->vm_start, offset, &remap_start))
return -EINVAL;
if (check_add_overflow(remap_start, sg->length, &remap_end))
return -EINVAL;
if (remap_end > vma->vm_end) {
if (check_sub_overflow(vma->vm_end, remap_start, &length))
return -EINVAL;
} else {
length = sg->length;
}
if (length == 0)
goto out;
ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
sg->length, vma->vm_page_prot);
length, vma->vm_page_prot);
if (ret)
goto out;
offset += sg->length;
offset += length;
}
}

View File

@@ -145,6 +145,8 @@ int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *fi
ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
else if (ret > 0)
ret = 0;
shmem_obj = &to_rocket_bo(gem_obj)->base;

View File

@@ -222,52 +222,58 @@ static const struct drm_bridge_funcs imx8qxp_pxl2dpi_bridge_funcs = {
imx8qxp_pxl2dpi_bridge_atomic_get_output_bus_fmts,
};
static struct device_node *
static int
imx8qxp_pxl2dpi_get_available_ep_from_port(struct imx8qxp_pxl2dpi *p2d,
u32 port_id)
u32 port_id,
struct device_node **ep)
{
struct device_node *port, *ep;
struct device_node *port;
int ret = 0;
int ep_cnt;
*ep = NULL;
port = of_graph_get_port_by_id(p2d->dev->of_node, port_id);
if (!port) {
DRM_DEV_ERROR(p2d->dev, "failed to get port@%u\n", port_id);
return ERR_PTR(-ENODEV);
return -ENODEV;
}
ep_cnt = of_get_available_child_count(port);
if (ep_cnt == 0) {
DRM_DEV_ERROR(p2d->dev, "no available endpoints of port@%u\n",
port_id);
ep = ERR_PTR(-ENODEV);
ret = -ENODEV;
goto out;
} else if (ep_cnt > 1) {
DRM_DEV_ERROR(p2d->dev,
"invalid available endpoints of port@%u\n",
port_id);
ep = ERR_PTR(-EINVAL);
ret = -EINVAL;
goto out;
}
ep = of_get_next_available_child(port, NULL);
if (!ep) {
*ep = of_get_next_available_child(port, NULL);
if (!*ep) {
DRM_DEV_ERROR(p2d->dev,
"failed to get available endpoint of port@%u\n",
port_id);
ep = ERR_PTR(-ENODEV);
ret = -ENODEV;
goto out;
}
out:
of_node_put(port);
return ep;
return ret;
}
static int imx8qxp_pxl2dpi_find_next_bridge(struct imx8qxp_pxl2dpi *p2d)
{
struct device_node *ep __free(device_node) =
imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 1);
if (IS_ERR(ep))
return PTR_ERR(ep);
struct device_node *ep __free(device_node) = NULL;
int ret;
ret = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 1, &ep);
if (ret)
return ret;
struct device_node *remote __free(device_node) = of_graph_get_remote_port_parent(ep);
if (!remote || !of_device_is_available(remote)) {
@@ -291,9 +297,9 @@ static int imx8qxp_pxl2dpi_set_pixel_link_sel(struct imx8qxp_pxl2dpi *p2d)
struct of_endpoint endpoint;
int ret;
ep = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 0);
if (IS_ERR(ep))
return PTR_ERR(ep);
ret = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 0, &ep);
if (ret)
return ret;
ret = of_graph_parse_endpoint(ep, &endpoint);
if (ret) {

View File

@@ -580,6 +580,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
} else {
edid = (struct edid *)raw_edid;
/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
i2c_put_adapter(i2c_adap);
}
if (edid) {

View File

@@ -293,7 +293,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
{
struct gma_encoder *gma_encoder;
struct gma_connector *gma_connector;
struct gma_i2c_chan *ddc_bus;
struct gma_i2c_chan *ddc_bus = NULL;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@@ -367,6 +367,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
if (edid == NULL && dev_priv->lpc_gpio_base) {
ddc_bus = oaktrail_lvds_i2c_init(dev);
if (!IS_ERR(ddc_bus)) {
if (i2c_adap)
i2c_put_adapter(i2c_adap);
i2c_adap = &ddc_bus->base;
edid = drm_get_edid(connector, i2c_adap);
}
@@ -421,7 +423,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
err_unlock:
mutex_unlock(&dev->mode_config.mutex);
gma_i2c_destroy(to_gma_i2c_chan(connector->ddc));
if (!IS_ERR_OR_NULL(ddc_bus))
gma_i2c_destroy(ddc_bus);
else if (i2c_adap)
i2c_put_adapter(i2c_adap);
drm_encoder_cleanup(encoder);
err_connector_cleanup:
drm_connector_cleanup(connector);

View File

@@ -293,7 +293,7 @@ static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vga_client_register(pdev, lsdc_vga_set_decode);
drm_kms_helper_poll_init(ddev);
drmm_kms_helper_poll_init(ddev);
if (loongson_vblank) {
ret = drm_vblank_init(ddev, descp->num_of_crtc);

View File

@@ -390,6 +390,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
else if (ret > 0)
ret = 0;
drm_gem_object_put(gem_obj);

View File

@@ -739,7 +739,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
if (ret) {
if (ret != -ENOSPC && ret != -EAGAIN) {
if (ret != -ENOSPC) {
dmem_cgroup_pool_state_put(limit_pool);
return ret;
}
@@ -1177,17 +1177,13 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(tt)) {
spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move(bo->resource, bo);
spin_unlock(&bdev->lru_lock);
ret = ttm_tt_swapout(bdev, tt, swapout_walk->gfp_flags);
spin_lock(&bdev->lru_lock);
if (ret)
ttm_resource_add_bulk_move(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bdev->lru_lock);
if (!ret) {
spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move_unevictable(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bdev->lru_lock);
}
}
out:

View File

@@ -1112,19 +1112,14 @@ long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
if (lret < 0)
return lret;
if (bo->bulk_move) {
spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move(bo->resource, bo);
spin_unlock(&bdev->lru_lock);
}
lret = ttm_tt_backup(bdev, bo->ttm, (struct ttm_backup_flags)
{.purge = flags.purge,
.writeback = flags.writeback});
if (lret <= 0 && bo->bulk_move) {
if (lret > 0) {
spin_lock(&bdev->lru_lock);
ttm_resource_add_bulk_move(bo->resource, bo);
ttm_resource_del_bulk_move_unevictable(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bdev->lru_lock);
}

View File

@@ -292,6 +292,19 @@ void ttm_resource_del_bulk_move(struct ttm_resource *res,
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
/*
* Remove a resource from its bulk_move, bypassing the unevictable check.
* Use only when the resource is known to still be tracked in the range despite
* the BO having just become unevictable; asserts that this is the case.
*/
void ttm_resource_del_bulk_move_unevictable(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
WARN_ON_ONCE(!ttm_resource_unevictable(res, bo));
if (bo->bulk_move)
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
@@ -385,8 +398,11 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
if (man->cg) {
ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
if (ret)
if (ret) {
if (ret == -EAGAIN)
ret = -ENOSPC;
return ret;
}
}
ret = man->func->alloc(man, bo, place, res_ptr);

View File

@@ -448,6 +448,8 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_del_bulk_move_unevictable(struct ttm_resource *res,
struct ttm_buffer_object *bo);
void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
void ttm_resource_init(struct ttm_buffer_object *bo,