drm/vmwgfx: Add error path for xa_store in vmw_bo_add_detached_resource

The xa_store() call may fail due to a memory allocation failure, because
there is no guarantee that the index is already in use (storing to an
unused index requires allocating a new slot). This fix introduces error
paths to handle that failure.
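As an illustration of the failure mode, here is a minimal, self-contained sketch of the xa_store()/xa_err() pattern the fix adopts; the xarray and helper below are hypothetical examples, not driver code:

#include <linux/errno.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);

/* Store @entry at @index and report any allocation failure to the caller. */
static int example_store(unsigned long index, void *entry)
{
        /*
         * xa_store() normally returns the entry previously stored at @index,
         * but it returns an XA_ERROR()-encoded pointer (e.g. -ENOMEM) when it
         * cannot allocate memory for a new slot.  xa_err() converts that
         * return value into 0 on success or a negative errno on failure.
         */
        return xa_err(xa_store(&example_xa, index, entry, GFP_KERNEL));
}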

This patch also reorders the calls so that vmw_bo_add_detached_resource()
runs before ttm_prime_object_init(), which keeps the error handling
consistent.
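A rough sketch of the resulting call order, using hypothetical stand-in helpers rather than the real vmwgfx functions: the fallible bookkeeping step runs before the object is published, so every failure unwinds through the same path.

#include <linux/compiler.h>
#include <linux/errno.h>

struct example_res;

/* Hypothetical stand-ins for vmw_bo_add_detached_resource() and
 * ttm_prime_object_init(); trivial stubs for illustration only. */
static int example_add_detached(struct example_res *res) { return res ? 0 : -ENOMEM; }
static int example_publish(struct example_res *res) { return res ? 0 : -EINVAL; }
static void example_unref(struct example_res **res) { *res = NULL; }

static int example_define(struct example_res *res)
{
        int ret;

        /* Fallible bookkeeping first... */
        ret = example_add_detached(res);
        if (unlikely(ret != 0))
                goto out_unref;

        /* ...then publish the object, sharing the same error path. */
        ret = example_publish(res);
        if (unlikely(ret != 0))
                goto out_unref;

        return 0;

out_unref:
        example_unref(&res);
        return ret;
}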

Fixes: d6667f0ddf ("drm/vmwgfx: Fix handling of dumb buffers")
Signed-off-by: Keisuke Nishimura <keisuke.nishimura@inria.fr>
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250225145223.34773-1-keisuke.nishimura@inria.fr
commit 3282422bf2 (parent 0039a3b35b)
Author:    Keisuke Nishimura
Date:      2025-02-25 15:52:23 +01:00
Committer: Zack Rusin

3 files changed, 17 insertions(+), 5 deletions(-)

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c

@@ -848,9 +848,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
         vmw_bo_placement_set(bo, domain, domain);
 }
 
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
 {
-        xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+        return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
 }
 
 void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h

@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                         struct ttm_resource *mem);
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
 void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
 struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

@@ -838,7 +838,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                         vmw_resource_unreference(&res);
                         goto out_unlock;
                 }
-                vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+
+                ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+                if (unlikely(ret != 0)) {
+                        vmw_resource_unreference(&res);
+                        goto out_unlock;
+                }
         }
 
         tmp = vmw_resource_reference(&srf->res);
@@ -1637,6 +1642,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
         }
 
+        if (res->guest_memory_bo) {
+                ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+                if (unlikely(ret != 0)) {
+                        vmw_resource_unreference(&res);
+                        goto out_unlock;
+                }
+        }
+
         tmp = vmw_resource_reference(res);
         ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
                                     VMW_RES_SURFACE,
@@ -1651,7 +1664,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
         rep->handle = user_srf->prime.base.handle;
         rep->backup_size = res->guest_memory_size;
         if (res->guest_memory_bo) {
-                vmw_bo_add_detached_resource(res->guest_memory_bo, res);
                 rep->buffer_map_handle =
                         drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
                 rep->buffer_size = res->guest_memory_bo->tbo.base.size;