drm/gem/shmem: Extract drm_gem_shmem_init() from drm_gem_shmem_create()

With gem objects in Rust, the ideal way to handle gem shmem object
creation is to allocate the memory for the gem object ourselves and then
have the DRM gem shmem helpers initialize the object we've allocated
afterwards. So, split drm_gem_shmem_init() out of drm_gem_shmem_create()
to allow for this.

Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Link: https://lore.kernel.org/r/20250911230147.650077-2-lyude@redhat.com
Author: Lyude Paul
Date:   2025-09-11 18:57:38 -04:00
parent 27ed0d64a0
commit e3f4bdaf2c
2 changed files with 62 additions and 36 deletions
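
For illustration only (not part of the patch): a minimal sketch of how a driver
might use the new drm_gem_shmem_init() once it lands, assuming a hypothetical
driver object type that embeds struct drm_gem_shmem_object. The my_bo struct and
my_bo_create() helper are made-up names; only drm_gem_shmem_init() itself comes
from this series.

/*
 * Illustrative sketch only: my_bo and my_bo_create() are hypothetical.
 * The driver owns the allocation of its object; drm_gem_shmem_init()
 * (added by this patch) only initializes the embedded shmem GEM object.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>

struct my_bo {
        struct drm_gem_shmem_object shmem;
        /* driver-private state would go here */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
        struct my_bo *bo;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        ret = drm_gem_shmem_init(dev, &bo->shmem, size);
        if (ret) {
                /* Init failed, so only the driver's own allocation needs freeing. */
                kfree(bo);
                return ERR_PTR(ret);
        }

        return bo;
}

On failure the caller frees its own allocation, mirroring what
__drm_gem_shmem_create() does after this change.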

drivers/gpu/drm/drm_gem_shmem_helper.c

@@ -48,6 +48,64 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
 	.vm_ops = &drm_gem_shmem_vm_ops,
 };
 
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+				size_t size, bool private, struct vfsmount *gemfs)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	int ret = 0;
+
+	if (!obj->funcs)
+		obj->funcs = &drm_gem_shmem_funcs;
+
+	if (private) {
+		drm_gem_private_object_init(dev, obj, size);
+		shmem->map_wc = false; /* dma-buf mappings use always writecombine */
+	} else {
+		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
+	}
+	if (ret) {
+		drm_gem_private_object_fini(obj);
+		return ret;
+	}
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto err_release;
+
+	INIT_LIST_HEAD(&shmem->madv_list);
+
+	if (!private) {
+		/*
+		 * Our buffers are kept pinned, so allocating them
+		 * from the MOVABLE zone is a really bad idea, and
+		 * conflicts with CMA. See comments above new_inode()
+		 * why this is required _and_ expected if you're
+		 * going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	}
+
+	return 0;
+
+err_release:
+	drm_gem_object_release(obj);
+
+	return ret;
+}
+
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @shmem: The allocated shmem GEM object.
+ * @size: Size of the object in bytes.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+	return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
 static struct drm_gem_shmem_object *
 __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
 		       struct vfsmount *gemfs)
@@ -70,46 +128,13 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
 		obj = &shmem->base;
 	}
 
-	if (!obj->funcs)
-		obj->funcs = &drm_gem_shmem_funcs;
-
-	if (private) {
-		drm_gem_private_object_init(dev, obj, size);
-		shmem->map_wc = false; /* dma-buf mappings use always writecombine */
-	} else {
-		ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
-	}
+	ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
 	if (ret) {
-		drm_gem_private_object_fini(obj);
-		goto err_free;
-	}
-
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto err_release;
-
-	INIT_LIST_HEAD(&shmem->madv_list);
-
-	if (!private) {
-		/*
-		 * Our buffers are kept pinned, so allocating them
-		 * from the MOVABLE zone is a really bad idea, and
-		 * conflicts with CMA. See comments above new_inode()
-		 * why this is required _and_ expected if you're
-		 * going to pin these pages.
-		 */
-		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
-				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+		kfree(obj);
+		return ERR_PTR(ret);
 	}
 
 	return shmem;
-
-err_release:
-	drm_gem_object_release(obj);
-err_free:
-	kfree(obj);
-
-	return ERR_PTR(ret);
 }
 
 /**
  * drm_gem_shmem_create - Allocate an object with the given size

include/drm/drm_gem_shmem_helper.h

@@ -107,6 +107,7 @@ struct drm_gem_shmem_object {
 #define to_drm_gem_shmem_obj(obj) \
 	container_of(obj, struct drm_gem_shmem_object, base)
 
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
 struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
 struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
 							    size_t size,