Retry doesn't work here, since the bo will be freed on error, leading to
a UAF. However, now that we do the alloc & init before the attach, we
can combine the two into one unit and have the init do the alloc for
us. This should make the retry safe.
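
Sketched below is the difference, with hypothetical bo_alloc()/bo_init()
helpers rather than the real xe API: the old shape can retry with a
pointer that a failed init already freed, while the combined helper
allocates afresh on every attempt.

	/* Hypothetical helpers, for illustration only. */
	bo = bo_alloc();
	do {
		err = bo_init(bo);	/* frees bo on error */
	} while (err == -EAGAIN);	/* UAF: bo was freed on the failed pass */

	/* Combined: each retry starts from a fresh allocation. */
	do {
		bo = bo_alloc_and_init();	/* ERR_PTR() on failure */
	} while (PTR_ERR_OR_ZERO(bo) == -EAGAIN);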
Reported-by: Sashiko
v2: Fix up the error unwind (CI)
Closes: https://sashiko.dev/#/patchset/20260506184332.86743-2-matthew.auld%40intel.com
Fixes: eb289a5f6c ("drm/xe: Convert xe_dma_buf.c for exhaustive eviction")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.18+
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20260508102635.149172-4-matthew.auld@intel.com
(cherry picked from commit 479669418253e0f27f8cf5db01a731352ea592e7)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS("DMA_BUF");

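/*
 * Fail the attach early if neither a usable p2p path nor migration to
 * system memory is possible; otherwise take a runtime PM reference for
 * the lifetime of the attachment.
 */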
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_pm_runtime_get(to_xe_device(obj->dev));
	return 0;
}

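/* Drop the runtime PM reference taken at attach time. */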
static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_pm_runtime_put(to_xe_device(obj->dev));
}

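/*
 * Pin the bo on behalf of a non-dynamic importer. The bo may only stay
 * in VRAM if every attachment supports p2p; otherwise it is migrated to
 * system memory (TT) first.
 */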
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct drm_gem_object *obj = dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
	bool allow_vram = true;
	int ret;

	list_for_each_entry(attach, &dmabuf->attachments, node) {
		if (!attach->peer2peer) {
			allow_vram = false;
			break;
		}
	}

	if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT) &&
	    !(xe_bo_is_vram(bo) && allow_vram)) {
		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
		return -EINVAL;
	}

	if (!allow_vram) {
		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
		if (ret) {
			if (ret != -EINTR && ret != -ERESTARTSYS)
				drm_dbg(&xe->drm,
					"Failed migrating dma-buf to TT memory: %pe\n",
					ERR_PTR(ret));
			return ret;
		}
	}

	ret = xe_bo_pin_external(bo, !allow_vram, exec);
	xe_assert(xe, !ret);

	return 0;
}

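/* Release the pin taken by xe_dma_buf_pin(). */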
static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

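/*
 * Build an sg_table for the importer: pages mapped through the DMA API
 * when the bo lives in system memory (TT), or a p2p address table built
 * by the VRAM manager when it lives in VRAM.
 */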
static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer)
			r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
		else
			r = xe_bo_validate(bo, NULL, false, exec);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    obj->size >> PAGE_SHIFT);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

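/*
 * Tear down a mapping from xe_dma_buf_map(). A page-backed table means a
 * TT mapping; otherwise the table came from the VRAM manager.
 */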
static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

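/*
 * Best-effort migration to system memory before CPU reads; failure is
 * not fatal since CPU access can still take place in the current
 * placement.
 */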
static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	if (!reads)
		return 0;

	/* Can we do interruptible lock here? */
	xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, (struct xe_val_flags) {}, ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
	}

	/* If we failed, cpu-access takes place in current placement. */
	return 0;
}

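/* Drop the willneed reference taken at export time, then release. */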
static void xe_dma_buf_release(struct dma_buf *dmabuf)
{
	struct drm_gem_object *obj = dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_lock(bo, false);
	xe_bo_willneed_put_locked(bo);
	xe_bo_unlock(bo);

	drm_gem_dmabuf_release(dmabuf);
}

static const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = xe_dma_buf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

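/*
 * Export a bo as a dma-buf. VM-private bos cannot be exported, and
 * purgeable (DONTNEED) or already-purged bos are rejected since the
 * exported buffer could lose its backing store at any time.
 */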
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = false,
	};
	int ret;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	/*
	 * Reject exporting purgeable BOs. DONTNEED BOs can be purged
	 * at any time, making the exported dma-buf unusable. Purged BOs
	 * have no backing store and are permanently invalid.
	 */
	ret = xe_bo_lock(bo, true);
	if (ret)
		return ERR_PTR(ret);

	if (xe_bo_madv_is_dontneed(bo)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (xe_bo_is_purged(bo)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	xe_bo_willneed_get_locked(bo);
	xe_bo_unlock(bo);

	ret = ttm_bo_setup_export(&bo->ttm, &ctx);
	if (ret)
		goto out_put;

	buf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_put;
	}

	buf->ops = &xe_dmabuf_ops;
	return buf;

out_put:
	xe_bo_lock(bo, false);
	xe_bo_willneed_put_locked(bo);
out_unlock:
	xe_bo_unlock(bo);
	return ERR_PTR(ret);
}

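/*
 * Allocate and initialize the importer-side bo in one step inside the
 * validation guard, so a retry on contention or OOM never re-uses a bo
 * that was freed by a failed attempt.
 */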
static struct drm_gem_object *
xe_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_validation_ctx ctx;
	struct drm_gem_object *dummy_obj;
	struct drm_exec exec;
	struct xe_bo *bo;
	int ret = 0;

	dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
	if (!dummy_obj)
		return ERR_PTR(-ENOMEM);

	dummy_obj->resv = resv;
	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
		ret = drm_exec_lock_obj(&exec, dummy_obj);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		bo = xe_bo_init_locked(xe, NULL, NULL, resv, NULL, dma_buf->size,
				       0, /* Will require 1way or 2way for vm_bind */
				       ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
		drm_exec_retry_on_contention(&exec);
		if (IS_ERR(bo)) {
			ret = PTR_ERR(bo);
			xe_validation_retry_on_oom(&ctx, &ret);
			break;
		}
	}
	drm_gem_object_put(dummy_obj);

	return ret ? ERR_PTR(ret) : &bo->ttm.base;
}

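/*
 * The exporter is about to move the buffer: evict our mapping so it
 * gets rebuilt on next use.
 */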
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;

	XE_WARN_ON(xe_bo_evict(bo, exec));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.invalidate_mappings = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

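/*
 * Import a dma-buf. Self-imports on the same device short-circuit to a
 * gem reference; otherwise the importer bo is created before the dynamic
 * attach so that invalidate_mappings always sees a fully set up object.
 */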
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * This needs to happen before the attach, since it will create a new
	 * attachment for this, and add it to the list of attachments, at which
	 * point it is globally visible, and at any point the export side can
	 * call into the invalidate_mappings callback, which requires a working
	 * object.
	 */
	obj = xe_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, obj);
	if (IS_ERR(attach)) {
		xe_bo_put(gem_to_xe_bo(obj));
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif