drm/xe: Convert xe_dma_buf.c for exhaustive eviction

Convert the dma-buf migration to XE_PL_TT and the dma-buf import to
support exhaustive eviction, using xe_validation_guard(). It seems
unlikely that the import would result in an -ENOMEM, but convert it
anyway for completeness.
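
Both conversions apply the same validation-guard pattern; a sketch,
annotated with comments and simplified from the hunks below (val and
obj stand in for the per-device validation state and the object being
locked):

	xe_validation_guard(&ctx, val, &exec, (struct xe_val_flags) {}, ret) {
		/* Lock the object as part of the drm_exec transaction. */
		ret = drm_exec_lock_obj(&exec, obj);
		/* On ww-mutex contention, back off and rerun the block. */
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		/* The operation itself, now with an exec to evict against. */
		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
		drm_exec_retry_on_contention(&exec);
		/* On -ENOMEM, wait for evictions to complete and retry. */
		xe_validation_retry_on_oom(&ctx, &ret);
	}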

The dma-buf map_attachment() functionality unfortunately doesn't
support passing a drm_exec, which means that foreign devices
validating a dma-buf that we exported will not participate in the
exhaustive eviction scheme unless they are xeKMD devices.
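
For reference, this is the importer-side prototype that would need to
learn about a drm_exec (current dma-buf API, unchanged by this patch):

	struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
						enum dma_data_direction direction);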

v2:
- Avoid gotos from within xe_validation_guard(). (Matt Brost)
- Adapt to signature change of xe_validation_guard(). (Matt Brost)
- Remove an unneeded (void)ret. (Matt Brost)
- Fix up an error path.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250908101246.65025-9-thomas.hellstrom@linux.intel.com

diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -163,16 +163,26 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 	struct xe_bo *bo = gem_to_xe_bo(obj);
 	bool reads =  (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_FROM_DEVICE);
-	struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
+	struct xe_validation_ctx ctx;
+	struct drm_exec exec;
+	int ret = 0;
 
 	if (!reads)
 		return 0;
 
 	/* Can we do interruptible lock here? */
-	xe_bo_lock(bo, false);
-	(void)xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
-	xe_bo_unlock(bo);
+	xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec,
+			    (struct xe_val_flags) {}, ret) {
+		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+		drm_exec_retry_on_contention(&exec);
+		if (ret)
+			break;
+
+		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
+		drm_exec_retry_on_contention(&exec);
+		xe_validation_retry_on_oom(&ctx, &ret);
+	}
 
 	/* If we failed, cpu-access takes place in current placement. */
 	return 0;
 }
@@ -211,25 +221,36 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
 {
 	struct dma_resv *resv = dma_buf->resv;
 	struct xe_device *xe = to_xe_device(dev);
-	struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
+	struct xe_validation_ctx ctx;
+	struct drm_gem_object *dummy_obj;
+	struct drm_exec exec;
 	struct xe_bo *bo;
-	int ret;
+	int ret = 0;
 
-	dma_resv_lock(resv, NULL);
-	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
-				    0, /* Will require 1way or 2way for vm_bind */
-				    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, exec);
-	if (IS_ERR(bo)) {
-		ret = PTR_ERR(bo);
-		goto error;
+	dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+	if (!dummy_obj)
+		return ERR_PTR(-ENOMEM);
+
+	dummy_obj->resv = resv;
+	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
+		ret = drm_exec_lock_obj(&exec, dummy_obj);
+		drm_exec_retry_on_contention(&exec);
+		if (ret)
+			break;
+
+		bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
+					    0, /* Will require 1way or 2way for vm_bind */
+					    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
+		drm_exec_retry_on_contention(&exec);
+		if (IS_ERR(bo)) {
+			ret = PTR_ERR(bo);
+			xe_validation_retry_on_oom(&ctx, &ret);
+			break;
+		}
 	}
+	drm_gem_object_put(dummy_obj);
 
-	dma_resv_unlock(resv);
-	return &bo->ttm.base;
-
-error:
-	dma_resv_unlock(resv);
-	return ERR_PTR(ret);
+	return ret ? ERR_PTR(ret) : &bo->ttm.base;
 }
 
 static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)