drm/xe: add xe_migrate_resolve wrapper and is_vram_resolve support

Introduce an internal __xe_migrate_copy(..., is_vram_resolve) path and
expose a small wrapper xe_migrate_resolve() that calls it with
is_vram_resolve=true.

For resolve/decompression operations we must ensure the copy code uses
the compression PAT index when appropriate; this change centralizes that
behavior and allows callers to schedule a resolve (decompress) operation
via the migrate API.

v3: Fix kernel-doc warnings

v2: (Matt)
  - Simplify xe_migrate_resolve(), use single BO/resource;
    remove copy_only_ccs argument as it's always false.

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patch.msgid.link/20260304123758.3050386-7-nitin.r.gote@intel.com
This commit is contained in:
Nitin Gote
2026-03-04 18:08:01 +05:30
committed by Matthew Auld
parent 2270bd7124
commit be97fd0645
2 changed files with 67 additions and 27 deletions

View File

@@ -860,31 +860,13 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
return flush_flags;
}
/**
* xe_migrate_copy() - Copy content of TTM resources.
* @m: The migration context.
* @src_bo: The buffer object @src is currently bound to.
* @dst_bo: If copying between resources created for the same bo, set this to
* the same value as @src_bo. If copying between buffer objects, set it to
* the buffer object @dst is currently bound to.
* @src: The source TTM resource.
* @dst: The dst TTM resource.
* @copy_only_ccs: If true copy only CCS metadata
*
* Copies the contents of @src to @dst: On flat CCS devices,
* the CCS metadata is copied as well if needed, or if not present,
* the CCS metadata of @dst is cleared for security reasons.
*
* Return: Pointer to a dma_fence representing the last copy batch, or
* an error pointer on failure. If there is a failure, any copy operation
* started by the function call has been synced.
*/
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
struct xe_bo *dst_bo,
struct ttm_resource *src,
struct ttm_resource *dst,
bool copy_only_ccs)
static struct dma_fence *__xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
struct xe_bo *dst_bo,
struct ttm_resource *src,
struct ttm_resource *dst,
bool copy_only_ccs,
bool is_vram_resolve)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
@@ -905,8 +887,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/*
* For decompression operation, always use the compression PAT index.
* Otherwise, only use the compression PAT index for device memory
* when copying from VRAM to system memory.
*/
bool use_comp_pat = is_vram_resolve || (type_device &&
xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram);
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
@@ -1065,6 +1054,53 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
return fence;
}
/**
 * xe_migrate_copy() - Copy content of TTM resources.
 * @m: The migration context.
 * @src_bo: The buffer object @src is currently bound to.
 * @dst_bo: If copying between resources created for the same bo, set this to
 * the same value as @src_bo. If copying between buffer objects, set it to
 * the buffer object @dst is currently bound to.
 * @src: The source TTM resource.
 * @dst: The dst TTM resource.
 * @copy_only_ccs: If true copy only CCS metadata
 *
 * Copies the contents of @src to @dst: On flat CCS devices,
 * the CCS metadata is copied as well if needed, or if not present,
 * the CCS metadata of @dst is cleared for security reasons.
 *
 * Return: Pointer to a dma_fence representing the last copy batch, or
 * an error pointer on failure. If there is a failure, any copy operation
 * started by the function call has been synced.
 */
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
				  struct xe_bo *src_bo,
				  struct xe_bo *dst_bo,
				  struct ttm_resource *src,
				  struct ttm_resource *dst,
				  bool copy_only_ccs)
{
	/* Plain copy path: never force decompression (is_vram_resolve = false). */
	return __xe_migrate_copy(m, src_bo, dst_bo, src, dst,
				 copy_only_ccs, false);
}
/**
 * xe_migrate_resolve() - Resolve and decompress a buffer object if required.
 * @m: The migrate context.
 * @bo: The buffer object to resolve.
 * @res: The TTM resource @bo is currently bound to; it is used as both the
 * source and the destination of the in-place resolve copy.
 *
 * Wrapper around __xe_migrate_copy() with is_vram_resolve set to true so the
 * copy uses the compression PAT index, decompressing @bo's contents in place
 * if needed. CCS-only copying is not used here (copy_only_ccs is false).
 *
 * Return: A dma_fence that signals on completion, or an ERR_PTR on failure.
 */
struct dma_fence *xe_migrate_resolve(struct xe_migrate *m,
				     struct xe_bo *bo,
				     struct ttm_resource *res)
{
	return __xe_migrate_copy(m, bo, bo, res, res, false, true);
}
/**
* xe_migrate_lrc() - Get the LRC from migrate context.
* @migrate: Migrate context.

View File

@@ -132,6 +132,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
struct dma_fence *xe_migrate_resolve(struct xe_migrate *m,
struct xe_bo *bo,
struct ttm_resource *res);
int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
struct xe_bo *src_bo,
enum xe_sriov_vf_ccs_rw_ctxs read_write);