drm/xe/madvise: Block imported and exported dma-bufs

Prevent marking imported or exported dma-bufs as purgeable.
External devices may be accessing these buffers without our
knowledge, making purging unsafe.

Check drm_gem_is_imported() to detect buffers created by other
drivers, and obj->dma_buf to detect buffers exported to other
drivers. Silently skip these BOs during madvise processing.

This follows drm_gem_shmem's purgeable implementation and
prevents data corruption from purging actively-used shared
buffers.

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260326130843.3545241-8-arvind.yadav@intel.com
This commit is contained in:
Arvind Yadav
2026-03-26 18:38:33 +05:30
committed by Matthew Brost
parent 5bfb7e6a7f
commit 63cf199c0c

View File

@@ -185,6 +185,33 @@ static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
}
}
/**
 * xe_bo_is_dmabuf_shared() - Check whether a BO is shared via dma-buf
 * @bo: Buffer object to check
 *
 * Shared dma-bufs must never be marked purgeable. If the BO was
 * imported, the exporter owns the backing store and Xe cannot safely
 * reclaim its pages (the exporter or other devices may still use
 * them). If the BO was exported, external devices may hold active
 * mappings that Xe cannot track.
 *
 * Return: true if the BO is imported from or exported to another
 * driver via dma-buf, false otherwise.
 */
static bool xe_bo_is_dmabuf_shared(struct xe_bo *bo)
{
	struct drm_gem_object *obj = &bo->ttm.base;

	/*
	 * drm_gem_is_imported() covers buffers created by other drivers;
	 * obj->dma_buf is set when this buffer has been exported.
	 */
	return drm_gem_is_imported(obj) || obj->dma_buf;
}
/**
* enum xe_bo_vmas_purge_state - VMA purgeable state aggregation
*
@@ -234,6 +261,10 @@ static enum xe_bo_vmas_purge_state xe_bo_all_vmas_dontneed(struct xe_bo *bo)
xe_bo_assert_held(bo);
/* Shared dma-bufs cannot be purgeable */
if (xe_bo_is_dmabuf_shared(bo))
return XE_BO_VMAS_STATE_WILLNEED;
drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
struct xe_vma *vma = gpuva_to_vma(gpuva);
@@ -335,6 +366,12 @@ static void __maybe_unused madvise_purgeable(struct xe_device *xe,
/* BO must be locked before modifying madv state */
xe_bo_assert_held(bo);
/* Skip shared dma-bufs - no PTEs to zap */
if (xe_bo_is_dmabuf_shared(bo)) {
vmas[i]->skip_invalidation = true;
continue;
}
/*
* Once purged, always purged. Cannot transition back to WILLNEED.
* This matches i915 semantics where purged BOs are permanently invalid.