drm/xe/vm: Prevent binding of purged buffer objects

Add purge checking to vma_lock_and_validate() to block new mapping
operations on purged BOs while allowing cleanup operations to proceed.

Purged BOs have their backing pages freed by the kernel. New
mapping operations (MAP, PREFETCH, REMAP) must be rejected — with
-EBUSY while the BO is still marked DONTNEED, and -EINVAL once it
has actually been purged — to prevent GPU access to invalid
memory. Cleanup operations (UNMAP) must be allowed so applications
can release resources after detecting purge via the retained field.

REMAP operations require mixed handling - reject new prev/next
VMAs if the BO is purged, but allow the unmap portion to proceed
for cleanup.

The check_purged flag in struct xe_vma_lock_and_validate_flags
distinguishes between these cases: true for new mappings (must reject),
false for cleanup (allow).

Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260326130843.3545241-6-arvind.yadav@intel.com
This commit is contained in:
Arvind Yadav
2026-03-26 18:38:31 +05:30
committed by Matthew Brost
parent 9a16fdf5dc
commit 4f44961eab

View File

@@ -3006,8 +3006,22 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
}
}
/**
 * struct xe_vma_lock_and_validate_flags - Flags for vma_lock_and_validate()
 * @res_evict: Allow evicting resources during validation
 * @validate: Perform BO validation (xe_bo_validate()) after locking
 * @request_decompress: Request BO decompression after a successful validate
 * @check_purged: Reject the operation when the BO is unusable: -EBUSY if the
 * BO is still marked DONTNEED (purgeable but not yet purged), -EINVAL if it
 * has already been purged. Set for new mappings (MAP, PREFETCH, REMAP
 * prev/next); left clear for cleanup paths (UNMAP, REMAP unmap) so
 * applications can still release resources after a purge.
 */
struct xe_vma_lock_and_validate_flags {
u32 res_evict : 1;
u32 validate : 1;
u32 request_decompress : 1;
u32 check_purged : 1;
};
static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool res_evict, bool validate, bool request_decompress)
struct xe_vma_lock_and_validate_flags flags)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
@@ -3016,15 +3030,24 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
if (bo) {
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
/* Reject new mappings to DONTNEED/purged BOs; allow cleanup operations */
if (!err && flags.check_purged) {
if (xe_bo_madv_is_dontneed(bo))
err = -EBUSY; /* BO marked purgeable */
else if (xe_bo_is_purged(bo))
err = -EINVAL; /* BO already purged */
}
if (!err && flags.validate)
err = xe_bo_validate(bo, vm,
xe_vm_allow_vm_eviction(vm) &&
res_evict, exec);
flags.res_evict, exec);
if (err)
return err;
if (request_decompress)
if (flags.request_decompress)
err = xe_bo_decompress(bo);
}
@@ -3118,10 +3141,14 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if (!op->map.invalidate_on_bind)
err = vma_lock_and_validate(exec, op->map.vma,
res_evict,
!xe_vm_in_fault_mode(vm) ||
op->map.immediate,
op->map.request_decompress);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = !xe_vm_in_fault_mode(vm) ||
op->map.immediate,
.request_decompress =
op->map.request_decompress,
.check_purged = true,
});
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -3130,13 +3157,28 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.remap.unmap->va),
res_evict, false, false);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = false,
.request_decompress = false,
.check_purged = false,
});
if (!err && op->remap.prev)
err = vma_lock_and_validate(exec, op->remap.prev,
res_evict, true, false);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = true,
.request_decompress = false,
.check_purged = true,
});
if (!err && op->remap.next)
err = vma_lock_and_validate(exec, op->remap.next,
res_evict, true, false);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = true,
.request_decompress = false,
.check_purged = true,
});
break;
case DRM_GPUVA_OP_UNMAP:
err = check_ufence(gpuva_to_vma(op->base.unmap.va));
@@ -3145,7 +3187,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.unmap.va),
res_evict, false, false);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = false,
.request_decompress = false,
.check_purged = false,
});
break;
case DRM_GPUVA_OP_PREFETCH:
{
@@ -3158,9 +3205,19 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
region <= ARRAY_SIZE(region_to_mem_type));
}
/*
* Prefetch attempts to migrate BO's backing store without
* repopulating it first. Purged BOs have no backing store
* to migrate, so reject the operation.
*/
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
res_evict, false, false);
(struct xe_vma_lock_and_validate_flags) {
.res_evict = res_evict,
.validate = false,
.request_decompress = false,
.check_purged = true,
});
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
region_to_mem_type[region],