mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-15 21:21:49 -04:00
drm/xe: Allow CPU address mirror VMA unbind with gpu bindings for madvise
In the case of the MADVISE ioctl, if the start or end address falls within a VMA and existing SVM ranges are present, remove the existing SVM mappings, then continue with ops_parse to create new VMAs by REMAP-unmapping the old one. v2 (Matthew Brost) - Use a vops flag to trigger unmapping of ranges in vm_bind_ioctl_ops_parse - Rename the function v3 - Fix doc v4 - Check whether the range is already in the garbage collector (Matthew Brost) Reviewed-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20250821173104.3030148-7-himal.prasad.ghimiray@intel.com Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
This commit is contained in:
@@ -932,6 +932,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
|
||||
return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
|
||||
}
|
||||
|
||||
/**
 * xe_svm_unmap_address_range - UNMAP SVM mappings and ranges
 * @vm: The VM
 * @start: start addr
 * @end: end addr
 *
 * Tears down SVM ranges that straddle either boundary of [@start, @end):
 * any range whose extent is only partially covered by the span is evicted
 * (if resident in VRAM on dGFX), run through the garbage collector, and
 * dropped. Ranges fully contained in the span are left to be handled by
 * the caller's REMAP unmap path (presumably — only partial overlaps are
 * matched here; confirm against vm_bind_ioctl_ops_parse).
 *
 * Caller must hold @vm->lock in write mode.
 */
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
	struct drm_gpusvm_notifier *notifier, *next;

	lockdep_assert_held_write(&vm->lock);

	/* _safe iteration: ranges/notifiers may be freed as we collect them. */
	drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
		struct drm_gpusvm_range *range, *__next;

		drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
			/*
			 * Only act on ranges the span cuts through, i.e.
			 * [start, end) begins or ends inside this range.
			 */
			if (start > drm_gpusvm_range_start(range) ||
			    end < drm_gpusvm_range_end(range)) {
				/* Migrate VRAM-backed pages to system memory first (dGFX only). */
				if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
					drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
				/* Hold a reference across the collector call so the
				 * range outlives it until our final put below.
				 */
				drm_gpusvm_range_get(range);
				__xe_svm_garbage_collector(vm, to_xe_range(range));
				/*
				 * The range may already have been queued for deferred
				 * garbage collection; unlink it since it has been
				 * collected synchronously here.
				 */
				if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
					spin_lock(&vm->svm.garbage_collector.lock);
					list_del(&to_xe_range(range)->garbage_collector_link);
					spin_unlock(&vm->svm.garbage_collector.lock);
				}
				drm_gpusvm_range_put(range);
			}
		}
	}
}
|
||||
|
||||
/**
|
||||
* xe_svm_bo_evict() - SVM evict BO to system memory
|
||||
* @bo: BO to evict
|
||||
|
||||
@@ -90,6 +90,8 @@ bool xe_svm_range_validate(struct xe_vm *vm,
|
||||
|
||||
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
|
||||
|
||||
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
|
||||
|
||||
/**
|
||||
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
|
||||
* @range: SVM range
|
||||
@@ -303,6 +305,11 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
|
||||
return ULONG_MAX;
|
||||
}
|
||||
|
||||
/*
 * No-op stub of xe_svm_unmap_address_range() — presumably selected when SVM
 * support is compiled out (the guarding #if is outside this view; confirm it
 * pairs with the CONFIG option gating the real implementation).
 */
static inline
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
}
|
||||
|
||||
#define xe_svm_assert_in_notifier(...) do {} while (0)
|
||||
#define xe_svm_range_has_dma_mapping(...) false
|
||||
|
||||
|
||||
@@ -2694,8 +2694,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
|
||||
end = op->base.remap.next->va.addr;
|
||||
|
||||
if (xe_vma_is_cpu_addr_mirror(old) &&
|
||||
xe_svm_has_mapping(vm, start, end))
|
||||
return -EBUSY;
|
||||
xe_svm_has_mapping(vm, start, end)) {
|
||||
if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
|
||||
xe_svm_unmap_address_range(vm, start, end);
|
||||
else
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
op->remap.start = xe_vma_start(old);
|
||||
op->remap.range = xe_vma_size(old);
|
||||
|
||||
Reference in New Issue
Block a user