drm/xe/svm: Make xe_svm_range_needs_migrate_to_vram() public
xe_svm_range_needs_migrate_to_vram() determines whether a range needs to be migrated to VRAM or not. Modify it to also accept a region preference parameter, so it can be used in the prefetch path as well.

v2
- add assert instead of warn (Matthew Brost)

Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250513040228.470682-11-himal.prasad.ghimiray@intel.com
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
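With the helper public, a prefetch path can reuse exactly the migration policy the fault handler applies. Below is a minimal sketch of such a caller, assuming a hypothetical function name; only xe_svm_range_needs_migrate_to_vram(), xe_svm_alloc_vram() and their argument lists are taken from this patch:

	/*
	 * Hypothetical prefetch-side caller, not part of this patch: it feeds
	 * a caller-chosen region preference into the now-public predicate and
	 * only commits to a VRAM move when the predicate agrees.
	 */
	static int xe_svm_prefetch_range_sketch(struct xe_vm *vm,
						struct xe_tile *tile,
						struct xe_svm_range *range,
						struct xe_vma *vma,
						bool preferred_region_is_vram,
						struct drm_gpusvm_ctx *ctx)
	{
		if (xe_svm_range_needs_migrate_to_vram(range, vma,
						       preferred_region_is_vram))
			return xe_svm_alloc_vram(vm, tile, range, ctx);

		return 0;	/* Range stays in system memory. */
	}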
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
@@ -733,22 +733,32 @@ static bool supports_4K_migration(struct xe_device *xe)
 	return true;
 }
 
-static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
-					       struct xe_vma *vma)
+/**
+ * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True for range needing migration and migration is supported else false
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					bool preferred_region_is_vram)
 {
 	struct xe_vm *vm = range_to_vm(&range->base);
 	u64 range_size = xe_svm_range_size(range);
 
-	if (!range->base.flags.migrate_devmem)
+	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
 		return false;
 
-	if (xe_svm_range_in_vram(range)) {
-		drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+	xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
 		return false;
 	}
 
-	if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
-		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+	if (preferred_region_is_vram && range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+		drm_warn(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
 		return false;
 	}
 
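The v2 note in the commit message ("add assert instead of warn") turns the device check into an invariant. Below is a condensed, commented view of the new control flow; the statements are from the hunk above, the comments are annotations:

	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
		return false;	/* Caller prefers SMEM: never migrate. */

	/*
	 * Past this point the caller asked for VRAM, and callers should only
	 * pass true on discrete parts (e.g. IS_DGFX(vm->xe) at the fault
	 * path), so a failing assert indicates a caller bug rather than a
	 * state to warn about at runtime.
	 */
	xe_assert(vm->xe, IS_DGFX(vm->xe));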
@@ -817,7 +827,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	range_debug(range, "PAGE FAULT");
 
 	if (--migrate_try_count >= 0 &&
-	    xe_svm_range_needs_migrate_to_vram(range, vma)) {
+	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
 		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
 		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
 		if (err) {
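At the fault path the preference is simply "VRAM iff the device is discrete". An illustrative sketch, not from the patch, of how the two device classes now resolve:

	/* Discrete GPU: IS_DGFX(vm->xe) == true, the full policy applies. */
	bool migrate = xe_svm_range_needs_migrate_to_vram(range, vma, true);

	/*
	 * Integrated GPU: IS_DGFX(vm->xe) == false, so the predicate
	 * short-circuits on !preferred_region_is_vram and the fault is
	 * served from system memory without attempting a migration.
	 */
	migrate = xe_svm_range_needs_migrate_to_vram(range, vma, false);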
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
@@ -80,6 +80,9 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
 			   struct drm_gpusvm_ctx *ctx);
 
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					bool preferred_region_is_vram);
+
 /**
  * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
  * @range: SVM range
@@ -268,6 +271,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
 	return 0;
 }
 
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+					u32 region)
+{
+	return false;
+}
+
 #define xe_svm_assert_in_notifier(...) do {} while (0)
 #define xe_svm_range_has_dma_mapping(...) false