drm/xe/bo: Add purgeable shrinker state helpers

Encapsulate TTM purgeable flag updates and shrinker page accounting
into helper functions to prevent desynchronization between the TTM
tt->purgeable flag and the shrinker's page bucket counters.

Without these helpers, direct manipulation of xe_ttm_tt->purgeable
risks forgetting to update the corresponding shrinker counters,
leading to incorrect memory pressure calculations.

Also update the purgeable BO state to PURGED after a successful shrinker
purge of a DONTNEED BO.
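
For illustration only (not part of this patch), a sketch of the intended
caller flow using the helpers touched below; xe_bo_lock()/xe_bo_unlock()
are assumed here purely to show that the BO reservation must be held:

	/*
	 * Hypothetical caller flow: mark a BO's backing store as
	 * discardable, then let the shrinker purge it.
	 */
	xe_bo_lock(bo, false);	/* the helpers assert the BO resv is held */
	xe_bo_set_purgeable_state(bo, XE_MADV_PURGEABLE_DONTNEED);
	/* tt pages are now accounted in the shrinker's purgeable bucket */
	xe_bo_unlock(bo);

	/*
	 * If the shrinker later purges the backing store, xe_bo_shrink()
	 * transitions the BO to XE_MADV_PURGEABLE_PURGED, which is terminal.
	 */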

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260326130843.3545241-11-arvind.yadav@intel.com

@@ -838,6 +838,42 @@ static int xe_bo_move_notify(struct xe_bo *bo,
	return 0;
}

/**
 * xe_bo_set_purgeable_shrinker() - Update shrinker accounting for purgeable state
 * @bo: Buffer object
 * @new_state: New purgeable state being set
 *
 * Transfers pages between shrinkable and purgeable buckets when the BO
 * purgeable state changes. Called automatically from xe_bo_set_purgeable_state().
 */
static void xe_bo_set_purgeable_shrinker(struct xe_bo *bo,
					 enum xe_madv_purgeable_state new_state)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct ttm_tt *tt = ttm_bo->ttm;
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_ttm_tt *xe_tt;
	long tt_pages;

	xe_bo_assert_held(bo);

	if (!tt || !ttm_tt_is_populated(tt))
		return;

	xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	tt_pages = tt->num_pages;

	if (!xe_tt->purgeable && new_state == XE_MADV_PURGEABLE_DONTNEED) {
		xe_tt->purgeable = true;
		/* Transfer pages from shrinkable to purgeable count */
		xe_shrinker_mod_pages(xe->mem.shrinker, -tt_pages, tt_pages);
	} else if (xe_tt->purgeable && new_state == XE_MADV_PURGEABLE_WILLNEED) {
		xe_tt->purgeable = false;
		/* Transfer pages from purgeable to shrinkable count */
		xe_shrinker_mod_pages(xe->mem.shrinker, tt_pages, -tt_pages);
	}
}

/**
 * xe_bo_set_purgeable_state() - Set BO purgeable state with validation
 * @bo: Buffer object
@@ -845,7 +881,8 @@ static int xe_bo_move_notify(struct xe_bo *bo,
 *
 * Sets the purgeable state with lockdep assertions and validates state
 * transitions. Once a BO is PURGED, it cannot transition to any other state.
 * Invalid transitions are caught with xe_assert(). Shrinker page accounting
 * is updated automatically.
 */
void xe_bo_set_purgeable_state(struct xe_bo *bo,
			       enum xe_madv_purgeable_state new_state)
@@ -864,6 +901,7 @@ void xe_bo_set_purgeable_state(struct xe_bo *bo,
			new_state != XE_MADV_PURGEABLE_PURGED));

	bo->madv_purgeable = new_state;
	xe_bo_set_purgeable_shrinker(bo, new_state);
}

/**
@@ -1246,6 +1284,9 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		lret = xe_bo_move_notify(xe_bo, ctx);
		if (!lret)
			lret = xe_bo_shrink_purge(ctx, bo, scanned);
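
		/*
		 * A successful purge dropped the backing pages of a DONTNEED
		 * BO, so record that by moving it to the terminal PURGED state.
		 */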
		if (lret > 0 && xe_bo_madv_is_dontneed(xe_bo))
			xe_bo_set_purgeable_state(xe_bo,
						  XE_MADV_PURGEABLE_PURGED);

		goto out_unref;
	}