diff options
| author | Arvind Yadav <arvind.yadav@intel.com> | 2026-03-26 18:38:36 +0530 |
|---|---|---|
| committer | Matthew Brost <matthew.brost@intel.com> | 2026-03-26 19:59:53 -0700 |
| commit | 536a2ead3a8f2048643da3e8340a4d73fdf71903 (patch) | |
| tree | e4c229786a2037fa6679027567ee074954e8f65a | |
| parent | da857c9a48874dff183a10348ab1a49ffba1ec1e (diff) | |
drm/xe/bo: Add purgeable shrinker state helpers
Encapsulate TTM purgeable flag updates and shrinker page accounting
into helper functions to prevent desynchronization between the TTM
tt->purgeable flag and the shrinker's page bucket counters.
Without these helpers, callers that manipulate xe_ttm_tt->purgeable
directly risk forgetting to update the corresponding shrinker counters,
leading to incorrect memory-pressure calculations.
Update purgeable BO state to PURGED after successful shrinker purge
for DONTNEED BOs.
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Arvind Yadav <arvind.yadav@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260326130843.3545241-11-arvind.yadav@intel.com
| -rw-r--r-- | drivers/gpu/drm/xe/xe_bo.c | 43 |
1 file changed, 42 insertions, 1 deletion
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 67ed64edd9c6..a7c2dc7f224c 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -839,13 +839,50 @@ static int xe_bo_move_notify(struct xe_bo *bo, } /** + * xe_bo_set_purgeable_shrinker() - Update shrinker accounting for purgeable state + * @bo: Buffer object + * @new_state: New purgeable state being set + * + * Transfers pages between shrinkable and purgeable buckets when the BO + * purgeable state changes. Called automatically from xe_bo_set_purgeable_state(). + */ +static void xe_bo_set_purgeable_shrinker(struct xe_bo *bo, + enum xe_madv_purgeable_state new_state) +{ + struct ttm_buffer_object *ttm_bo = &bo->ttm; + struct ttm_tt *tt = ttm_bo->ttm; + struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + struct xe_ttm_tt *xe_tt; + long tt_pages; + + xe_bo_assert_held(bo); + + if (!tt || !ttm_tt_is_populated(tt)) + return; + + xe_tt = container_of(tt, struct xe_ttm_tt, ttm); + tt_pages = tt->num_pages; + + if (!xe_tt->purgeable && new_state == XE_MADV_PURGEABLE_DONTNEED) { + xe_tt->purgeable = true; + /* Transfer pages from shrinkable to purgeable count */ + xe_shrinker_mod_pages(xe->mem.shrinker, -tt_pages, tt_pages); + } else if (xe_tt->purgeable && new_state == XE_MADV_PURGEABLE_WILLNEED) { + xe_tt->purgeable = false; + /* Transfer pages from purgeable to shrinkable count */ + xe_shrinker_mod_pages(xe->mem.shrinker, tt_pages, -tt_pages); + } +} + +/** * xe_bo_set_purgeable_state() - Set BO purgeable state with validation * @bo: Buffer object * @new_state: New purgeable state * * Sets the purgeable state with lockdep assertions and validates state * transitions. Once a BO is PURGED, it cannot transition to any other state. - * Invalid transitions are caught with xe_assert(). + * Invalid transitions are caught with xe_assert(). Shrinker page accounting + * is updated automatically. 
*/ void xe_bo_set_purgeable_state(struct xe_bo *bo, enum xe_madv_purgeable_state new_state) @@ -864,6 +901,7 @@ void xe_bo_set_purgeable_state(struct xe_bo *bo, new_state != XE_MADV_PURGEABLE_PURGED)); bo->madv_purgeable = new_state; + xe_bo_set_purgeable_shrinker(bo, new_state); } /** @@ -1246,6 +1284,9 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, lret = xe_bo_move_notify(xe_bo, ctx); if (!lret) lret = xe_bo_shrink_purge(ctx, bo, scanned); + if (lret > 0 && xe_bo_madv_is_dontneed(xe_bo)) + xe_bo_set_purgeable_state(xe_bo, + XE_MADV_PURGEABLE_PURGED); goto out_unref; } |
