mm/damon/vaddr: add vaddr versions of migrate_{hot,cold}

migrate_{hot,cold} are paddr schemes that are used to migrate hot/cold
data to a specified node.  However, these schemes are only available when
doing physical address monitoring.  This patch adds an implementation of
them for virtual address monitoring as well.
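
For illustration only (not part of this patch), wiring the new action up
through the DAMON sysfs interface might look roughly like the sketch
below; it assumes the usual kdamonds layout and omits the nr_* setup and
access-pattern tuning:

    # monitor a process's virtual address space
    echo vaddr > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/operations
    echo $PID > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/targets/0/pid_target
    # migrate hot data to NUMA node 1
    echo migrate_hot > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/action
    echo 1 > /sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/target_nid
    echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state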

Link: https://lkml.kernel.org/r/20250709005952.17776-10-bijan311@gmail.com
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@micron.com>
Signed-off-by: Bijan Tabatabai <bijantabatab@micron.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

mm/damon/vaddr.c

@@ -15,6 +15,7 @@
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "../internal.h"
#include "ops-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
@@ -610,6 +611,68 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
	return max_nr_accesses;
}
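
/*
 * Page table walk callbacks for DAMOS_MIGRATE_{HOT,COLD} on vaddr: isolate
 * each folio mapped in the walked range and collect it on the migration
 * list passed in via walk->private.
 */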
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	struct list_head *migration_list = walk->private;
	struct folio *folio;
	spinlock_t *ptl;
	pmd_t pmde;

	ptl = pmd_lock(walk->mm, pmd);
	pmde = pmdp_get(pmd);

	if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
		goto unlock;

	/* Tell page walk code to not split the PMD */
	walk->action = ACTION_CONTINUE;

	folio = damon_get_folio(pmd_pfn(pmde));
	if (!folio)
		goto unlock;

	if (!folio_isolate_lru(folio))
		goto put_folio;

	list_add(&folio->lru, migration_list);

put_folio:
	folio_put(folio);
unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define damos_va_migrate_pmd_entry NULL
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	struct list_head *migration_list = walk->private;
	struct folio *folio;
	pte_t ptent;

	ptent = ptep_get(pte);
	if (pte_none(ptent) || !pte_present(ptent))
		return 0;

	folio = damon_get_folio(pte_pfn(ptent));
	if (!folio)
		return 0;

	if (!folio_isolate_lru(folio))
		goto out;

	list_add(&folio->lru, migration_list);

out:
	folio_put(folio);
	return 0;
}

/*
 * Functions for the target validity check and cleanup
 */
@@ -653,6 +716,34 @@ static unsigned long damos_madvise(struct damon_target *target,
}
#endif /* CONFIG_ADVISE_SYSCALLS */
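
/*
 * Isolate the folios mapped in the given region and migrate them to the
 * scheme's target node; returns the number of bytes migrated.
 */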
static unsigned long damos_va_migrate(struct damon_target *target,
		struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	LIST_HEAD(folio_list);
	struct mm_struct *mm;
	unsigned long applied = 0;
	struct mm_walk_ops walk_ops = {
		.pmd_entry = damos_va_migrate_pmd_entry,
		.pte_entry = damos_va_migrate_pte_entry,
		.walk_lock = PGWALK_RDLOCK,
	};

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	mmap_read_lock(mm);
	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &folio_list);
	mmap_read_unlock(mm);
	mmput(mm);

	applied = damon_migrate_pages(&folio_list, s->target_nid);
	cond_resched();
	return applied * PAGE_SIZE;
}

static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme, unsigned long *sz_filter_passed)
@@ -675,6 +766,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_MIGRATE_HOT:
	case DAMOS_MIGRATE_COLD:
		return damos_va_migrate(t, r, scheme, sz_filter_passed);
	case DAMOS_STAT:
		return 0;
	default:
@@ -695,6 +789,10 @@ static int damon_va_scheme_score(struct damon_ctx *context,
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_cold_score(context, r, scheme);
	case DAMOS_MIGRATE_HOT:
		return damon_hot_score(context, r, scheme);
	case DAMOS_MIGRATE_COLD:
		return damon_cold_score(context, r, scheme);
	default:
		break;
	}