mm/zsmalloc: convert create_page_chain() and its users to use zpdesc

Introduce a few helper functions to convert create_page_chain() to use
zpdesc, then use zpdesc in replace_sub_page().

Link: https://lkml.kernel.org/r/20241216150450.1228021-7-42.hyeyoo@gmail.com
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Alex Shi
2024-12-17 00:04:37 +09:00
committed by Andrew Morton
parent 8f1868ad0c
commit 7d2e1a6950
2 changed files with 76 additions and 39 deletions

View File

@@ -148,4 +148,10 @@ static inline struct zpdesc *pfn_zpdesc(unsigned long pfn)
{
return page_zpdesc(pfn_to_page(pfn));
}
/*
 * Mark the page backing @zpdesc as movable, installing @mops as its
 * movable_operations. Thin zpdesc wrapper around __SetPageMovable().
 */
static inline void __zpdesc_set_movable(struct zpdesc *zpdesc,
const struct movable_operations *mops)
{
__SetPageMovable(zpdesc_page(zpdesc), mops);
}
#endif

View File

@@ -228,6 +228,35 @@ struct zs_pool {
atomic_t compaction_in_progress;
};
/*
 * Tag @zpdesc as the first sub-page of its zspage by setting PG_private
 * on the backing page; no other sub-page of the zspage carries this flag.
 */
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
SetPagePrivate(zpdesc_page(zpdesc));
}
/* Increment the NR_ZSPAGES zone counter for @zpdesc's backing page. */
static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc)
{
inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}
/* Decrement the NR_ZSPAGES zone counter for @zpdesc's backing page. */
static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc)
{
dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES);
}
/*
 * Allocate a single page with @gfp and return it as a zpdesc.
 * NOTE(review): presumably page_zpdesc() passes NULL through when
 * alloc_page() fails, so callers check the result for NULL — confirm
 * against the page_zpdesc() definition.
 */
static inline struct zpdesc *alloc_zpdesc(gfp_t gfp)
{
struct page *page = alloc_page(gfp);
return page_zpdesc(page);
}
/* Free the single page backing @zpdesc (counterpart of alloc_zpdesc()). */
static inline void free_zpdesc(struct zpdesc *zpdesc)
{
struct page *page = zpdesc_page(zpdesc);
__free_page(page);
}
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -937,35 +966,35 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
}
static void create_page_chain(struct size_class *class, struct zspage *zspage,
struct page *pages[])
struct zpdesc *zpdescs[])
{
int i;
struct page *page;
struct page *prev_page = NULL;
int nr_pages = class->pages_per_zspage;
struct zpdesc *zpdesc;
struct zpdesc *prev_zpdesc = NULL;
int nr_zpdescs = class->pages_per_zspage;
/*
* Allocate individual pages and link them together as:
* 1. all pages are linked together using page->index
* 2. each sub-page point to zspage using page->private
* 1. all pages are linked together using zpdesc->next
* 2. each sub-page point to zspage using zpdesc->zspage
*
* we set PG_private to identify the first page (i.e. no other sub-page
* we set PG_private to identify the first zpdesc (i.e. no other zpdesc
* has this flag set).
*/
for (i = 0; i < nr_pages; i++) {
page = pages[i];
set_page_private(page, (unsigned long)zspage);
page->index = 0;
for (i = 0; i < nr_zpdescs; i++) {
zpdesc = zpdescs[i];
zpdesc->zspage = zspage;
zpdesc->next = NULL;
if (i == 0) {
zspage->first_zpdesc = page_zpdesc(page);
SetPagePrivate(page);
zspage->first_zpdesc = zpdesc;
zpdesc_set_first(zpdesc);
if (unlikely(class->objs_per_zspage == 1 &&
class->pages_per_zspage == 1))
SetZsHugePage(zspage);
} else {
prev_page->index = (unsigned long)page;
prev_zpdesc->next = zpdesc;
}
prev_page = page;
prev_zpdesc = zpdesc;
}
}
@@ -977,7 +1006,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
gfp_t gfp)
{
int i;
struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE];
struct zspage *zspage = cache_alloc_zspage(pool, gfp);
if (!zspage)
@@ -987,25 +1016,25 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
migrate_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
struct page *page;
struct zpdesc *zpdesc;
page = alloc_page(gfp);
if (!page) {
zpdesc = alloc_zpdesc(gfp);
if (!zpdesc) {
while (--i >= 0) {
dec_zone_page_state(pages[i], NR_ZSPAGES);
__ClearPageZsmalloc(pages[i]);
__free_page(pages[i]);
zpdesc_dec_zone_page_state(zpdescs[i]);
__ClearPageZsmalloc(zpdesc_page(zpdescs[i]));
free_zpdesc(zpdescs[i]);
}
cache_free_zspage(pool, zspage);
return NULL;
}
__SetPageZsmalloc(page);
__SetPageZsmalloc(zpdesc_page(zpdesc));
inc_zone_page_state(page, NR_ZSPAGES);
pages[i] = page;
zpdesc_inc_zone_page_state(zpdesc);
zpdescs[i] = zpdesc;
}
create_page_chain(class, zspage, pages);
create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
zspage->pool = pool;
zspage->class = class->index;
@@ -1725,26 +1754,28 @@ static void migrate_write_unlock(struct zspage *zspage)
static const struct movable_operations zsmalloc_mops;
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
struct page *newpage, struct page *oldpage)
struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc)
{
struct page *page;
struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
struct zpdesc *zpdesc;
struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
unsigned int first_obj_offset;
int idx = 0;
page = get_first_page(zspage);
zpdesc = get_first_zpdesc(zspage);
do {
if (page == oldpage)
pages[idx] = newpage;
if (zpdesc == oldzpdesc)
zpdescs[idx] = newzpdesc;
else
pages[idx] = page;
zpdescs[idx] = zpdesc;
idx++;
} while ((page = get_next_page(page)) != NULL);
} while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL);
create_page_chain(class, zspage, pages);
set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
create_page_chain(class, zspage, zpdescs);
first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc));
set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset);
if (unlikely(ZsHugePage(zspage)))
newpage->index = oldpage->index;
__SetPageMovable(newpage, &zsmalloc_mops);
newzpdesc->handle = oldzpdesc->handle;
__zpdesc_set_movable(newzpdesc, &zsmalloc_mops);
}
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
@@ -1817,7 +1848,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
}
kunmap_local(s_addr);
replace_sub_page(class, zspage, newpage, page);
replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page));
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.