mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-03 18:12:25 -04:00
mm: shmem: use folio_alloc_mpol() in shmem_alloc_folio()
Let's change shmem_alloc_folio() to take an order and use the folio_alloc_mpol() helper, then directly use it for normal or large folios to clean up the code. Link: https://lkml.kernel.org/r/20240515070709.78529-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
1d9cb7852b
commit
6f775463d0
32
mm/shmem.c
32
mm/shmem.c
@@ -1603,32 +1603,18 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
|
||||
return result;
|
||||
}
|
||||
|
||||
static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
|
||||
static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
|
||||
struct shmem_inode_info *info, pgoff_t index)
|
||||
{
|
||||
struct mempolicy *mpol;
|
||||
pgoff_t ilx;
|
||||
struct page *page;
|
||||
struct folio *folio;
|
||||
|
||||
mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx);
|
||||
page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id());
|
||||
mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
|
||||
folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
|
||||
mpol_cond_put(mpol);
|
||||
|
||||
return page_rmappable_folio(page);
|
||||
}
|
||||
|
||||
static struct folio *shmem_alloc_folio(gfp_t gfp,
|
||||
struct shmem_inode_info *info, pgoff_t index)
|
||||
{
|
||||
struct mempolicy *mpol;
|
||||
pgoff_t ilx;
|
||||
struct page *page;
|
||||
|
||||
mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
|
||||
page = alloc_pages_mpol(gfp, 0, mpol, ilx, numa_node_id());
|
||||
mpol_cond_put(mpol);
|
||||
|
||||
return (struct folio *)page;
|
||||
return folio;
|
||||
}
|
||||
|
||||
static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
|
||||
@@ -1660,12 +1646,12 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp,
|
||||
index + HPAGE_PMD_NR - 1, XA_PRESENT))
|
||||
return ERR_PTR(-E2BIG);
|
||||
|
||||
folio = shmem_alloc_hugefolio(gfp, info, index);
|
||||
folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index);
|
||||
if (!folio)
|
||||
count_vm_event(THP_FILE_FALLBACK);
|
||||
} else {
|
||||
pages = 1;
|
||||
folio = shmem_alloc_folio(gfp, info, index);
|
||||
folio = shmem_alloc_folio(gfp, 0, info, index);
|
||||
}
|
||||
if (!folio)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@@ -1765,7 +1751,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
|
||||
*/
|
||||
gfp &= ~GFP_CONSTRAINT_MASK;
|
||||
VM_BUG_ON_FOLIO(folio_test_large(old), old);
|
||||
new = shmem_alloc_folio(gfp, info, index);
|
||||
new = shmem_alloc_folio(gfp, 0, info, index);
|
||||
if (!new)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -2633,7 +2619,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
|
||||
|
||||
if (!*foliop) {
|
||||
ret = -ENOMEM;
|
||||
folio = shmem_alloc_folio(gfp, info, pgoff);
|
||||
folio = shmem_alloc_folio(gfp, 0, info, pgoff);
|
||||
if (!folio)
|
||||
goto out_unacct_blocks;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user