mm: shmem: factor out the within_size logic into a new helper

Factor out the within_size logic, which is duplicated between
shmem_huge_global_enabled() and shmem_allowable_huge_orders(), into a new
helper shmem_get_orders_within_size().
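
For context, the loop being factored out scans a bitmask of candidate folio
orders from largest to smallest, dropping every order whose folio would end
past i_size. A minimal userspace sketch of that scan pattern follows;
highest_order()/next_order() are stand-ins mirroring the mm/shmem.c helpers,
while ROUND_UP, orders_within_size() and the numbers in main() are purely
illustrative, not kernel code:

#include <stdio.h>

#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

/* Stand-in for the kernel's highest_order(): index of the top set bit. */
static int highest_order(unsigned long orders)
{
	return 8 * sizeof(orders) - 1 - __builtin_clzl(orders);
}

/* Stand-in for next_order(): drop @prev from the mask, return the next one. */
static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~(1UL << prev);
	return *orders ? highest_order(*orders) : 0;
}

/* Keep only the orders whose folio around @index ends within @size_pages. */
static unsigned long orders_within_size(unsigned long orders,
					unsigned long index,
					unsigned long size_pages)
{
	int order = highest_order(orders);

	while (orders) {
		/* A folio of @order covering @index ends at @aligned_index. */
		unsigned long aligned_index = ROUND_UP(index + 1, 1UL << order);

		/* The top order fits, and every smaller order fits a fortiori. */
		if (size_pages >= aligned_index)
			return orders;
		order = next_order(&orders, order);
	}
	return 0;
}

int main(void)
{
	/* Candidate orders 9, 4 and 2; fault at page 256 of a 300-page file. */
	unsigned long orders = (1UL << 9) | (1UL << 4) | (1UL << 2);

	/* Order 9 needs pages up to 512, so only orders 4 and 2 survive. */
	printf("fitting orders: %#lx\n", orders_within_size(orders, 256, 300));
	return 0;
}

This prints "fitting orders: 0x14", i.e. orders 4 and 2.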

Link: https://lkml.kernel.org/r/527dea9d7e32fe6b94c7fe00df2c126203017911.1738918357.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 086e66b690
parent cd81c424b5
Author:    Baolin Wang
Date:      2025-02-07 17:44:21 +08:00
Committer: Andrew Morton

@@ -590,6 +590,28 @@ shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
 	return order > 0 ? BIT(order + 1) - 1 : 0;
 }
 
+static unsigned int shmem_get_orders_within_size(struct inode *inode,
+		unsigned long within_size_orders, pgoff_t index,
+		loff_t write_end)
+{
+	pgoff_t aligned_index;
+	unsigned long order;
+	loff_t i_size;
+
+	order = highest_order(within_size_orders);
+	while (within_size_orders) {
+		aligned_index = round_up(index + 1, 1 << order);
+		i_size = max(write_end, i_size_read(inode));
+		i_size = round_up(i_size, PAGE_SIZE);
+		if (i_size >> PAGE_SHIFT >= aligned_index)
+			return within_size_orders;
+
+		order = next_order(&within_size_orders, order);
+	}
+
+	return 0;
+}
+
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 					      loff_t write_end, bool shmem_huge_force,
 					      struct vm_area_struct *vma,
@@ -598,9 +620,6 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
 		0 : BIT(HPAGE_PMD_ORDER);
 	unsigned long within_size_orders;
-	unsigned int order;
-	pgoff_t aligned_index;
-	loff_t i_size;
 
 	if (!S_ISREG(inode->i_mode))
 		return 0;
@@ -634,16 +653,11 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 	case SHMEM_HUGE_WITHIN_SIZE:
 		within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
							       index, write_end);
-		order = highest_order(within_size_orders);
-		while (within_size_orders) {
-			aligned_index = round_up(index + 1, 1 << order);
-			i_size = max(write_end, i_size_read(inode));
-			i_size = round_up(i_size, PAGE_SIZE);
-			if (i_size >> PAGE_SHIFT >= aligned_index)
-				return within_size_orders;
+		within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
+								  index, write_end);
+		if (within_size_orders > 0)
+			return within_size_orders;
 
-			order = next_order(&within_size_orders, order);
-		}
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
 		if (vm_flags & VM_HUGEPAGE)
@@ -1747,10 +1761,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
-	pgoff_t aligned_index;
 	unsigned int global_orders;
-	loff_t i_size;
-	int order;
 
 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
 		return 0;
@@ -1776,17 +1787,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 		return READ_ONCE(huge_shmem_orders_inherit);
 
 	/* Allow mTHP that will be fully within i_size. */
-	order = highest_order(within_size_orders);
-	while (within_size_orders) {
-		aligned_index = round_up(index + 1, 1 << order);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >> PAGE_SHIFT >= aligned_index) {
-			mask |= within_size_orders;
-			break;
-		}
-
-		order = next_order(&within_size_orders, order);
-	}
+	mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
 
 	if (vm_flags & VM_HUGEPAGE)
 		mask |= READ_ONCE(huge_shmem_orders_madvise);
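
One semantic detail worth noting: the helper computes i_size as
max(write_end, i_size_read(inode)), so both callers keep their original
behaviour. Sketched from the two call sites above (the comments are
editorial, not part of the patch):

	/* shmem_huge_global_enabled(): an in-flight write may extend the bound. */
	within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
							  index, write_end);

	/* shmem_allowable_huge_orders(): write_end == 0, so max(0, i_size_read())
	 * reduces to i_size_read() and matches the i_size-only loop it replaces. */
	mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);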