fs/ntfs3: return folios from ntfs_lock_new_page()

ntfs_lock_new_page() currently returns a struct page * but it primarily
operates on folios via __filemap_get_folio(). Convert it to return a
struct folio * and use folio_alloc() + __folio_set_locked() for the
temporary page used to avoid data corruption during decompression.

When the cached folio is not uptodate, preserve the existing behavior by
using folio_file_page() and converting the returned page back to a
folio.

Update ni_read_folio_cmpr() and ni_decompress_file() to handle the new
return type while keeping the existing struct page * array and the
unlock_page()/put_page() cleanup paths unchanged.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202602072013.jwrURE2e-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202602071921.nGIiI1J5-lkp@intel.com/
Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com>
[almaz.alexandrovich@paragon-software.com: removed ni_fiemap function,
added reported-by and closes tags to commit]
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
This commit is contained in:
Sun Jian
2026-02-07 22:45:52 +08:00
committed by Konstantin Komarov
parent 3a2141b2f1
commit e8619bcb08

View File

@@ -1852,27 +1852,31 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
return REPARSE_LINK;
}
static struct page *ntfs_lock_new_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
static struct folio *ntfs_lock_new_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
struct folio *folio = __filemap_get_folio(
mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
struct page *page;
struct folio *folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
if (IS_ERR(folio))
return ERR_CAST(folio);
return folio;
if (!folio_test_uptodate(folio))
return folio_file_page(folio, index);
if (!folio_test_uptodate(folio)) {
struct page *page = folio_file_page(folio, index);
if (IS_ERR(page))
return ERR_CAST(page);
return page_folio(page);
}
/* Use a temporary page to avoid data corruption */
folio_unlock(folio);
folio_put(folio);
page = alloc_page(gfp);
if (!page)
folio = folio_alloc(gfp, 0);
if (!folio)
return ERR_PTR(-ENOMEM);
__SetPageLocked(page);
return page;
__folio_set_locked(folio);
return folio;
}
/*
@@ -1894,6 +1898,7 @@ int ni_read_folio_cmpr(struct ntfs_inode *ni, struct folio *folio)
u32 i, idx, frame_size, pages_per_frame;
gfp_t gfp_mask;
struct page *pg;
struct folio *f;
if (vbo >= i_size_read(&ni->vfs_inode)) {
folio_zero_range(folio, 0, folio_size(folio));
@@ -1929,12 +1934,12 @@ int ni_read_folio_cmpr(struct ntfs_inode *ni, struct folio *folio)
if (i == idx)
continue;
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(pg)) {
err = PTR_ERR(pg);
f = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(f)) {
err = PTR_ERR(f);
goto out1;
}
pages[i] = pg;
pages[i] = &f->page;
}
ni_lock(ni);
@@ -2023,18 +2028,18 @@ int ni_decompress_file(struct ntfs_inode *ni)
}
for (i = 0; i < pages_per_frame; i++, index++) {
struct page *pg;
struct folio *f;
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(pg)) {
f = ntfs_lock_new_page(mapping, index, gfp_mask);
if (IS_ERR(f)) {
while (i--) {
unlock_page(pages[i]);
put_page(pages[i]);
}
err = PTR_ERR(pg);
err = PTR_ERR(f);
goto out;
}
pages[i] = pg;
pages[i] = &f->page;
}
err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);