mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 12:21:22 -05:00
iov_iter: remove copy_page_from_iter_atomic()
All callers now use copy_folio_from_iter_atomic(), so convert copy_page_from_iter_atomic() into copy_folio_from_iter_atomic(). While I'm in there, use kmap_local_folio() and pagefault_disable() instead of kmap_atomic(). That allows preemption and/or task migration to happen during the copy_from_user(). Also use the new folio_test_partial_kmap() predicate instead of open-coding it. Link: https://lkml.kernel.org/r/20250514170607.3000994-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Hugh Dickins <hughd@google.com> Cc: Konstantin Komarov <almaz.alexandrovich@paragon-software.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
80ae99c572
commit
d973692944
@@ -176,8 +176,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
 	return ret;
 }
 
-size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
-		size_t bytes, struct iov_iter *i);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 void iov_iter_revert(struct iov_iter *i, size_t bytes);
 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
@@ -187,6 +185,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 		struct iov_iter *i);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 		struct iov_iter *i);
+size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
+		size_t bytes, struct iov_iter *i);
 
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
@@ -204,12 +204,6 @@ static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
 	return copy_page_from_iter(&folio->page, offset, bytes, i);
 }
 
-static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
-		size_t offset, size_t bytes, struct iov_iter *i)
-{
-	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
-}
-
 size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
 		size_t bytes, struct iov_iter *i);
@@ -457,38 +457,35 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_zero);
 
-size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
 		size_t bytes, struct iov_iter *i)
 {
 	size_t n, copied = 0;
-	bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
-			 PageHighMem(page);
 
-	if (!page_copy_sane(page, offset, bytes))
+	if (!page_copy_sane(&folio->page, offset, bytes))
 		return 0;
 	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
 
 	do {
-		char *p;
+		char *to = kmap_local_folio(folio, offset);
 
 		n = bytes - copied;
-		if (uses_kmap) {
-			page += offset / PAGE_SIZE;
-			offset %= PAGE_SIZE;
-			n = min_t(size_t, n, PAGE_SIZE - offset);
-		}
+		if (folio_test_partial_kmap(folio) &&
+		    n > PAGE_SIZE - offset_in_page(offset))
+			n = PAGE_SIZE - offset_in_page(offset);
 
-		p = kmap_atomic(page) + offset;
-		n = __copy_from_iter(p, n, i);
-		kunmap_atomic(p);
+		pagefault_disable();
+		n = __copy_from_iter(to, n, i);
+		pagefault_enable();
+		kunmap_local(to);
 		copied += n;
 		offset += n;
-	} while (uses_kmap && copied != bytes && n > 0);
+	} while (copied != bytes && n > 0);
 
 	return copied;
 }
-EXPORT_SYMBOL(copy_page_from_iter_atomic);
+EXPORT_SYMBOL(copy_folio_from_iter_atomic);
 
 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
 {
Reference in New Issue
Block a user