userfaultfd: mfill_atomic(): remove retry logic

Since __mfill_atomic_pte() handles the retry for both anonymous and shmem,
there is no need to retry copying the data from userspace in the loop
in mfill_atomic().

Drop the retry logic from mfill_atomic().

[rppt@kernel.org: remove safety measure of not returning ENOENT from _copy]
  Link: https://lore.kernel.org/ac5zcDUY8CFHr6Lw@kernel.org
Link: https://lore.kernel.org/20260402041156.1377214-12-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Mike Rapoport (Microsoft)
2026-04-02 07:11:52 +03:00
committed by Andrew Morton
parent f74991b4e3
commit 6ab703034f

View File

@@ -29,7 +29,6 @@ struct mfill_state {
struct vm_area_struct *vma;
unsigned long src_addr;
unsigned long dst_addr;
struct folio *folio;
pmd_t *pmd;
};
@@ -531,9 +530,6 @@ static int __mfill_atomic_pte(struct mfill_state *state,
ops->filemap_remove(folio, state->vma);
err_folio_put:
folio_put(folio);
/* Don't return -ENOENT so that our caller won't retry */
if (ret == -ENOENT)
ret = -EFAULT;
return ret;
}
@@ -899,7 +895,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
VM_WARN_ON_ONCE(src_start + len <= src_start);
VM_WARN_ON_ONCE(dst_start + len <= dst_start);
retry:
err = mfill_get_vma(&state);
if (err)
goto out;
@@ -926,26 +921,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
err = mfill_atomic_pte(&state);
cond_resched();
if (unlikely(err == -ENOENT)) {
void *kaddr;
mfill_put_vma(&state);
VM_WARN_ON_ONCE(!state.folio);
kaddr = kmap_local_folio(state.folio, 0);
err = copy_from_user(kaddr,
(const void __user *)state.src_addr,
PAGE_SIZE);
kunmap_local(kaddr);
if (unlikely(err)) {
err = -EFAULT;
goto out;
}
flush_dcache_folio(state.folio);
goto retry;
} else
VM_WARN_ON_ONCE(state.folio);
if (!err) {
state.dst_addr += PAGE_SIZE;
state.src_addr += PAGE_SIZE;
@@ -960,8 +935,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
mfill_put_vma(&state);
out:
if (state.folio)
folio_put(state.folio);
VM_WARN_ON_ONCE(copied < 0);
VM_WARN_ON_ONCE(err > 0);
VM_WARN_ON_ONCE(!copied && !err);