From 6b7ce3134f68107438c99f8ea5424e08d418ca1b Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:05 +0100 Subject: [PATCH 001/431] x86/kgdb: use IS_ERR_PCPU() macro Patch series "Enable strict percpu address space checks", v4. Enable strict percpu address space checks via x86 named address space qualifiers. Percpu variables are declared in __seg_gs/__seg_fs named AS and kept named AS qualified until they are dereferenced via percpu accessor. This approach enables various compiler checks for cross-namespace variable assignments. Please note that current version of sparse doesn't know anything about __typeof_unqual__() operator. Avoid the usage of __typeof_unqual__() when sparse checking is active to prevent sparse errors with unknowing keyword. The proposed patch by Dan Carpenter to implement __typeof_unqual__() handling in sparse is located at: https://lore.kernel.org/lkml/5b8d0dee-8fb6-45af-ba6c-7f74aff9a4b8@stanley.mountain/ This patch (of 6): Use IS_ERR_PCPU() when checking the error pointer in the percpu address space. This macro adds intermediate cast to unsigned long when switching named address spaces. The patch will avoid future build errors due to pointer address space mismatch with enabled strict percpu address space checks. Link: https://lkml.kernel.org/r/20250127160709.80604-1-ubizjak@gmail.com Link: https://lkml.kernel.org/r/20250127160709.80604-2-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/kernel/kgdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 9c9faa1634fb..102641fd2172 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -655,7 +655,7 @@ void kgdb_arch_late(void) if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); - if (IS_ERR((void * __force)breakinfo[i].pev)) { + if (IS_ERR_PCPU(breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; From ac053946f5c40ce90ca7ccb75ed687612d9eccf9 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:06 +0100 Subject: [PATCH 002/431] compiler.h: introduce TYPEOF_UNQUAL() macro Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof operator when available, to return unqualified type of the expression. Current version of sparse doesn't know anything about __typeof_unqual__() operator. Avoid the usage of __typeof_unqual__() when sparse checking is active to prevent sparse errors with unknowing keyword. Link: https://lkml.kernel.org/r/20250127160709.80604-3-ubizjak@gmail.com Signed-off-by: Uros Bizjak Cc: Thomas Gleixner Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Brian Gerst Cc: Denys Vlasenko Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: Borislav Petkov Cc: Dave Hansen Cc: "David S. 
Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Nadav Amit Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/compiler-clang.h | 8 ++++++++ include/linux/compiler-gcc.h | 8 ++++++++ include/linux/compiler.h | 20 ++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 2e7c2c282f3a..4fc8e26914ad 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -128,3 +128,11 @@ */ #define ASM_INPUT_G "ir" #define ASM_INPUT_RM "r" + +/* + * Declare compiler support for __typeof_unqual__() operator. + * + * Bindgen uses LLVM even if our C compiler is GCC, so we cannot + * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL. + */ +#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index c9b58188ec61..32048052c64a 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -137,3 +137,11 @@ #if GCC_VERSION < 90100 #undef __alloc_size__ #endif + +/* + * Declare compiler support for __typeof_unqual__() operator. + * + * Bindgen uses LLVM even if our C compiler is GCC, so we cannot + * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL. + */ +#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 155385754824..3a7a537e13a3 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -210,6 +210,26 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define __must_be_cstr(p) \ __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)") +/* + * Use __typeof_unqual__() when available. + * + * XXX: Remove test for __CHECKER__ once + * sparse learns about __typeof_unqual__(). + */ +#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__) +# define USE_TYPEOF_UNQUAL 1 +#endif + +/* + * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof + * operator when available, to return an unqualified type of the exp. + */ +#if defined(USE_TYPEOF_UNQUAL) +# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp) +#else +# define TYPEOF_UNQUAL(exp) __typeof__(exp) +#endif + #endif /* __KERNEL__ */ /** From 8a3c392388c6a6e0c8937a24712b630ec9ac7016 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:07 +0100 Subject: [PATCH 003/431] percpu: use TYPEOF_UNQUAL() in variable declarations Use TYPEOF_UNQUAL() to declare variables as a corresponding type without named address space qualifier to avoid "`__seg_gs' specified for auto variable `var'" errors. Link: https://lkml.kernel.org/r/20250127160709.80604-4-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Christoph Lameter Cc: Dennis Zhou Cc: Tejun Heo Cc: Andy Lutomirski Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Kent Overstreet Cc: Arnd Bergmann Cc: "David S. 
Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Peter Zijlstra Cc: Will Deacon Cc: Waiman Long Cc: Boqun Feng Cc: Linus Torvalds Cc: Brian Gerst Cc: Denys Vlasenko Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 10 +++++----- fs/bcachefs/util.h | 2 +- include/asm-generic/percpu.h | 26 +++++++++++++------------- include/linux/part_stat.h | 2 +- include/linux/percpu-defs.h | 4 ++-- include/net/snmp.h | 5 ++--- kernel/locking/percpu-rwsem.c | 2 +- net/mpls/internal.h | 4 ++-- 8 files changed, 27 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index e525cd85f999..666e4137b09f 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -180,7 +180,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -219,7 +219,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -240,7 +240,7 @@ do { \ (val) == (typeof(val))-1)) ? (int)(val) : 0; \ \ if (0) { \ - typeof(var) pao_tmp__; \ + TYPEOF_UNQUAL(var) pao_tmp__; \ pao_tmp__ = (val); \ (void)pao_tmp__; \ } \ @@ -273,7 +273,7 @@ do { \ */ #define raw_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = raw_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var); \ \ raw_cpu_write(_var, _nval); \ \ @@ -287,7 +287,7 @@ do { \ */ #define this_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = this_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var); \ \ do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \ \ diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index e7c3541b38f3..5760f7dbcac2 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -609,7 +609,7 @@ do { \ #define per_cpu_sum(_p) \ ({ \ - typeof(*_p) _ret = 0; \ + TYPEOF_UNQUAL(*_p) _ret = 0; \ \ int cpu; \ for_each_possible_cpu(cpu) \ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 94cbd50cc870..50597b975a49 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -74,7 +74,7 @@ do { \ #define raw_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ \ *__p += val; \ *__p; \ @@ -82,8 +82,8 @@ do { \ #define raw_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __ret; \ __ret = *__p; \ *__p = nval; \ __ret; \ @@ -91,7 +91,7 @@ do { \ #define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \ ({ \ - typeof(pcp) __val, __old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \ __val = _cmpxchg(pcp, __old, nval); \ if (__val != __old) \ *(ovalp) = __val; \ @@ -100,8 +100,8 @@ do { \ #define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __val = *__p, ___old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \ bool __ret; \ if (__val == ___old) { \ *__p = nval; \ @@ -115,14 +115,14 @@ do { \ #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __old = (oval); \ + TYPEOF_UNQUAL(pcp) __old = (oval); \ 
raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \ __old; \ }) #define __this_cpu_generic_read_nopreempt(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ preempt_disable_notrace(); \ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ preempt_enable_notrace(); \ @@ -131,7 +131,7 @@ do { \ #define __this_cpu_generic_read_noirq(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ unsigned long ___flags; \ raw_local_irq_save(___flags); \ ___ret = raw_cpu_generic_read(pcp); \ @@ -141,7 +141,7 @@ do { \ #define this_cpu_generic_read(pcp) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ if (__native_word(pcp)) \ __ret = __this_cpu_generic_read_nopreempt(pcp); \ else \ @@ -160,7 +160,7 @@ do { \ #define this_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_add_return(pcp, val); \ @@ -170,7 +170,7 @@ do { \ #define this_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_xchg(pcp, nval); \ @@ -190,7 +190,7 @@ do { \ #define this_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index ac8c44dd8237..c5e9cac0575e 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -33,7 +33,7 @@ struct disk_stats { #define part_stat_read(part, field) \ ({ \ - typeof((part)->bd_stats->field) res = 0; \ + TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \ unsigned int _cpu; \ for_each_possible_cpu(_cpu) \ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 5b520fe86b60..79b9402404f1 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -317,7 +317,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return(stem, variable) \ ({ \ - typeof(variable) pscr_ret__; \ + TYPEOF_UNQUAL(variable) pscr_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr_ret__ = stem##1(variable); break; \ @@ -332,7 +332,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return2(stem, variable, ...) 
\ ({ \ - typeof(variable) pscr2_ret__; \ + TYPEOF_UNQUAL(variable) pscr2_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ diff --git a/include/net/snmp.h b/include/net/snmp.h index 468a67836e2f..4cb4326dfebe 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -159,7 +159,7 @@ struct linux_tls_mib { #define __SNMP_ADD_STATS64(mib, field, addend) \ do { \ - __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[field] += addend; \ u64_stats_update_end(&ptr->syncp); \ @@ -176,8 +176,7 @@ struct linux_tls_mib { #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) #define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ - __typeof__(*mib) *ptr; \ - ptr = raw_cpu_ptr((mib)); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend; \ diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index 6083883c4fe0..d6964fc29f51 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read); #define per_cpu_sum(var) \ ({ \ - typeof(var) __sum = 0; \ + TYPEOF_UNQUAL(var) __sum = 0; \ int cpu; \ compiletime_assert_atomic_type(__sum); \ for_each_possible_cpu(cpu) \ diff --git a/net/mpls/internal.h b/net/mpls/internal.h index b9f492ddf93b..83c629529b57 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -33,7 +33,7 @@ struct mpls_dev { #define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ @@ -45,7 +45,7 @@ struct mpls_dev { #define MPLS_INC_STATS(mdev, field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ From 6a39fe05ecaa3946ed0af9efd3e56689d519e420 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:08 +0100 Subject: [PATCH 004/431] percpu: use TYPEOF_UNQUAL() in *_cpu_ptr() accessors Use TYPEOF_UNQUAL() macro to declare the return type of *_cpu_ptr() accessors in the generic named address space to avoid access to data from pointer to non-enclosed address space type of errors. Link: https://lkml.kernel.org/r/20250127160709.80604-5-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Christoph Lameter Cc: Dennis Zhou Cc: Tejun Heo Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. 
Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 8 ++++++-- include/linux/percpu-defs.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 666e4137b09f..27f668660abe 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -73,10 +73,14 @@ unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \ \ tcp_ptr__ += (__force unsigned long)(_ptr); \ - (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__; \ }) #else -#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; }) +#define arch_raw_cpu_ptr(_ptr) \ +({ \ + BUILD_BUG(); \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0; \ +}) #endif #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 79b9402404f1..a7cf954ea99d 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -221,7 +221,7 @@ do { \ } while (0) #define PERCPU_PTR(__p) \ - (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p)) + (TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p)) #ifdef CONFIG_SMP From 6cea5ae714ba47ea4807d15903baca9857a450e6 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:09 +0100 Subject: [PATCH 005/431] percpu: repurpose __percpu tag as a named address space qualifier The patch introduces __percpu_qual define and repurposes __percpu tag as a named address space qualifier using the new define. Arches can now conditionally define __percpu_qual as their named address space qualifier for percpu variables. Link: https://lkml.kernel.org/r/20250127160709.80604-6-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Brian Gerst Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Boqun Feng Cc: Borislav Petkov Cc: Dave Hansen Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- include/asm-generic/percpu.h | 13 +++++++++++++ include/linux/compiler_types.h | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 50597b975a49..02aeca21479a 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -6,6 +6,19 @@ #include #include +/* + * __percpu_qual is the qualifier for the percpu named address space. + * + * Most arches use generic named address space for percpu variables but + * some arches define percpu variables in different named address space + * (on the x86 arch, percpu variable may be declared as being relative + * to the %fs or %gs segments using __seg_fs or __seg_gs named address + * space qualifier). 
+ */ +#ifndef __percpu_qual +# define __percpu_qual +#endif + #ifdef CONFIG_SMP /* diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 981cc3d7e3aa..5d6544545658 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -57,7 +57,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __user BTF_TYPE_TAG(user) # endif # define __iomem -# define __percpu BTF_TYPE_TAG(percpu) +# define __percpu __percpu_qual BTF_TYPE_TAG(percpu) # define __rcu BTF_TYPE_TAG(rcu) # define __chk_user_ptr(x) (void)0 From 6a367577153acd9b432a5340fb10891eeb7e10f1 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Mon, 27 Jan 2025 17:05:10 +0100 Subject: [PATCH 006/431] percpu/x86: enable strict percpu checks via named AS qualifiers This patch declares percpu variables in __seg_gs/__seg_fs named AS and keeps them named AS qualified until they are dereferenced with percpu accessor. This approach enables various compiler check for cross-namespace variable assignments. Link: https://lkml.kernel.org/r/20250127160709.80604-7-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Cc: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 27f668660abe..474d648bca9a 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -95,9 +95,18 @@ #endif /* CONFIG_SMP */ -#define __my_cpu_type(var) typeof(var) __percpu_seg_override -#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) -#define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL) +# define __my_cpu_type(var) typeof(var) +# define __my_cpu_ptr(ptr) (ptr) +# define __my_cpu_var(var) (var) + +# define __percpu_qual __percpu_seg_override +#else +# define __my_cpu_type(var) typeof(var) __percpu_seg_override +# define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) +# define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +#endif + #define __percpu_arg(x) __percpu_prefix "%" #x #define __force_percpu_arg(x) __force_percpu_prefix "%" #x From 1c81f1a699263aeae9aa1ac777058846c546e3c0 Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Fri, 24 Jan 2025 07:35:11 +0000 Subject: [PATCH 007/431] memcg: use OFP_PEAK_UNSET instead of -1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Some cleanup for memcg", v4. This patch (of 4): The 'OFP_PEAK_UNSET' has been defined, use it instead of '-1'. 
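As a hedged aside, a standalone sketch of the pattern (struct peak_watcher, PEAK_UNSET and peak_first_write() are invented names standing in for the real memcg code and its OFP_PEAK_UNSET): comparing against a named sentinel rather than a bare -1 keeps the sentinel's definition in one place and makes the check site self-documenting.

	#include <stdbool.h>

	/* Illustrative sentinel; the real OFP_PEAK_UNSET lives in mm/memcontrol.c. */
	#define PEAK_UNSET	(-1UL)

	struct peak_watcher {
		unsigned long value;	/* PEAK_UNSET until the first write */
	};

	/* Return true on the initial write, when the watcher must be registered. */
	static bool peak_first_write(struct peak_watcher *w, unsigned long usage)
	{
		bool first = (w->value == PEAK_UNSET);	/* rather than "== -1" */

		w->value = usage;
		return first;
	}
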
Link: https://lkml.kernel.org/r/20250124073514.2375622-1-chenridong@huaweicloud.com Link: https://lkml.kernel.org/r/20250124073514.2375622-2-chenridong@huaweicloud.com Signed-off-by: Chen Ridong Reviewed-by: Michal Koutný Acked-by: David Finkel Acked-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Roman Gushchin Cc: Michal Hocko Cc: Muchun Song Cc: Michal Hocko Cc: Muchun Song Cc: Vlastimil Babka Cc: Wang Weiyang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a037ec92881d..12f45dd0f64a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4013,7 +4013,7 @@ static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes, WRITE_ONCE(peer_ctx->value, usage); /* initial write, register watcher */ - if (ofp->value == -1) + if (ofp->value == OFP_PEAK_UNSET) list_add(&ofp->list, watchers); WRITE_ONCE(ofp->value, usage); From 2059c8e320e2e538f70aa9b1e6ee9e793d4db6f7 Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Fri, 24 Jan 2025 07:35:12 +0000 Subject: [PATCH 008/431] memcg: call the free function when allocation of pn fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 'free_mem_cgroup_per_node_info' function is used to free the 'mem_cgroup_per_node' struct. Using 'pn' as the input for the free_mem_cgroup_per_node_info function will be much clearer. Call 'free_mem_cgroup_per_node_info' when 'alloc_mem_cgroup_per_node_info' fails, to free 'pn' as a whole, which makes the code more cohesive. Link: https://lkml.kernel.org/r/20250124073514.2375622-3-chenridong@huaweicloud.com Signed-off-by: Chen Ridong Reviewed-by: Michal Koutný Acked-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Roman Gushchin Cc: David Finkel Cc: Michal Hocko Cc: Michal Hocko Cc: Muchun Song Cc: Muchun Song Cc: Vlastimil Babka Cc: Wang Weiyang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 12f45dd0f64a..7fd03425e2c4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3433,6 +3433,16 @@ struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) } #endif +static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn) +{ + if (!pn) + return; + + free_percpu(pn->lruvec_stats_percpu); + kfree(pn->lruvec_stats); + kfree(pn); +} + static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) { struct mem_cgroup_per_node *pn; @@ -3457,23 +3467,10 @@ static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) memcg->nodeinfo[node] = pn; return true; fail: - kfree(pn->lruvec_stats); - kfree(pn); + free_mem_cgroup_per_node_info(pn); return false; } -static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) -{ - struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; - - if (!pn) - return; - - free_percpu(pn->lruvec_stats_percpu); - kfree(pn->lruvec_stats); - kfree(pn); -} - static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; @@ -3481,7 +3478,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) obj_cgroup_put(memcg->orig_objcg); for_each_node(node) - free_mem_cgroup_per_node_info(memcg, node); + free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); memcg1_free_events(memcg); kfree(memcg->vmstats); free_percpu(memcg->vmstats_percpu); From bc812d1905df2e7dbc8a80253ec9c31366526ed3 Mon Sep 17 00:00:00 
2001 From: Chen Ridong Date: Fri, 24 Jan 2025 07:35:13 +0000 Subject: [PATCH 009/431] memcg: factor out the replace_stock_objcg function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Factor out the 'replace_stock_objcg' function to make the code more cohesive. Link: https://lkml.kernel.org/r/20250124073514.2375622-4-chenridong@huaweicloud.com Signed-off-by: Chen Ridong Reviewed-by: Roman Gushchin Acked-by: Shakeel Butt Acked-by: Johannes Weiner Cc: David Finkel Cc: Michal Hocko Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Muchun Song Cc: Vlastimil Babka Cc: Wang Weiyang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7fd03425e2c4..65056395a1b4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2700,6 +2700,20 @@ void __memcg_kmem_uncharge_page(struct page *page, int order) obj_cgroup_put(objcg); } +/* Replace the stock objcg with objcg, return the old objcg */ +static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock, + struct obj_cgroup *objcg) +{ + struct obj_cgroup *old = NULL; + + old = drain_obj_stock(stock); + obj_cgroup_get(objcg); + stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) + ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + WRITE_ONCE(stock->cached_objcg, objcg); + return old; +} + static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) { @@ -2717,11 +2731,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, * changes. */ if (READ_ONCE(stock->cached_objcg) != objcg) { - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; - WRITE_ONCE(stock->cached_objcg, objcg); + old = replace_stock_objcg(stock, objcg); stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ @@ -2875,11 +2885,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, stock = this_cpu_ptr(&memcg_stock); if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ - old = drain_obj_stock(stock); - obj_cgroup_get(objcg); - WRITE_ONCE(stock->cached_objcg, objcg); - stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) - ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; + old = replace_stock_objcg(stock, objcg); allow_uncharge = true; /* Allow uncharge when objcg changes */ } stock->nr_bytes += nr_bytes; From 610dc18c502df113e95d4f23374b30538d0b633e Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Fri, 24 Jan 2025 07:35:14 +0000 Subject: [PATCH 010/431] memcg: add CONFIG_MEMCG_V1 for 'local' functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add CONFIG_MEMCG_V1 for the 'local' functions, which are only used in memcg v1, so that they won't be built for v2. 
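The shape of the change, as a minimal sketch with invented names (CONFIG_FOO_V1, foo_events(), foo_events_local(); not the actual memcg declarations): both the prototype and the definition move under the config guard, and no !CONFIG_FOO_V1 stub is added, which only works because every remaining caller is itself built under the same option.

	/* foo.h -- shared internal header */
	unsigned long foo_events(int event);		/* used by v1 and v2 */
	#ifdef CONFIG_FOO_V1
	unsigned long foo_events_local(int event);	/* callers exist only in v1 code */
	#endif

	/* foo.c */
	unsigned long foo_events(int event)
	{
		return (unsigned long)event;	/* ...common implementation... */
	}

	#ifdef CONFIG_FOO_V1
	unsigned long foo_events_local(int event)
	{
		return (unsigned long)event;	/* compiled out entirely when v1 is off */
	}
	#endif
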
Link: https://lkml.kernel.org/r/20250124073514.2375622-5-chenridong@huaweicloud.com Signed-off-by: Chen Ridong Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Roman Gushchin Cc: David Finkel Cc: Michal Hocko Cc: Muchun Song Cc: Vlastimil Babka Cc: Wang Weiyang Cc: Yosry Ahmed Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol-v1.h | 6 +++--- mm/memcontrol.c | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index 144d71b65907..ecff454373e2 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -60,15 +60,15 @@ unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); void drain_all_stock(struct mem_cgroup *root_memcg); unsigned long memcg_events(struct mem_cgroup *memcg, int event); -unsigned long memcg_events_local(struct mem_cgroup *memcg, int event); -unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx); unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item); -unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item); int memory_stat_show(struct seq_file *m, void *v); /* Cgroup v1-specific declarations */ #ifdef CONFIG_MEMCG_V1 +unsigned long memcg_events_local(struct mem_cgroup *memcg, int event); +unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx); +unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item); bool memcg1_alloc_events(struct mem_cgroup *memcg); void memcg1_free_events(struct mem_cgroup *memcg); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 65056395a1b4..729b3e5f98f4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -706,6 +706,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, trace_mod_memcg_state(memcg, idx, val); } +#ifdef CONFIG_MEMCG_V1 /* idx can be of type enum memcg_stat_item or node_stat_item. */ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) { @@ -722,6 +723,7 @@ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) #endif return x; } +#endif static void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, @@ -869,6 +871,7 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event) return READ_ONCE(memcg->vmstats->events[i]); } +#ifdef CONFIG_MEMCG_V1 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) { int i = memcg_events_index(event); @@ -878,6 +881,7 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) return READ_ONCE(memcg->vmstats->events_local[i]); } +#endif struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) { @@ -1447,11 +1451,13 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) memcg_page_state_output_unit(item); } +#ifdef CONFIG_MEMCG_V1 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) { return memcg_page_state_local(memcg, item) * memcg_page_state_output_unit(item); } +#endif #ifdef CONFIG_HUGETLB_PAGE static bool memcg_accounts_hugetlb(void) From 75fe8ec2380233f7d80c2574d52119072f9eb63e Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 23 Jan 2025 23:38:58 -0500 Subject: [PATCH 011/431] mm: memcontrol: unshare v2-only charge API bits again 6b611388b626 ("memcg-v1: remove charge move code") removed the remaining v1 callers. 
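In pattern form (a hedged sketch; charge_core(), charge() and the charged counter are invented stand-ins, not the real try_charge helpers): once the last out-of-file caller is gone, the prototypes leave the shared header and both the worker and its thin wrapper become static in the one file that still uses them, so the compiler sees every caller and can warn when they go unused.

	/* foo.c -- previously declared in a shared internal header */
	static unsigned int charged;		/* stand-in for real per-cgroup state */

	static int charge_core(unsigned int nr_pages)
	{
		charged += nr_pages;		/* placeholder for the real charging work */
		return 0;
	}

	static inline int charge(unsigned int nr_pages)
	{
		if (!nr_pages)			/* the cheap early-out stays in the wrapper */
			return 0;
		return charge_core(nr_pages);
	}
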
Link: https://lkml.kernel.org/r/20250124043859.18808-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Roman Gushchin Acked-by: Shakeel Butt Acked-by: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol-v1.h | 15 --------------- mm/memcontrol.c | 17 +++++++++++++---- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index ecff454373e2..ffd2ac839185 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -7,21 +7,6 @@ /* Cgroup v1 and v2 common declarations */ -int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages); - -static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages) -{ - if (mem_cgroup_is_root(memcg)) - return 0; - - return try_charge_memcg(memcg, gfp_mask, nr_pages); -} - -void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n); -void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n); - /* * Iteration constructs for visiting all cgroups (under a tree). If * loops are exited prematurely (break), mem_cgroup_iter_break() must diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 729b3e5f98f4..7f1ca1065316 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2213,8 +2213,8 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask) css_put(&memcg->css); } -int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, - unsigned int nr_pages) +static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, + unsigned int nr_pages) { unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); int nr_retries = MAX_RECLAIM_RETRIES; @@ -2403,6 +2403,15 @@ int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, return 0; } +static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, + unsigned int nr_pages) +{ + if (mem_cgroup_is_root(memcg)) + return 0; + + return try_charge_memcg(memcg, gfp_mask, nr_pages); +} + static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) { VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); @@ -3389,13 +3398,13 @@ static void mem_cgroup_id_remove(struct mem_cgroup *memcg) } } -void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, +static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) { refcount_add(n, &memcg->id.ref); } -void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) +static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) { if (refcount_sub_and_test(n, &memcg->id.ref)) { mem_cgroup_id_remove(memcg); From 0d892bbbfa1c3b75569ef5075503bb785d1d52fd Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 23 Jan 2025 23:38:59 -0500 Subject: [PATCH 012/431] mm: memcontrol: move stray ratelimit bits to v1 41213dd0f816 ("memcg: move mem_cgroup_event_ratelimit to v1 code") left this one behind. There are no v2 references. 
Link: https://lkml.kernel.org/r/20250124043859.18808-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Roman Gushchin Acked-by: Shakeel Butt Acked-by: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol-v1.c | 13 +++++++++++++ mm/memcontrol-v1.h | 12 ------------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 2be6b9112808..6d184fae0ad1 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -490,6 +490,19 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg) } /* Cgroup1: threshold notifications & softlimit tree updates */ + +/* + * Per memcg event counter is incremented at every pagein/pageout. With THP, + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + */ +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_NTARGETS, +}; + struct memcg1_events_percpu { unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index ffd2ac839185..bfe44d2337a5 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -28,18 +28,6 @@ static inline bool do_memsw_account(void) return !cgroup_subsys_on_dfl(memory_cgrp_subsys); } -/* - * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremented by the number of pages. This counter is used - * to trigger some periodic events. This is straightforward and better - * than using jiffies etc. to handle periodic memcg event. - */ -enum mem_cgroup_events_target { - MEM_CGROUP_TARGET_THRESH, - MEM_CGROUP_TARGET_SOFTLIMIT, - MEM_CGROUP_NTARGETS, -}; - unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); void drain_all_stock(struct mem_cgroup *root_memcg); From 89ce924f0bd447eb52a5f224d879dbf8f09451db Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 24 Jan 2025 00:41:32 -0500 Subject: [PATCH 013/431] mm: memcontrol: move memsw charge callbacks to v1 The interweaving of two entirely different swap accounting strategies has been one of the more confusing parts of the memcg code. Split out the v1 code to clarify the implementation and a handful of callsites, and to avoid building the v1 bits when !CONFIG_MEMCG_V1. 
text data bss dec hex filename 39253 6446 4160 49859 c2c3 mm/memcontrol.o.old 38877 6382 4160 49419 c10b mm/memcontrol.o Link: https://lkml.kernel.org/r/20250124054132.45643-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Roman Gushchin Acked-by: Michal Hocko Acked-by: Balbir Singh Acked-by: Shakeel Butt Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 17 +++-- include/linux/swap.h | 5 -- mm/huge_memory.c | 2 +- mm/memcontrol-v1.c | 89 ++++++++++++++++++++++++- mm/memcontrol-v1.h | 6 +- mm/memcontrol.c | 129 ++++++------------------------------- mm/memory.c | 2 +- mm/shmem.c | 2 +- mm/swap_state.c | 2 +- mm/vmscan.c | 2 +- 10 files changed, 126 insertions(+), 130 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6e74b8254d9b..57664e2a8fb7 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -649,8 +649,6 @@ int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); - void __mem_cgroup_uncharge(struct folio *folio); /** @@ -1165,10 +1163,6 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr) -{ -} - static inline void mem_cgroup_uncharge(struct folio *folio) { } @@ -1848,6 +1842,9 @@ static inline void mem_cgroup_exit_user_fault(void) current->in_user_fault = 0; } +void memcg1_swapout(struct folio *folio, swp_entry_t entry); +void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages); + #else /* CONFIG_MEMCG_V1 */ static inline unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, @@ -1875,6 +1872,14 @@ static inline void mem_cgroup_exit_user_fault(void) { } +static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry) +{ +} + +static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages) +{ +} + #endif /* CONFIG_MEMCG_V1 */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index b13b72645db3..91b30701274e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -659,7 +659,6 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) #endif #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry); int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry); static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) @@ -680,10 +679,6 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct folio *folio); #else -static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) -{ -} - static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 373781b21e5c..118f2127c785 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3740,7 +3740,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped) /* * Exclude swapcache: originally to avoid a corrupt deferred split - * queue. Nowadays that is fully prevented by mem_cgroup_swapout(); + * queue. 
Nowadays that is fully prevented by memcg1_swapout(); * but if page reclaim is already handling the same folio, it is * unnecessary to handle it again in the shrinker, so excluding * swapcache here may still be a useful optimization. diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 6d184fae0ad1..c1feb3945350 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -581,8 +581,59 @@ void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg) local_irq_restore(flags); } -void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) +/** + * memcg1_swapout - transfer a memsw charge to swap + * @folio: folio whose memsw charge to transfer + * @entry: swap entry to move the charge to + * + * Transfer the memsw charge of @folio to @entry. + */ +void memcg1_swapout(struct folio *folio, swp_entry_t entry) { + struct mem_cgroup *memcg, *swap_memcg; + unsigned int nr_entries; + + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); + + if (mem_cgroup_disabled()) + return; + + if (!do_memsw_account()) + return; + + memcg = folio_memcg(folio); + + VM_WARN_ON_ONCE_FOLIO(!memcg, folio); + if (!memcg) + return; + + /* + * In case the memcg owning these pages has been offlined and doesn't + * have an ID allocated to it anymore, charge the closest online + * ancestor for the swap instead and transfer the memory+swap charge. + */ + swap_memcg = mem_cgroup_id_get_online(memcg); + nr_entries = folio_nr_pages(folio); + /* Get references for the tail pages, too */ + if (nr_entries > 1) + mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); + mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); + + swap_cgroup_record(folio, mem_cgroup_id(memcg), entry); + + folio_unqueue_deferred_split(folio); + folio->memcg_data = 0; + + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memory, nr_entries); + + if (memcg != swap_memcg) { + if (!mem_cgroup_is_root(swap_memcg)) + page_counter_charge(&swap_memcg->memsw, nr_entries); + page_counter_uncharge(&memcg->memsw, nr_entries); + } + /* * Interrupts should be disabled here because the caller holds the * i_pages lock which is taken with interrupts-off. It is @@ -594,6 +645,42 @@ void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) memcg1_charge_statistics(memcg, -folio_nr_pages(folio)); preempt_enable_nested(); memcg1_check_events(memcg, folio_nid(folio)); + + css_put(&memcg->css); +} + +/* + * memcg1_swapin - uncharge swap slot + * @entry: the first swap entry for which the pages are charged + * @nr_pages: number of pages which will be uncharged + * + * Call this function after successfully adding the charged page to swapcache. + * + * Note: This function assumes the page for which swap slot is being uncharged + * is order 0 page. + */ +void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages) +{ + /* + * Cgroup1's unified memory+swap counter has been charged with the + * new swapcache page, finish the transfer by uncharging the swap + * slot. The swap slot would also get uncharged when it dies, but + * it can stick around indefinitely and we'd count the page twice + * the entire time. + * + * Cgroup2 has separate resource counters for memory and swap, + * so this is a non-issue here. Memory and swap charge lifetimes + * correspond 1:1 to page and swap slot lifetimes: we charge the + * page to memory here, and uncharge swap when the slot is freed. + */ + if (do_memsw_account()) { + /* + * The swap entry might not get freed for a long time, + * let's not wait for it. 
The page already received a + * memory+swap charge, drop the swap entry duplicate. + */ + mem_cgroup_uncharge_swap(entry, nr_pages); + } } void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index bfe44d2337a5..653ff1bad244 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -36,6 +36,9 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event); unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item); int memory_stat_show(struct seq_file *m, void *v); +void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n); +struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg); + /* Cgroup v1-specific declarations */ #ifdef CONFIG_MEMCG_V1 @@ -69,7 +72,6 @@ void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked); void memcg1_oom_recover(struct mem_cgroup *memcg); void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg); -void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg); void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, unsigned long nr_memory, int nid); @@ -107,8 +109,6 @@ static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {} static inline void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg) {} -static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {} - static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, unsigned long nr_memory, int nid) {} diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7f1ca1065316..04973c084c63 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3398,7 +3398,7 @@ static void mem_cgroup_id_remove(struct mem_cgroup *memcg) } } -static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, +void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) { refcount_add(n, &memcg->id.ref); @@ -3419,6 +3419,24 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) mem_cgroup_id_put_many(memcg, 1); } +struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) +{ + while (!refcount_inc_not_zero(&memcg->id.ref)) { + /* + * The root cgroup cannot be destroyed, so it's refcount must + * always be >= 1. + */ + if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { + VM_BUG_ON(1); + break; + } + memcg = parent_mem_cgroup(memcg); + if (!memcg) + memcg = root_mem_cgroup; + } + return memcg; +} + /** * mem_cgroup_from_id - look up a memcg from a memcg id * @id: the memcg id to look up @@ -4604,40 +4622,6 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, return ret; } -/* - * mem_cgroup_swapin_uncharge_swap - uncharge swap slot - * @entry: the first swap entry for which the pages are charged - * @nr_pages: number of pages which will be uncharged - * - * Call this function after successfully adding the charged page to swapcache. - * - * Note: This function assumes the page for which swap slot is being uncharged - * is order 0 page. - */ -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) -{ - /* - * Cgroup1's unified memory+swap counter has been charged with the - * new swapcache page, finish the transfer by uncharging the swap - * slot. The swap slot would also get uncharged when it dies, but - * it can stick around indefinitely and we'd count the page twice - * the entire time. - * - * Cgroup2 has separate resource counters for memory and swap, - * so this is a non-issue here. 
Memory and swap charge lifetimes - * correspond 1:1 to page and swap slot lifetimes: we charge the - * page to memory here, and uncharge swap when the slot is freed. - */ - if (do_memsw_account()) { - /* - * The swap entry might not get freed for a long time, - * let's not wait for it. The page already received a - * memory+swap charge, drop the swap entry duplicate. - */ - mem_cgroup_uncharge_swap(entry, nr_pages); - } -} - struct uncharge_gather { struct mem_cgroup *memcg; unsigned long nr_memory; @@ -4963,81 +4947,6 @@ static int __init mem_cgroup_init(void) subsys_initcall(mem_cgroup_init); #ifdef CONFIG_SWAP -static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) -{ - while (!refcount_inc_not_zero(&memcg->id.ref)) { - /* - * The root cgroup cannot be destroyed, so it's refcount must - * always be >= 1. - */ - if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { - VM_BUG_ON(1); - break; - } - memcg = parent_mem_cgroup(memcg); - if (!memcg) - memcg = root_mem_cgroup; - } - return memcg; -} - -/** - * mem_cgroup_swapout - transfer a memsw charge to swap - * @folio: folio whose memsw charge to transfer - * @entry: swap entry to move the charge to - * - * Transfer the memsw charge of @folio to @entry. - */ -void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) -{ - struct mem_cgroup *memcg, *swap_memcg; - unsigned int nr_entries; - - VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); - - if (mem_cgroup_disabled()) - return; - - if (!do_memsw_account()) - return; - - memcg = folio_memcg(folio); - - VM_WARN_ON_ONCE_FOLIO(!memcg, folio); - if (!memcg) - return; - - /* - * In case the memcg owning these pages has been offlined and doesn't - * have an ID allocated to it anymore, charge the closest online - * ancestor for the swap instead and transfer the memory+swap charge. 
- */ - swap_memcg = mem_cgroup_id_get_online(memcg); - nr_entries = folio_nr_pages(folio); - /* Get references for the tail pages, too */ - if (nr_entries > 1) - mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); - mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); - - swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry); - - folio_unqueue_deferred_split(folio); - folio->memcg_data = 0; - - if (!mem_cgroup_is_root(memcg)) - page_counter_uncharge(&memcg->memory, nr_entries); - - if (memcg != swap_memcg) { - if (!mem_cgroup_is_root(swap_memcg)) - page_counter_charge(&swap_memcg->memsw, nr_entries); - page_counter_uncharge(&memcg->memsw, nr_entries); - } - - memcg1_swapout(folio, memcg); - css_put(&memcg->css); -} - /** * __mem_cgroup_try_charge_swap - try charging swap space for a folio * @folio: folio being added to swap diff --git a/mm/memory.c b/mm/memory.c index b9661ccfa64f..f77c10fd26b9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4407,7 +4407,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } need_clear_cache = true; - mem_cgroup_swapin_uncharge_swap(entry, nr_pages); + memcg1_swapin(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) diff --git a/mm/shmem.c b/mm/shmem.c index 1ede0800e846..15fa7fa9c8e8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2017,7 +2017,7 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode, __folio_set_swapbacked(new); new->swap = entry; - mem_cgroup_swapin_uncharge_swap(entry, nr_pages); + memcg1_swapin(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) workingset_refault(new, shadow); diff --git a/mm/swap_state.c b/mm/swap_state.c index ca42b2be64d9..2e1acb210e57 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -521,7 +521,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; - mem_cgroup_swapin_uncharge_swap(entry, 1); + memcg1_swapin(entry, 1); if (shadow) workingset_refault(new_folio, shadow); diff --git a/mm/vmscan.c b/mm/vmscan.c index c767d71c43d7..fc4951d23b97 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -769,7 +769,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, if (reclaimed && !mapping_exiting(mapping)) shadow = workingset_eviction(folio, target_memcg); __delete_from_swap_cache(folio, swap, shadow); - mem_cgroup_swapout(folio, swap); + memcg1_swapout(folio, swap); xa_unlock_irq(&mapping->i_pages); put_swap_folio(folio, swap); } else { From 5f6084f95bc1f0a0b95e58e49be1ee74b01f5144 Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Thu, 23 Jan 2025 19:35:22 +0000 Subject: [PATCH 014/431] mm/oom_kill: fix trivial typo in comment Update 'give' -> 'given' in the description of oom_reap_task_mm(). Link: https://lkml.kernel.org/r/20250123193523.1496909-1-cmllamas@google.com Signed-off-by: Carlos Llamas Signed-off-by: Andrew Morton --- mm/oom_kill.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1cf121ad7085..25923cfec9c6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -563,7 +563,7 @@ static bool __oom_reap_task_mm(struct mm_struct *mm) } /* - * Reaps the address space of the give task. + * Reaps the address space of the given task. * * Returns true on success and false if none or part of the address space * has been reclaimed and the caller should retry later. 
From 035a112e5fd5b93a1d34c5d736bb515b2f9fa52f Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Wed, 22 Jan 2025 11:19:26 -0500 Subject: [PATCH 015/431] selftests/mm: make file-backed THP split work by writing PMD size data Commit acd7ccb284b8 ("mm: shmem: add large folio support for tmpfs") changes huge=always to allocate THP/mTHP based on write size and split_huge_page_test does not write PMD size data, so file-back THP is not created during the test. Fix it by writing PMD size data. Link: https://lkml.kernel.org/r/20250122161928.1240637-1-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Miaohe Lin Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- .../selftests/mm/split_huge_page_test.c | 52 ++++++++++++++++--- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 3f353f3d070f..ba498aaaf857 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -265,14 +265,28 @@ void split_file_backed_thp(void) { int status; int fd; - ssize_t num_written; char tmpfs_template[] = "/tmp/thp_split_XXXXXX"; const char *tmpfs_loc = mkdtemp(tmpfs_template); char testfile[INPUT_MAX]; + ssize_t num_written, num_read; + char *file_buf1, *file_buf2; uint64_t pgoff_start = 0, pgoff_end = 1024; + int i; ksft_print_msg("Please enable pr_debug in split_huge_pages_in_file() for more info.\n"); + file_buf1 = (char *)malloc(pmd_pagesize); + file_buf2 = (char *)malloc(pmd_pagesize); + + if (!file_buf1 || !file_buf2) { + ksft_print_msg("cannot allocate file buffers\n"); + goto out; + } + + for (i = 0; i < pmd_pagesize; i++) + file_buf1[i] = (char)i; + memset(file_buf2, 0, pmd_pagesize); + status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m"); if (status) @@ -281,26 +295,45 @@ void split_file_backed_thp(void) status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc); if (status >= INPUT_MAX) { ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n"); + goto cleanup; } - fd = open(testfile, O_CREAT|O_WRONLY, 0664); + fd = open(testfile, O_CREAT|O_RDWR, 0664); if (fd == -1) { ksft_perror("Cannot open testing file"); goto cleanup; } - /* write something to the file, so a file-backed THP can be allocated */ - num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1); - close(fd); + /* write pmd size data to the file, so a file-backed THP can be allocated */ + num_written = write(fd, file_buf1, pmd_pagesize); - if (num_written < 1) { - ksft_perror("Fail to write data to testing file"); - goto cleanup; + if (num_written == -1 || num_written != pmd_pagesize) { + ksft_perror("Failed to write data to testing file"); + goto close_file; } /* split the file-backed THP */ write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0); + /* check file content after split */ + status = lseek(fd, 0, SEEK_SET); + if (status == -1) { + ksft_perror("Cannot lseek file"); + goto close_file; + } + + num_read = read(fd, file_buf2, num_written); + if (num_read == -1 || num_read != num_written) { + ksft_perror("Cannot read file content back"); + goto close_file; + } + + if (strncmp(file_buf1, file_buf2, pmd_pagesize) != 0) { + ksft_print_msg("File content changed\n"); + goto close_file; + } + + close(fd); status = unlink(testfile); if (status) { ksft_perror("Cannot remove testing file"); @@ 
-321,9 +354,12 @@ void split_file_backed_thp(void) ksft_test_result_pass("File-backed THP split test done\n"); return; +close_file: + close(fd); cleanup: umount(tmpfs_loc); rmdir(tmpfs_loc); +out: ksft_exit_fail_msg("Error occurred\n"); } From 9b2f764933eb5e3ac9ebba26e3341529219c4401 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Wed, 22 Jan 2025 11:19:27 -0500 Subject: [PATCH 016/431] mm/huge_memory: allow split shmem large folio to any lower order Commit 4d684b5f92ba ("mm: shmem: add large folio support for tmpfs") has added large folio support to shmem. Remove the restriction in split_huge_page*(). Link: https://lkml.kernel.org/r/20250122161928.1240637-2-ziy@nvidia.com Signed-off-by: Zi Yan Reviewed-by: Baolin Wang Reviewed-by: Yang Shi Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Miaohe Lin Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/huge_memory.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 118f2127c785..e33da765c428 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3299,7 +3299,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, /* Some pages can be beyond EOF: drop them from page cache */ if (tail->index >= end) { if (shmem_mapping(folio->mapping)) - nr_dropped++; + nr_dropped += new_nr; else if (folio_test_clear_dirty(tail)) folio_account_cleaned(tail, inode_to_wb(folio->mapping->host)); @@ -3465,12 +3465,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, return -EINVAL; } } else if (new_order) { - /* Split shmem folio to non-zero order not supported */ - if (shmem_mapping(folio->mapping)) { - VM_WARN_ONCE(1, - "Cannot split shmem folio to non-0 order"); - return -EINVAL; - } /* * No split if the file system does not support large folio. * Note that we might still have THPs in such mappings due to From ad4c9bb5415232b452157002a48d58a1dc92d3c0 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Wed, 22 Jan 2025 11:19:28 -0500 Subject: [PATCH 017/431] selftests/mm: test splitting file-backed THP to any lower order Now split_huge_page*() supports shmem THP split to any lower order. Test it. The test now reads file content out after split to check if the split corrupts the file data. Link: https://lkml.kernel.org/r/20250122161928.1240637-3-ziy@nvidia.com Signed-off-by: Zi Yan Reviewed-by: Baolin Wang Tested-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. 
Shuemov Cc: Miaohe Lin Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/split_huge_page_test.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index ba498aaaf857..e0304046b1a0 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -261,7 +261,7 @@ void split_pte_mapped_thp(void) close(kpageflags_fd); } -void split_file_backed_thp(void) +void split_file_backed_thp(int order) { int status; int fd; @@ -313,7 +313,7 @@ void split_file_backed_thp(void) } /* split the file-backed THP */ - write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0); + write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, order); /* check file content after split */ status = lseek(fd, 0, SEEK_SET); @@ -351,7 +351,7 @@ void split_file_backed_thp(void) ksft_exit_fail_msg("cannot remove tmp dir: %s\n", strerror(errno)); ksft_print_msg("Please check dmesg for more information\n"); - ksft_test_result_pass("File-backed THP split test done\n"); + ksft_test_result_pass("File-backed THP split to order %d test done\n", order); return; close_file: @@ -517,7 +517,7 @@ int main(int argc, char **argv) if (argc > 1) optional_xfs_path = argv[1]; - ksft_set_plan(1+8+2+9); + ksft_set_plan(1+8+1+9+9); pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; @@ -534,7 +534,8 @@ int main(int argc, char **argv) split_pmd_thp_to_order(i); split_pte_mapped_thp(); - split_file_backed_thp(); + for (i = 0; i < 9; i++) + split_file_backed_thp(i); created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template, &fs_loc); From 100bc3b877fca43e46485883eaf179aec7a11c86 Mon Sep 17 00:00:00 2001 From: Shiyang Ruan Date: Wed, 8 Jan 2025 09:52:23 +0800 Subject: [PATCH 018/431] drivers/base/memory: simplify outputting of valid_zones_show() No need to specify position at the first writing to the buf because the @len is always 0 at this time. Use sysfs_emit() instead to simplify it. Also avoid setting/checking default_zone with a conditional operator. Link: https://lkml.kernel.org/r/20250108015223.1522887-1-ruansy.fnst@fujitsu.com Signed-off-by: Shiyang Ruan Acked-by: David Hildenbrand Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton --- drivers/base/memory.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 348c5dbbfa68..4765f2928725 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -455,7 +455,7 @@ static ssize_t valid_zones_show(struct device *dev, struct memory_group *group = mem->group; struct zone *default_zone; int nid = mem->nid; - int len = 0; + int len; /* * Check the existing zone. Make sure that we do that only on the @@ -466,22 +466,18 @@ static ssize_t valid_zones_show(struct device *dev, * If !mem->zone, the memory block spans multiple zones and * cannot get offlined. */ - default_zone = mem->zone; - if (!default_zone) - return sysfs_emit(buf, "%s\n", "none"); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); - goto out; + return sysfs_emit(buf, "%s\n", + mem->zone ? 
mem->zone->name : "none"); } default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group, start_pfn, nr_pages); - len += sysfs_emit_at(buf, len, "%s", default_zone->name); + len = sysfs_emit(buf, "%s", default_zone->name); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, default_zone); len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, default_zone); -out: len += sysfs_emit_at(buf, len, "\n"); return len; } From 8977752c8056a6a094a279004a49722da15bace3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:43 +0100 Subject: [PATCH 019/431] mm/gup: reject FOLL_SPLIT_PMD with hugetlb VMAs Patch series "mm: fixes for device-exclusive entries (hmm)", v2. Discussing the PageTail() call in make_device_exclusive_range() with Willy, I recently discovered [1] that device-exclusive handling does not properly work with THP, making the hmm-tests selftests fail if THPs are enabled on the system. Looking into more details, I found that hugetlb is not properly fenced, and I realized that something that was bugging me for longer -- how device-exclusive entries interact with mapcounts -- completely breaks migration/swapout/split/hwpoison handling of these folios while they have device-exclusive PTEs. The program below can be used to allocate 1 GiB worth of pages and making them device-exclusive on a kernel with CONFIG_TEST_HMM. Once they are device-exclusive, these folios cannot get swapped out (proc$pid/smaps_rollup will always indicate 1 GiB RSS no matter how much one forces memory reclaim), and when having a memory block onlined to ZONE_MOVABLE, trying to offline it will loop forever and complain about failed migration of a page that should be movable. # echo offline > /sys/devices/system/memory/memory136/state # echo online_movable > /sys/devices/system/memory/memory136/state # ./hmm-swap & ... wait until everything is device-exclusive # echo offline > /sys/devices/system/memory/memory136/state [ 285.193431][T14882] page: refcount:2 mapcount:0 mapping:0000000000000000 index:0x7f20671f7 pfn:0x442b6a [ 285.196618][T14882] memcg:ffff888179298000 [ 285.198085][T14882] anon flags: 0x5fff0000002091c(referenced|uptodate| dirty|active|owner_2|swapbacked|node=1|zone=3|lastcpupid=0x7ff) [ 285.201734][T14882] raw: ... [ 285.204464][T14882] raw: ... [ 285.207196][T14882] page dumped because: migration failure [ 285.209072][T14882] page_owner tracks the page as allocated [ 285.210915][T14882] page last allocated via order 0, migratetype Movable, gfp_mask 0x140dca(GFP_HIGHUSER_MOVABLE|__GFP_COMP|__GFP_ZERO), id 14926, tgid 14926 (hmm-swap), ts 254506295376, free_ts 227402023774 [ 285.216765][T14882] post_alloc_hook+0x197/0x1b0 [ 285.218874][T14882] get_page_from_freelist+0x76e/0x3280 [ 285.220864][T14882] __alloc_frozen_pages_noprof+0x38e/0x2740 [ 285.223302][T14882] alloc_pages_mpol+0x1fc/0x540 [ 285.225130][T14882] folio_alloc_mpol_noprof+0x36/0x340 [ 285.227222][T14882] vma_alloc_folio_noprof+0xee/0x1a0 [ 285.229074][T14882] __handle_mm_fault+0x2b38/0x56a0 [ 285.230822][T14882] handle_mm_fault+0x368/0x9f0 ... This series fixes all issues I found so far. There is no easy way to fix without a bigger rework/cleanup. I have a bunch of cleanups on top (some previous sent, some the result of the discussion in v1) that I will send out separately once this landed and I get to it. 
I wish we could just use some special present PROT_NONE PTEs instead of these (non-present, non-none) fake-swap entries; but that just results in the same problem we keep having (lack of spare PTE bits), and staring at other similar fake-swap entries, that ship has sailed. With this series, make_device_exclusive() doesn't actually belong into mm/rmap.c anymore, but I'll leave moving that for another day. I only tested this series with the hmm-tests selftests due to lack of HW, so I'd appreciate some testing, especially if the interaction between two GPUs wanting a device-exclusive entry works as expected. #include #include #include #include #include #include #include #include #include #include #define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) struct hmm_dmirror_cmd { __u64 addr; __u64 ptr; __u64 npages; __u64 cpages; __u64 faults; }; const size_t size = 1 * 1024 * 1024 * 1024ul; const size_t chunk_size = 2 * 1024 * 1024ul; int main(void) { struct hmm_dmirror_cmd cmd; size_t cur_size; int fd, ret; char *addr, *mirror; fd = open("/dev/hmm_dmirror1", O_RDWR, 0); if (fd < 0) { perror("open failed\n"); exit(1); } addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { perror("mmap failed\n"); exit(1); } madvise(addr, size, MADV_NOHUGEPAGE); memset(addr, 1, size); mirror = malloc(chunk_size); for (cur_size = 0; cur_size < size; cur_size += chunk_size) { cmd.addr = (uintptr_t)addr + cur_size; cmd.ptr = (uintptr_t)mirror; cmd.npages = chunk_size / getpagesize(); ret = ioctl(fd, HMM_DMIRROR_EXCLUSIVE, &cmd); if (ret) { perror("ioctl failed\n"); exit(1); } } pause(); return 0; } [1] https://lkml.kernel.org/r/25e02685-4f1d-47fa-be5b-01ff85bb0ce2@redhat.com This patch (of 17): We only have two FOLL_SPLIT_PMD users. While uprobe refuses hugetlb early, make_device_exclusive_range() can end up getting called on hugetlb VMAs. Right now, this means that with a PMD-sized hugetlb page, we can end up calling split_huge_pmd(), because pmd_trans_huge() also succeeds with hugetlb PMDs. For example, using a modified hmm-test selftest one can trigger: [ 207.017134][T14945] ------------[ cut here ]------------ [ 207.018614][T14945] kernel BUG at mm/page_table_check.c:87! [ 207.019716][T14945] Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 207.021072][T14945] CPU: 3 UID: 0 PID: ... [ 207.023036][T14945] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-2.fc40 04/01/2014 [ 207.024834][T14945] RIP: 0010:page_table_check_clear.part.0+0x488/0x510 [ 207.026128][T14945] Code: ... [ 207.029965][T14945] RSP: 0018:ffffc9000cb8f348 EFLAGS: 00010293 [ 207.031139][T14945] RAX: 0000000000000000 RBX: 00000000ffffffff RCX: ffffffff8249a0cd [ 207.032649][T14945] RDX: ffff88811e883c80 RSI: ffffffff8249a357 RDI: ffff88811e883c80 [ 207.034183][T14945] RBP: ffff888105c0a050 R08: 0000000000000005 R09: 0000000000000000 [ 207.035688][T14945] R10: 00000000ffffffff R11: 0000000000000003 R12: 0000000000000001 [ 207.037203][T14945] R13: 0000000000000200 R14: 0000000000000001 R15: dffffc0000000000 [ 207.038711][T14945] FS: 00007f2783275740(0000) GS:ffff8881f4980000(0000) knlGS:0000000000000000 [ 207.040407][T14945] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 207.041660][T14945] CR2: 00007f2782c00000 CR3: 0000000132356000 CR4: 0000000000750ef0 [ 207.043196][T14945] PKRU: 55555554 [ 207.043880][T14945] Call Trace: [ 207.044506][T14945] [ 207.045086][T14945] ? __die+0x51/0x92 [ 207.045864][T14945] ? die+0x29/0x50 [ 207.046596][T14945] ? 
do_trap+0x250/0x320 [ 207.047430][T14945] ? do_error_trap+0xe7/0x220 [ 207.048346][T14945] ? page_table_check_clear.part.0+0x488/0x510 [ 207.049535][T14945] ? handle_invalid_op+0x34/0x40 [ 207.050494][T14945] ? page_table_check_clear.part.0+0x488/0x510 [ 207.051681][T14945] ? exc_invalid_op+0x2e/0x50 [ 207.052589][T14945] ? asm_exc_invalid_op+0x1a/0x20 [ 207.053596][T14945] ? page_table_check_clear.part.0+0x1fd/0x510 [ 207.054790][T14945] ? page_table_check_clear.part.0+0x487/0x510 [ 207.055993][T14945] ? page_table_check_clear.part.0+0x488/0x510 [ 207.057195][T14945] ? page_table_check_clear.part.0+0x487/0x510 [ 207.058384][T14945] __page_table_check_pmd_clear+0x34b/0x5a0 [ 207.059524][T14945] ? __pfx___page_table_check_pmd_clear+0x10/0x10 [ 207.060775][T14945] ? __pfx___mutex_unlock_slowpath+0x10/0x10 [ 207.061940][T14945] ? __pfx___lock_acquire+0x10/0x10 [ 207.062967][T14945] pmdp_huge_clear_flush+0x279/0x360 [ 207.064024][T14945] split_huge_pmd_locked+0x82b/0x3750 ... Before commit 9cb28da54643 ("mm/gup: handle hugetlb in the generic follow_page_mask code"), we would have ignored the flag; instead, let's simply refuse the combination completely in check_vma_flags(): the caller is likely not prepared to handle any hugetlb folios. We'll teach make_device_exclusive_range() separately to ignore any hugetlb folios as a future-proof safety net. Link: https://lkml.kernel.org/r/20250210193801.781278-1-david@redhat.com Link: https://lkml.kernel.org/r/20250210193801.781278-2-david@redhat.com Fixes: 9cb28da54643 ("mm/gup: handle hugetlb in the generic follow_page_mask code") Signed-off-by: David Hildenbrand Reviewed-by: John Hubbard Reviewed-by: Alistair Popple Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Vlastimil Babka Cc: Yanteng Si Cc: Simona Vetter Cc: Barry Song Cc: Signed-off-by: Andrew Morton --- mm/gup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/gup.c b/mm/gup.c index 3883b307780e..61e751baf862 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1283,6 +1283,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) return -EOPNOTSUPP; + if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma)) + return -EOPNOTSUPP; + if (vma_is_secretmem(vma)) return -EFAULT; From bc3fe6805cf09a25a086573a17d40e525208c5d8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:44 +0100 Subject: [PATCH 020/431] mm/rmap: reject hugetlb folios in folio_make_device_exclusive() Even though FOLL_SPLIT_PMD on hugetlb now always fails with -EOPNOTSUPP, let's add a safety net in case FOLL_SPLIT_PMD usage would ever be reworked. In particular, before commit 9cb28da54643 ("mm/gup: handle hugetlb in the generic follow_page_mask code"), GUP(FOLL_SPLIT_PMD) would just have returned a page. In particular, hugetlb folios that are not PMD-sized would never have been prone to FOLL_SPLIT_PMD. hugetlb folios can be anonymous, and page_make_device_exclusive_one() is not really prepared for handling them at all. So let's spell that out. 
Link: https://lkml.kernel.org/r/20250210193801.781278-3-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Reviewed-by: Alistair Popple Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Cc: Signed-off-by: Andrew Morton --- mm/rmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/rmap.c b/mm/rmap.c index c6c4d4ea29a7..17fbfa61f7ef 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2499,7 +2499,7 @@ static bool folio_make_device_exclusive(struct folio *folio, * Restrict to anonymous folios for now to avoid potential writeback * issues. */ - if (!folio_test_anon(folio)) + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) return false; rmap_walk(folio, &rwc); From 599b684a7854e51a51358fe59bbdfea281f0b461 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:45 +0100 Subject: [PATCH 021/431] mm/rmap: convert make_device_exclusive_range() to make_device_exclusive() The single "real" user in the tree of make_device_exclusive_range() always requests making only a single address exclusive. The current implementation is hard to fix for properly supporting anonymous THP / large folios and for avoiding messing with rmap walks in weird ways. So let's always process a single address/page and return folio + page to minimize page -> folio lookups. This is a preparation for further changes. Reject any non-anonymous or hugetlb folios early, directly after GUP. While at it, extend the documentation of make_device_exclusive() to clarify some things. Link: https://lkml.kernel.org/r/20250210193801.781278-4-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Simona Vetter Reviewed-by: Alistair Popple Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- Documentation/mm/hmm.rst | 2 +- Documentation/translations/zh_CN/mm/hmm.rst | 2 +- drivers/gpu/drm/nouveau/nouveau_svm.c | 5 +- include/linux/mmu_notifier.h | 2 +- include/linux/rmap.h | 5 +- lib/test_hmm.c | 41 +++----- mm/rmap.c | 103 ++++++++++++-------- 7 files changed, 83 insertions(+), 77 deletions(-) diff --git a/Documentation/mm/hmm.rst b/Documentation/mm/hmm.rst index f6d53c37a2ca..7d61b7a8b65b 100644 --- a/Documentation/mm/hmm.rst +++ b/Documentation/mm/hmm.rst @@ -400,7 +400,7 @@ Exclusive access memory Some devices have features such as atomic PTE bits that can be used to implement atomic access to system memory. To support atomic operations to a shared virtual memory page such a device needs access to that page which is exclusive of any -userspace access from the CPU. The ``make_device_exclusive_range()`` function +userspace access from the CPU. The ``make_device_exclusive()`` function can be used to make a memory range inaccessible from userspace. 
This replaces all mappings for pages in the given range with special swap diff --git a/Documentation/translations/zh_CN/mm/hmm.rst b/Documentation/translations/zh_CN/mm/hmm.rst index 0669f947d0bc..22c210f4e94f 100644 --- a/Documentation/translations/zh_CN/mm/hmm.rst +++ b/Documentation/translations/zh_CN/mm/hmm.rst @@ -326,7 +326,7 @@ devm_memunmap_pages() 和 devm_release_mem_region() 当资源可以绑定到 ``s 一些设备具有诸如原子PTE位的功能,可以用来实现对系统内存的原子访问。为了支持对一 个共享的虚拟内存页的原子操作,这样的设备需要对该页的访问是排他的,而不是来自CPU -的任何用户空间访问。 ``make_device_exclusive_range()`` 函数可以用来使一 +的任何用户空间访问。 ``make_device_exclusive()`` 函数可以用来使一 个内存范围不能从用户空间访问。 这将用特殊的交换条目替换给定范围内的所有页的映射。任何试图访问交换条目的行为都会 diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8ea98f06d39a..aa22d8458d22 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -610,10 +610,9 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, notifier_seq = mmu_interval_read_begin(¬ifier->notifier); mmap_read_lock(mm); - ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE, - &page, drm->dev); + page = make_device_exclusive(mm, start, drm->dev, &folio); mmap_read_unlock(mm); - if (ret <= 0 || !page) { + if (IS_ERR(page)) { ret = -EINVAL; goto out; } diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index e2dd57ca368b..d4e714661826 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -46,7 +46,7 @@ struct mmu_interval_notifier; * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no * longer have exclusive access to the page. When sent during creation of an * exclusive range the owner will be initialised to the value provided by the - * caller of make_device_exclusive_range(), otherwise the owner will be NULL. + * caller of make_device_exclusive(), otherwise the owner will be NULL. 
*/ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 683a04088f3f..86425d42c1a9 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -663,9 +663,8 @@ int folio_referenced(struct folio *, int is_locked, void try_to_migrate(struct folio *folio, enum ttu_flags flags); void try_to_unmap(struct folio *, enum ttu_flags flags); -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, - unsigned long end, struct page **pages, - void *arg); +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, + void *owner, struct folio **foliop); /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 056f2e411d7b..e4afca8d1880 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -780,10 +780,8 @@ static int dmirror_exclusive(struct dmirror *dmirror, unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; - struct page *pages[64]; struct dmirror_bounce bounce; - unsigned long next; - int ret; + int ret = 0; start = cmd->addr; end = start + size; @@ -795,36 +793,27 @@ static int dmirror_exclusive(struct dmirror *dmirror, return -EINVAL; mmap_read_lock(mm); - for (addr = start; addr < end; addr = next) { - unsigned long mapped = 0; - int i; + for (addr = start; !ret && addr < end; addr += PAGE_SIZE) { + struct folio *folio; + struct page *page; - next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT)); - - ret = make_device_exclusive_range(mm, addr, next, pages, NULL); - /* - * Do dmirror_atomic_map() iff all pages are marked for - * exclusive access to avoid accessing uninitialized - * fields of pages. - */ - if (ret == (next - addr) >> PAGE_SHIFT) - mapped = dmirror_atomic_map(addr, next, pages, dmirror); - for (i = 0; i < ret; i++) { - if (pages[i]) { - unlock_page(pages[i]); - put_page(pages[i]); - } + page = make_device_exclusive(mm, addr, NULL, &folio); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + break; } - if (addr + (mapped << PAGE_SHIFT) < next) { - mmap_read_unlock(mm); - mmput(mm); - return -EBUSY; - } + ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror); + ret = ret == 1 ? 0 : -EBUSY; + folio_unlock(folio); + folio_put(folio); } mmap_read_unlock(mm); mmput(mm); + if (ret) + return ret; + /* Return the migrated data for verification. */ ret = dmirror_bounce_init(&bounce, start, size); if (ret) diff --git a/mm/rmap.c b/mm/rmap.c index 17fbfa61f7ef..7ccf850565d3 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2495,70 +2495,89 @@ static bool folio_make_device_exclusive(struct folio *folio, .arg = &args, }; - /* - * Restrict to anonymous folios for now to avoid potential writeback - * issues. - */ - if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) - return false; - rmap_walk(folio, &rwc); return args.valid && !folio_mapcount(folio); } /** - * make_device_exclusive_range() - Mark a range for exclusive use by a device + * make_device_exclusive() - Mark a page for exclusive use by a device * @mm: mm_struct of associated target process - * @start: start of the region to mark for exclusive device access - * @end: end address of region - * @pages: returns the pages which were successfully marked for exclusive access + * @addr: the virtual address to mark for exclusive device access * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering + * @foliop: folio pointer will be stored here on success. 
* - * Returns: number of pages found in the range by GUP. A page is marked for - * exclusive access only if the page pointer is non-NULL. + * This function looks up the page mapped at the given address, grabs a + * folio reference, locks the folio and replaces the PTE with special + * device-exclusive PFN swap entry, preventing access through the process + * page tables. The function will return with the folio locked and referenced. * - * This function finds ptes mapping page(s) to the given address range, locks - * them and replaces mappings with special swap entries preventing userspace CPU - * access. On fault these entries are replaced with the original mapping after - * calling MMU notifiers. + * On fault, the device-exclusive entries are replaced with the original PTE + * under folio lock, after calling MMU notifiers. + * + * Only anonymous non-hugetlb folios are supported and the VMA must have + * write permissions such that we can fault in the anonymous page writable + * in order to mark it exclusive. The caller must hold the mmap_lock in read + * mode. * * A driver using this to program access from a device must use a mmu notifier * critical section to hold a device specific lock during programming. Once - * programming is complete it should drop the page lock and reference after + * programming is complete it should drop the folio lock and reference after * which point CPU access to the page will revoke the exclusive access. + * + * Notes: + * #. This function always operates on individual PTEs mapping individual + * pages. PMD-sized THPs are first remapped to be mapped by PTEs before + * the conversion happens on a single PTE corresponding to @addr. + * #. While concurrent access through the process page tables is prevented, + * concurrent access through other page references (e.g., earlier GUP + * invocation) is not handled and not supported. + * #. device-exclusive entries are considered "clean" and "old" by core-mm. + * Device drivers must update the folio state when informed by MMU + * notifiers. + * + * Returns: pointer to mapped page on success, otherwise a negative error. */ -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, - unsigned long end, struct page **pages, - void *owner) +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, + void *owner, struct folio **foliop) { - long npages = (end - start) >> PAGE_SHIFT; - long i; + struct folio *folio; + struct page *page; + long npages; - npages = get_user_pages_remote(mm, start, npages, + mmap_assert_locked(mm); + + /* + * Fault in the page writable and try to lock it; note that if the + * address would already be marked for exclusive use by a device, + * the GUP call would undo that first by triggering a fault. 
+ */ + npages = get_user_pages_remote(mm, addr, 1, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, - pages, NULL); - if (npages < 0) - return npages; + &page, NULL); + if (npages != 1) + return ERR_PTR(npages); + folio = page_folio(page); - for (i = 0; i < npages; i++, start += PAGE_SIZE) { - struct folio *folio = page_folio(pages[i]); - if (PageTail(pages[i]) || !folio_trylock(folio)) { - folio_put(folio); - pages[i] = NULL; - continue; - } - - if (!folio_make_device_exclusive(folio, mm, start, owner)) { - folio_unlock(folio); - folio_put(folio); - pages[i] = NULL; - } + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { + folio_put(folio); + return ERR_PTR(-EOPNOTSUPP); } - return npages; + if (!folio_trylock(folio)) { + folio_put(folio); + return ERR_PTR(-EBUSY); + } + + if (!folio_make_device_exclusive(folio, mm, addr, owner)) { + folio_unlock(folio); + folio_put(folio); + return ERR_PTR(-EBUSY); + } + *foliop = folio; + return page; } -EXPORT_SYMBOL_GPL(make_device_exclusive_range); +EXPORT_SYMBOL_GPL(make_device_exclusive); #endif void __put_anon_vma(struct anon_vma *anon_vma) From 438354724f69b5a717b6cd366db35b7034a8d2f9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:46 +0100 Subject: [PATCH 022/431] mm/rmap: implement make_device_exclusive() using folio_walk instead of rmap walk We require a writable PTE and only support anonymous folio: we can only have exactly one PTE pointing at that page, which we can just lookup using a folio walk, avoiding the rmap walk and the anon VMA lock. So let's stop doing an rmap walk and perform a folio walk instead, so we can easily just modify a single PTE and avoid relying on rmap/mapcounts. We now effectively work on a single PTE instead of multiple PTEs of a large folio, allowing for conversion of individual PTEs from non-exclusive to device-exclusive -- note that the opposite direction always works on single PTEs: restore_exclusive_pte(). With this change, device-exclusive handling is fully compatible with THPs / large folios. We still require PMD-sized THPs to get PTE-mapped, and supporting PMD-mapped THP (without the PTE-remapping) is a different endeavour that might not be worth it at this point: it might even have negative side-effects [1]. This gets rid of the "folio_mapcount()" usage and let's us fix ordinary rmap walks (migration/swapout) next. Spell out that messing with the mapcount is wrong and must be fixed. 
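For readers unfamiliar with it, the folio_walk pattern the new implementation builds on looks roughly like the following. This is a simplified sketch rather than the actual change (mmap_lock handling, error paths and the MMU notifier setup are omitted, and do_stuff() is a made-up placeholder):

	struct folio_walk fw;
	struct folio *folio;

	/* Walk the page tables at addr; returns with the page table lock held. */
	folio = folio_walk_start(&fw, vma, addr, 0);
	if (folio) {
		/* Only act on a page mapped writable by a single PTE. */
		if (fw.level == FW_LEVEL_PTE && pte_write(fw.pte))
			do_stuff(folio, fw.page, fw.ptep);
		folio_walk_end(&fw, vma);
	}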
[1] https://lkml.kernel.org/r/Z5tI-cOSyzdLjoe_@phenom.ffwll.local Link: https://lkml.kernel.org/r/20250210193801.781278-5-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 200 ++++++++++++++++++------------------------------------ 1 file changed, 67 insertions(+), 133 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 7ccf850565d3..0cd2a2d3de00 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2375,131 +2375,6 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags) } #ifdef CONFIG_DEVICE_PRIVATE -struct make_exclusive_args { - struct mm_struct *mm; - unsigned long address; - void *owner; - bool valid; -}; - -static bool page_make_device_exclusive_one(struct folio *folio, - struct vm_area_struct *vma, unsigned long address, void *priv) -{ - struct mm_struct *mm = vma->vm_mm; - DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); - struct make_exclusive_args *args = priv; - pte_t pteval; - struct page *subpage; - bool ret = true; - struct mmu_notifier_range range; - swp_entry_t entry; - pte_t swp_pte; - pte_t ptent; - - mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, - vma->vm_mm, address, min(vma->vm_end, - address + folio_size(folio)), - args->owner); - mmu_notifier_invalidate_range_start(&range); - - while (page_vma_mapped_walk(&pvmw)) { - /* Unexpected PMD-mapped THP? */ - VM_BUG_ON_FOLIO(!pvmw.pte, folio); - - ptent = ptep_get(pvmw.pte); - if (!pte_present(ptent)) { - ret = false; - page_vma_mapped_walk_done(&pvmw); - break; - } - - subpage = folio_page(folio, - pte_pfn(ptent) - folio_pfn(folio)); - address = pvmw.address; - - /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(ptent)); - pteval = ptep_clear_flush(vma, address, pvmw.pte); - - /* Set the dirty flag on the folio now the pte is gone. */ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); - - /* - * Check that our target page is still mapped at the expected - * address. - */ - if (args->mm == mm && args->address == address && - pte_write(pteval)) - args->valid = true; - - /* - * Store the pfn of the page in a special migration - * pte. do_swap_page() will wait until the migration - * pte is removed and then restart fault handling. - */ - if (pte_write(pteval)) - entry = make_writable_device_exclusive_entry( - page_to_pfn(subpage)); - else - entry = make_readable_device_exclusive_entry( - page_to_pfn(subpage)); - swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); - - set_pte_at(mm, address, pvmw.pte, swp_pte); - - /* - * There is a reference on the page for the swap entry which has - * been removed, so shouldn't take another. - */ - folio_remove_rmap_pte(folio, subpage, vma); - } - - mmu_notifier_invalidate_range_end(&range); - - return ret; -} - -/** - * folio_make_device_exclusive - Mark the folio exclusively owned by a device. - * @folio: The folio to replace page table entries for. - * @mm: The mm_struct where the folio is expected to be mapped. 
- * @address: Address where the folio is expected to be mapped. - * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks - * - * Tries to remove all the page table entries which are mapping this - * folio and replace them with special device exclusive swap entries to - * grant a device exclusive access to the folio. - * - * Context: Caller must hold the folio lock. - * Return: false if the page is still mapped, or if it could not be unmapped - * from the expected address. Otherwise returns true (success). - */ -static bool folio_make_device_exclusive(struct folio *folio, - struct mm_struct *mm, unsigned long address, void *owner) -{ - struct make_exclusive_args args = { - .mm = mm, - .address = address, - .owner = owner, - .valid = false, - }; - struct rmap_walk_control rwc = { - .rmap_one = page_make_device_exclusive_one, - .done = folio_not_mapped, - .anon_lock = folio_lock_anon_vma_read, - .arg = &args, - }; - - rmap_walk(folio, &rwc); - - return args.valid && !folio_mapcount(folio); -} - /** * make_device_exclusive() - Mark a page for exclusive use by a device * @mm: mm_struct of associated target process @@ -2541,22 +2416,31 @@ static bool folio_make_device_exclusive(struct folio *folio, struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, void *owner, struct folio **foliop) { - struct folio *folio; + struct mmu_notifier_range range; + struct folio *folio, *fw_folio; + struct vm_area_struct *vma; + struct folio_walk fw; struct page *page; - long npages; + swp_entry_t entry; + pte_t swp_pte; mmap_assert_locked(mm); + addr = PAGE_ALIGN_DOWN(addr); /* * Fault in the page writable and try to lock it; note that if the * address would already be marked for exclusive use by a device, * the GUP call would undo that first by triggering a fault. + * + * If any other device would already map this page exclusively, the + * fault will trigger a conversion to an ordinary + * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. */ - npages = get_user_pages_remote(mm, addr, 1, - FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, - &page, NULL); - if (npages != 1) - return ERR_PTR(npages); + page = get_user_page_vma_remote(mm, addr, + FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, + &vma); + if (IS_ERR(page)) + return page; folio = page_folio(page); if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { @@ -2569,11 +2453,61 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, return ERR_PTR(-EBUSY); } - if (!folio_make_device_exclusive(folio, mm, addr, owner)) { + /* + * Inform secondary MMUs that we are going to convert this PTE to + * device-exclusive, such that they unmap it now. Note that the + * caller must filter this event out to prevent livelocks. + */ + mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, + mm, addr, addr + PAGE_SIZE, owner); + mmu_notifier_invalidate_range_start(&range); + + /* + * Let's do a second walk and make sure we still find the same page + * mapped writable. Note that any page of an anonymous folio can + * only be mapped writable using exactly one PTE ("exclusive"), so + * there cannot be other mappings. + */ + fw_folio = folio_walk_start(&fw, vma, addr, 0); + if (fw_folio != folio || fw.page != page || + fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { + if (fw_folio) + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); folio_unlock(folio); folio_put(folio); return ERR_PTR(-EBUSY); } + + /* Nuke the page table entry so we get the uptodate dirty bit. 
*/ + flush_cache_page(vma, addr, page_to_pfn(page)); + fw.pte = ptep_clear_flush(vma, addr, fw.ptep); + + /* Set the dirty flag on the folio now the PTE is gone. */ + if (pte_dirty(fw.pte)) + folio_mark_dirty(folio); + + /* + * Store the pfn of the page in a special device-exclusive PFN swap PTE. + * do_swap_page() will trigger the conversion back while holding the + * folio lock. + */ + entry = make_writable_device_exclusive_entry(page_to_pfn(page)); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(fw.pte)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + /* The pte is writable, uffd-wp does not apply. */ + set_pte_at(mm, addr, fw.ptep, swp_pte); + + /* + * TODO: The device-exclusive PFN swap PTE holds a folio reference but + * does not count as a mapping (mapcount), which is wrong and must be + * fixed, otherwise RMAP walks don't behave as expected. + */ + folio_remove_rmap_pte(folio, page, vma); + + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); *foliop = folio; return page; } From 9a914592140ec548ef72bfef7c8a4e036a1ac492 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:47 +0100 Subject: [PATCH 023/431] mm/memory: detect writability in restore_exclusive_pte() through can_change_pte_writable() Let's do it just like mprotect write-upgrade or during NUMA-hinting faults on PROT_NONE PTEs: detect if the PTE can be writable by using can_change_pte_writable(). Set the PTE only dirty if the folio is dirty: we might not necessarily have a write access, and setting the PTE writable doesn't require setting the PTE dirty. From a CPU perspective, these entries are clean. So only set the PTE dirty if the folios is dirty. With this change in place, there is no need to have separate readable and writable device-exclusive entry types, and we'll merge them next separately. Note that, during fork(), we first convert the device-exclusive entries back to ordinary PTEs, and we only ever allow conversion of writable PTEs to device-exclusive -- only mprotect can currently change them to readable-device-exclusive. Consequently, we always expect PageAnonExclusive(page)==true and can_change_pte_writable()==true, unless we are dealing with soft-dirty tracking or uffd-wp. But reusing can_change_pte_writable() for now is cleaner. 
Link: https://lkml.kernel.org/r/20250210193801.781278-6-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/memory.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index f77c10fd26b9..568b3afde8d2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -723,18 +723,21 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, struct folio *folio = page_folio(page); pte_t orig_pte; pte_t pte; - swp_entry_t entry; orig_pte = ptep_get(ptep); pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(orig_pte); if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); - else if (is_writable_device_exclusive_entry(entry)) - pte = maybe_mkwrite(pte_mkdirty(pte), vma); + + if ((vma->vm_flags & VM_WRITE) && + can_change_pte_writable(vma, address, pte)) { + if (folio_test_dirty(folio)) + pte = pte_mkdirty(pte); + pte = pte_mkwrite(pte, vma); + } VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && PageAnonExclusive(page)), folio); From c25465eb7630ffcadaab29c1010071512f8c9621 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:48 +0100 Subject: [PATCH 024/431] mm: use single SWP_DEVICE_EXCLUSIVE entry type There is no need for the distinction anymore; let's merge the readable and writable device-exclusive entries into a single device-exclusive entry type. Link: https://lkml.kernel.org/r/20250210193801.781278-7-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Simona Vetter Reviewed-by: Alistair Popple Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- include/linux/swap.h | 7 +++---- include/linux/swapops.h | 27 ++++----------------------- mm/mprotect.c | 8 -------- mm/page_table_check.c | 5 ++--- mm/rmap.c | 2 +- 5 files changed, 10 insertions(+), 39 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 91b30701274e..9a48e79a0a52 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -74,14 +74,13 @@ static inline int current_is_kswapd(void) * to a special SWP_DEVICE_{READ|WRITE} entry. * * When a page is mapped by the device for exclusive access we set the CPU page - * table entries to special SWP_DEVICE_EXCLUSIVE_* entries. + * table entries to a special SWP_DEVICE_EXCLUSIVE entry. 
*/ #ifdef CONFIG_DEVICE_PRIVATE -#define SWP_DEVICE_NUM 4 +#define SWP_DEVICE_NUM 3 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) -#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) -#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3) +#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) #else #define SWP_DEVICE_NUM 0 #endif diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 96f26e29fefe..64ea151a7ae3 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -186,26 +186,16 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry) return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); } -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) +static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { - return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset); + return swp_entry(SWP_DEVICE_EXCLUSIVE, offset); } static inline bool is_device_exclusive_entry(swp_entry_t entry) { - return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ || - swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE; + return swp_type(entry) == SWP_DEVICE_EXCLUSIVE; } -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE); -} #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { @@ -227,12 +217,7 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry) return false; } -static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset) -{ - return swp_entry(0, 0); -} - -static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset) +static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { return swp_entry(0, 0); } @@ -242,10 +227,6 @@ static inline bool is_device_exclusive_entry(swp_entry_t entry) return false; } -static inline bool is_writable_device_exclusive_entry(swp_entry_t entry) -{ - return false; -} #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION diff --git a/mm/mprotect.c b/mm/mprotect.c index 516b1d847e2c..9cb6ab7c4048 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -225,14 +225,6 @@ static long change_pte_range(struct mmu_gather *tlb, newpte = swp_entry_to_pte(entry); if (pte_swp_uffd_wp(oldpte)) newpte = pte_swp_mkuffd_wp(newpte); - } else if (is_writable_device_exclusive_entry(entry)) { - entry = make_readable_device_exclusive_entry( - swp_offset(entry)); - newpte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(oldpte)) - newpte = pte_swp_mksoft_dirty(newpte); - if (pte_swp_uffd_wp(oldpte)) - newpte = pte_swp_mkuffd_wp(newpte); } else if (is_pte_marker_entry(entry)) { /* * Ignore error swap entries unconditionally, diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 509c6ef8de40..c2b3600429a0 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -196,9 +196,8 @@ EXPORT_SYMBOL(__page_table_check_pud_clear); /* Whether the swap entry cached writable information */ static inline bool swap_cached_writable(swp_entry_t entry) { - return is_writable_device_exclusive_entry(entry) || - is_writable_device_private_entry(entry) || - 
is_writable_migration_entry(entry); + return is_writable_device_private_entry(entry) || + is_writable_migration_entry(entry); } static inline void page_table_check_pte_flags(pte_t pte) diff --git a/mm/rmap.c b/mm/rmap.c index 0cd2a2d3de00..1129ed132af9 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2492,7 +2492,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, * do_swap_page() will trigger the conversion back while holding the * folio lock. */ - entry = make_writable_device_exclusive_entry(page_to_pfn(page)); + entry = make_device_exclusive_entry(page_to_pfn(page)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(fw.pte)) swp_pte = pte_swp_mksoft_dirty(swp_pte); From 7b2e497a42cdfc52da0df2510ba7941142987922 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:49 +0100 Subject: [PATCH 025/431] mm/page_vma_mapped: device-exclusive entries are not migration entries It's unclear why they would be considered migration entries; they are not. Likely we'll never really trigger that case in practice, because migration (including folio split) of a folio that has device-exclusive entries is never started, as we would detect "additional references": device-exclusive entries adjust the mapcount, but not the refcount. Link: https://lkml.kernel.org/r/20250210193801.781278-8-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Reviewed-by: Alistair Popple Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 81839a9e74f1..32679be22d30 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -111,8 +111,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) return false; entry = pte_to_swp_entry(ptent); - if (!is_migration_entry(entry) && - !is_device_exclusive_entry(entry)) + if (!is_migration_entry(entry)) return false; pfn = swp_offset_pfn(entry); From 096cbb80ab3fd85a9035ec17a1312c2a7db8bc8c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:50 +0100 Subject: [PATCH 026/431] kernel/events/uprobes: handle device-exclusive entries correctly in __replace_page() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). __replace_page() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, because GUP would never have returned such folios (conversion to device-private happens by page migration, not in-place conversion of the PTE). There is a race between GUP and us locking the folio to look it up using page_vma_mapped_walk(), so this is likely a fix (unless something else could prevent that race, but it doesn't look like). pte_pfn() on something that is not a present pte could give use garbage, and we'd wrongly mess up the mapcount because it was already adjusted by calling folio_remove_rmap_pte() when making the entry device-exclusive. 
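In code, the rule applied here (and in the KSM fix that follows; the later rmap patches handle the entries instead of bailing out) is to read the PTE exactly once via ptep_get() and to check pte_present() before using any of the present-PTE accessors. A minimal sketch of that "give up" variant, not the literal change (walk teardown is omitted and the label stands in for the function's existing unlock path):

	pte = ptep_get(pvmw.pte);

	/*
	 * A non-present PTE here is a PFN swap PTE, such as a
	 * device-exclusive entry: it still references the page, but
	 * pte_pfn() and friends must not be applied to it.
	 */
	if (unlikely(!pte_present(pte)))
		goto out_unlock;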
Link: https://lkml.kernel.org/r/20250210193801.781278-9-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- kernel/events/uprobes.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index b4ca8898fe17..eac24f39c2c2 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -173,6 +173,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; + pte_t pte; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); @@ -192,6 +193,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != pvmw.address, old_page); + pte = ptep_get(pvmw.pte); + + /* + * Handle PFN swap PTES, such as device-exclusive ones, that actually + * map pages: simply trigger GUP again to fix it up. + */ + if (unlikely(!pte_present(pte))) { + page_vma_mapped_walk_done(&pvmw); + goto unlock; + } if (new_page) { folio_get(new_folio); @@ -206,7 +217,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); + flush_cache_page(vma, addr, pte_pfn(pte)); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at(mm, addr, pvmw.pte, From 789cfc66992ed90e498a0979edcbf458c799c938 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:51 +0100 Subject: [PATCH 027/431] mm/ksm: handle device-exclusive entries correctly in write_protect_page() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). write_protect_page() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, because GUP would never have returned such folios (conversion to device-private happens by page migration, not in-place conversion of the PTE). There is a race between performing the folio_walk (which fails on non-present PTEs) and locking the folio to look it up using page_vma_mapped_walk() again, so this is likely a fix (unless something else could prevent that race, but it doesn't look like). In the future it could be handled if ever required, for now just give up and ignore them like folio_walk would. 
Link: https://lkml.kernel.org/r/20250210193801.781278-10-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/ksm.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/mm/ksm.c b/mm/ksm.c index 8be2b144fefd..8583fb91ef13 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1270,8 +1270,15 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; - anon_exclusive = PageAnonExclusive(&folio->page); entry = ptep_get(pvmw.pte); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that actually + * map pages: give up just like the next folio_walk would. + */ + if (unlikely(!pte_present(entry))) + goto out_unlock; + + anon_exclusive = PageAnonExclusive(&folio->page); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { swapped = folio_test_swapcache(folio); From 65529295607f1b48cdcf97e7dd569a6ca52cd8ac Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:52 +0100 Subject: [PATCH 028/431] mm/rmap: handle device-exclusive entries correctly in try_to_unmap_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). try_to_unmap_one() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as we expect ZONE_DEVICE pages so far only in migration code when it comes to the RMAP. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Further note that try_to_unmap() calls MMU notifiers and holds the folio lock, so any device-exclusive users should be properly prepared for a device-exclusive PTE to "vanish". 
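For driver authors, being "prepared" boils down to filtering out only the MMU_NOTIFY_EXCLUSIVE invalidation the driver itself triggers via make_device_exclusive() (to avoid livelocking) and revoking device access on every other invalidation, including the ones issued by try_to_unmap()/try_to_migrate(). A rough, heavily simplified sketch modelled on the nouveau/test_hmm pattern -- my_interval_notifier() and my_dev are made-up names, and locking/blockable handling is omitted:

	static bool my_interval_notifier(struct mmu_interval_notifier *mni,
					 const struct mmu_notifier_range *range,
					 unsigned long cur_seq)
	{
		/*
		 * my_dev is the owner cookie this driver passed to
		 * make_device_exclusive(); ignore the invalidation we
		 * raised ourselves while converting the PTE.
		 */
		if (range->event == MMU_NOTIFY_EXCLUSIVE &&
		    range->owner == my_dev)
			return true;

		mmu_interval_set_seq(mni, cur_seq);
		/* ... tear down the device mapping for this range ... */
		return true;
	}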
Link: https://lkml.kernel.org/r/20250210193801.781278-11-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 52 +++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 1129ed132af9..47142a656ae5 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1648,9 +1648,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + bool anon_exclusive, ret = true; pte_t pteval; struct page *subpage; - bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; @@ -1722,7 +1722,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - pfn = pte_pfn(ptep_get(pvmw.pte)); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. + */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); + } else { + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + } + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && @@ -1778,7 +1789,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); - } else { + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else if (likely(pte_present(pteval))) { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { @@ -1796,6 +1809,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else { + pte_clear(mm, address, pvmw.pte); } /* @@ -1805,10 +1822,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); - /* Set the dirty flag on the folio now the pte is gone. */ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); - /* Update high watermark before we lower rss */ update_hiwater_rss(mm); @@ -1822,8 +1835,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } - - } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. 
Simply discard the pte, vmscan @@ -1902,6 +1915,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } + + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); @@ -1926,10 +1945,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (anon_exclusive) swp_pte = pte_swp_mkexclusive(swp_pte); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + if (likely(pte_present(pteval))) { + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + if (pte_swp_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } set_pte_at(mm, address, pvmw.pte, swp_pte); } else { /* From bf983108be0ef6cc8e96ccc0458f926dc96e6ba2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:53 +0100 Subject: [PATCH 029/431] mm/rmap: handle device-exclusive entries correctly in try_to_migrate_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). try_to_migrate_one() is not prepared for that, so teach it about these PFN swap PTEs. We already handle device-private entries by specializing on the folio, so we can reshuffle that code to make it work on the PFN swap PTEs instead. Get rid of the folio_is_device_private() handling. Note that we never currently expect device-private folios with HWPoison flag set at that point, so add a warning in case that ever changes and we can figure out what the right thing to do is. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Further note that try_to_migrate() calls MMU notifiers and holds the folio lock, so any device-exclusive users should be properly prepared for a device-exclusive PTE to "vanish". 
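As in the previous patch, the crux is how the PFN (and with it the subpage) is derived once the walk can return a PFN swap PTE instead of a present one; condensed from the hunk below:

	pteval = ptep_get(pvmw.pte);
	if (likely(pte_present(pteval))) {
		pfn = pte_pfn(pteval);
	} else {
		/* PFN swap PTEs, such as device-exclusive ones, map pages. */
		pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
		VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	}
	subpage = folio_page(folio, pfn - folio_pfn(folio));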
Link: https://lkml.kernel.org/r/20250210193801.781278-12-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Alistair Popple Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 124 ++++++++++++++++++++++-------------------------------- 1 file changed, 51 insertions(+), 73 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 47142a656ae5..7c471c3ea64c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2039,9 +2039,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + bool anon_exclusive, writable, ret = true; pte_t pteval; struct page *subpage; - bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; @@ -2108,24 +2108,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - pfn = pte_pfn(ptep_get(pvmw.pte)); - - if (folio_is_zone_device(folio)) { - /* - * Our PTE is a non-present device exclusive entry and - * calculating the subpage as for the common case would - * result in an invalid pointer. - * - * Since only PAGE_SIZE pages can currently be - * migrated, just set it to page. This will need to be - * changed when hugepage migrations to device private - * memory are supported. - */ - VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); - subpage = &folio->page; + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. + */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); } else { - subpage = folio_page(folio, pfn - folio_pfn(folio)); + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); } + + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); @@ -2181,7 +2176,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, } /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); - } else { + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else if (likely(pte_present(pteval))) { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { @@ -2199,54 +2197,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else { + pte_clear(mm, address, pvmw.pte); + writable = is_writable_device_private_entry(pte_to_swp_entry(pteval)); } - /* Set the dirty flag on the folio now the pte is gone. 
*/ - if (pte_dirty(pteval)) - folio_mark_dirty(folio); + VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && + !anon_exclusive, folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); - if (folio_is_device_private(folio)) { - unsigned long pfn = folio_pfn(folio); - swp_entry_t entry; - pte_t swp_pte; + if (PageHWPoison(subpage)) { + VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); - if (anon_exclusive) - WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, - subpage)); - - /* - * Store the pfn of the page in a special migration - * pte. do_swap_page() will wait until the migration - * pte is removed and then restart fault handling. - */ - entry = pte_to_swp_entry(pteval); - if (is_writable_device_private_entry(entry)) - entry = make_writable_migration_entry(pfn); - else if (anon_exclusive) - entry = make_readable_exclusive_migration_entry(pfn); - else - entry = make_readable_migration_entry(pfn); - swp_pte = swp_entry_to_pte(entry); - - /* - * pteval maps a zone device page and is therefore - * a swap pte. - */ - if (pte_swp_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_swp_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); - set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); - trace_set_migration_pte(pvmw.address, pte_val(swp_pte), - folio_order(folio)); - /* - * No need to invalidate here it will synchronize on - * against the special swap migration pte. - */ - } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); @@ -2256,8 +2223,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } - - } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan @@ -2273,6 +2240,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, swp_entry_t entry; pte_t swp_pte; + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, @@ -2283,8 +2255,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, page_vma_mapped_walk_done(&pvmw); break; } - VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && - !anon_exclusive, subpage); /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (folio_test_hugetlb(folio)) { @@ -2309,7 +2279,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. 
*/ - if (pte_write(pteval)) + if (writable) entry = make_writable_migration_entry( page_to_pfn(subpage)); else if (anon_exclusive) @@ -2318,15 +2288,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, else entry = make_readable_migration_entry( page_to_pfn(subpage)); - if (pte_young(pteval)) - entry = make_migration_entry_young(entry); - if (pte_dirty(pteval)) - entry = make_migration_entry_dirty(entry); - swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pteval)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + if (likely(pte_present(pteval))) { + if (pte_young(pteval)) + entry = make_migration_entry_young(entry); + if (pte_dirty(pteval)) + entry = make_migration_entry_dirty(entry); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + swp_pte = swp_entry_to_pte(entry); + if (pte_swp_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, swp_pte, hsz); From bd1f2b2ace84bac030b70d51710581b48ffdb690 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:54 +0100 Subject: [PATCH 030/431] mm/rmap: handle device-exclusive entries correctly in page_vma_mkclean_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). page_vma_mkclean_one() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as we expect ZONE_DEVICE pages so far only in migration code when it comes to the RMAP. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Link: https://lkml.kernel.org/r/20250210193801.781278-13-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mm/rmap.c b/mm/rmap.c index 7c471c3ea64c..7b737f0f68fb 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1044,6 +1044,14 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are clean and not writable from a + * CPU perspective. The MMU notifier takes care of any + * device aspects. 
+ */ + if (!pte_present(entry)) + continue; if (!pte_dirty(entry) && !pte_write(entry)) continue; From 3faa3ebc89c1d95605cb43174240f97510ed0e52 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:55 +0100 Subject: [PATCH 031/431] mm/page_idle: handle device-exclusive entries correctly in page_idle_clear_pte_refs_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). page_idle_clear_pte_refs_one() is not prepared for that, so let's teach it what to do with these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as page_idle_get_folio() filters out non-lru folios. Should we just skip PFN swap PTEs completely? Possible, but it seems straight forward to just handle them correctly. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Link: https://lkml.kernel.org/r/20250210193801.781278-14-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/page_idle.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mm/page_idle.c b/mm/page_idle.c index 947c7c7a3728..408aaf29a3ea 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -62,9 +62,14 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio, /* * For PTE-mapped THP, one sub page is referenced, * the whole THP is referenced. + * + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. */ - if (ptep_clear_young_notify(vma, addr, pvmw.pte)) - referenced = true; + if (likely(pte_present(ptep_get(pvmw.pte)))) + referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte); + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) referenced = true; From 39628c39ba3b9efbbb4ba9df4a20254f0c137cd7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:56 +0100 Subject: [PATCH 032/431] mm/damon: handle device-exclusive entries correctly in damon_folio_young_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). damon_folio_young_one() is not prepared for that, so teach it about these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as we expect ZONE_DEVICE pages so far only in migration code when it comes to the RMAP. 
The impact is rather small: we'd be calling pte_young() on a non-present PTE, which is not really defined to have semantics. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries. Link: https://lkml.kernel.org/r/20250210193801.781278-15-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Reviewed-by: SeongJae Park Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index c834aa217835..1a89920efce9 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -92,12 +92,20 @@ static bool damon_folio_young_one(struct folio *folio, { bool *accessed = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); + pte_t pte; *accessed = false; while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(ptep_get(pvmw.pte)) || + pte = ptep_get(pvmw.pte); + + /* + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are "old" from a CPU perspective. + * The MMU notifier takes care of any device aspects. + */ + *accessed = (pte_present(pte) && pte_young(pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { From 1f3ac4c577bb993539561b47bf2fe9d1beaaca2e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:57 +0100 Subject: [PATCH 033/431] mm/damon: handle device-exclusive entries correctly in damon_folio_mkold_one() Ever since commit b756a3b5e7ea ("mm: device exclusive memory access") we can return with a device-exclusive entry from page_vma_mapped_walk(). damon_folio_mkold_one() is not prepared for that and calls damon_ptep_mkold() with PFN swap PTEs. Teach damon_ptep_mkold() to deal with these PFN swap PTEs. Note that device-private entries are so far not applicable on that path, as damon_get_folio() filters out non-lru folios. Should we just skip PFN swap PTEs completely? Possible, but it seems straightforward to just handle it correctly. Note that we could currently only run into this case with device-exclusive entries on THPs. We still adjust the mapcount on conversion to device-exclusive; this makes the rmap walk abort early for small folios, because we'll always have !folio_mapped() with a single device-exclusive entry. We'll adjust the mapcount logic once all page_vma_mapped_walk() users can properly handle device-exclusive entries.
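Condensed, the aging logic that the hunk below adopts looks as follows; this is a simplified sketch using the same kernel APIs as the diff, not the literal code. A non-present PFN swap PTE has no hardware accessed bit to test or clear, so only present PTEs go through ptep_test_and_clear_young(), while the MMU notifier is called either way so that the device holding the exclusive entry is still aged.

	bool young = false;

	/* Only a present PTE has a hardware accessed bit to clear. */
	if (pte_present(pteval))
		young |= ptep_test_and_clear_young(vma, addr, pte);

	/* Secondary MMUs (e.g. the device) are handled via the notifier. */
	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);

	if (young)
		folio_set_young(folio);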
Link: https://lkml.kernel.org/r/20250210193801.781278-16-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: SeongJae Park Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d25d99cb5f2b..86a50e8fbc80 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include "ops-common.h" @@ -39,12 +41,29 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); + pte_t pteval = ptep_get(pte); + struct folio *folio; + bool young = false; + unsigned long pfn; + if (likely(pte_present(pteval))) + pfn = pte_pfn(pteval); + else + pfn = swp_offset_pfn(pte_to_swp_entry(pteval)); + + folio = damon_get_folio(pfn); if (!folio) return; - if (ptep_clear_young_notify(vma, addr, pte)) + /* + * PFN swap PTEs, such as device-exclusive ones, that actually map pages + * are "old" from a CPU perspective. The MMU notifier takes care of any + * device aspects. + */ + if (likely(pte_present(pteval))) + young |= ptep_test_and_clear_young(vma, addr, pte); + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE); + if (young) folio_set_young(folio); folio_set_idle(folio); From f495bd7e0d9bd00c9a76c49f792b532bbb0efd0a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:58 +0100 Subject: [PATCH 034/431] mm/rmap: keep mapcount untouched for device-exclusive entries Now that conversion to device-exclusive does no longer perform an rmap walk and all page_vma_mapped_walk() users were taught to properly handle device-exclusive entries, let's treat device-exclusive entries just as if they would be present, similar to how we handle device-private entries already. This fixes swapout/migration/split/hwpoison of folios with device-exclusive entries. We only had to take care of page_vma_mapped_walk() users, because these traditionally assume pte_present(). Other page table walkers already have to handle !pte_present(), and some of them might simply skip them (e.g., MADV_PAGEOUT) if they are not specialized on them. This change doesn't modify the latter. Note that while folios with device-exclusive PTEs can now get migrated, khugepaged will not collapse a THP if there is device-exclusive PTE. Doing so might also not be desired if the device frequently performs atomics to the same page. Similarly, KSM will never merge order-0 folios that are device-exclusive. 
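The resulting invariant can be illustrated with a small, hypothetical assertion that is not part of this series: converting a mapping to a device-exclusive entry (and restoring it later) no longer changes the folio's mapcount, so rmap walks keep seeing the folio as mapped and visit the non-present entry like any other mapping.

	/* Hypothetical illustration only, not code from this series. */
	const int mapcount = folio_mapcount(folio);

	/* ... make_device_exclusive() replaces the present PTE here ... */

	VM_WARN_ON_FOLIO(folio_mapcount(folio) != mapcount, folio);
	VM_WARN_ON_FOLIO(!folio_mapped(folio), folio);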
Link: https://lkml.kernel.org/r/20250210193801.781278-17-david@redhat.com Fixes: b756a3b5e7ea ("mm: device exclusive memory access") Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/memory.c | 17 +---------------- mm/rmap.c | 7 ------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 568b3afde8d2..94feb51a7983 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -741,20 +741,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && PageAnonExclusive(page)), folio); - - /* - * No need to take a page reference as one was already - * created when the swap entry was made. - */ - if (folio_test_anon(folio)) - folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); - else - /* - * Currently device exclusive access only supports anonymous - * memory so the entry shouldn't point to a filebacked page. - */ - WARN_ON_ONCE(1); - set_pte_at(vma->vm_mm, address, ptep, pte); /* @@ -1626,8 +1612,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, */ WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(folio)]--; - if (is_device_private_entry(entry)) - folio_remove_rmap_pte(folio, page, vma); + folio_remove_rmap_pte(folio, page, vma); folio_put(folio); } else if (!non_swap_entry(entry)) { /* Genuine swap entries, hence a private anon pages */ diff --git a/mm/rmap.c b/mm/rmap.c index 7b737f0f68fb..e2a543f639ce 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2511,13 +2511,6 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, /* The pte is writable, uffd-wp does not apply. */ set_pte_at(mm, addr, fw.ptep, swp_pte); - /* - * TODO: The device-exclusive PFN swap PTE holds a folio reference but - * does not count as a mapping (mapcount), which is wrong and must be - * fixed, otherwise RMAP walks don't behave as expected. - */ - folio_remove_rmap_pte(folio, page, vma); - folio_walk_end(&fw, vma); mmu_notifier_invalidate_range_end(&range); *foliop = folio; From b8932ca8b9244839b263786c69d57ed05c0d96ec Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 10 Feb 2025 20:37:59 +0100 Subject: [PATCH 035/431] mm/rmap: avoid -EBUSY from make_device_exclusive() Failing to obtain the folio lock, for example because the folio is concurrently getting migrated or swapped out, can easily make the callers fail: for example, the hmm selftest can sometimes be observed to fail because of this. Instead of forcing the caller to retry, let's simply retry in this to-be-expected case. Similarly, avoid spurious failures simply because we raced with someone (e.g., swapout) modifying the page table such that our folio_walk fails. Simply unconditionally lock the folio, and retry GUP if our folio_walk fails. Note that the folio_walk repeatedly failing is not something we expect. Note that we might want to avoid grabbing the folio lock at some point; for now, keep that as is and only unconditionally lock the folio. With this change, the hmm selftests don't fail simply because the folio is already locked. 
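Condensed, the retry flow added below looks roughly like this. It is a simplified sketch of the hunk that follows, with the error handling for unsupported folios omitted and walk_failed standing in as a placeholder for the folio_walk failure condition.

retry:
	page = get_user_page_vma_remote(mm, addr,
					FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
					&vma);
	/* ... IS_ERR()/NULL handling and folio lookup as before ... */

	/* Sleep on the folio lock instead of returning -EBUSY. */
	ret = folio_lock_killable(folio);
	if (ret) {
		folio_put(folio);
		return ERR_PTR(ret);
	}

	/* ... MMU notifier setup and page table walk as before ... */
	if (walk_failed) {
		folio_unlock(folio);
		folio_put(folio);
		goto retry;		/* instead of ERR_PTR(-EBUSY) */
	}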
While this fixes the selftests in some cases, it's likely not something that deserves a "Fixes:". Link: https://lkml.kernel.org/r/20250210193801.781278-18-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Alistair Popple Cc: Alex Shi Cc: Danilo Krummrich Cc: Dave Airlie Cc: Jann Horn Cc: Jason Gunthorpe Cc: Jerome Glisse Cc: John Hubbard Cc: Jonathan Corbet Cc: Karol Herbst Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Lyude Cc: "Masami Hiramatsu (Google)" Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: SeongJae Park Cc: Simona Vetter Cc: Vlastimil Babka Cc: Yanteng Si Cc: Barry Song Signed-off-by: Andrew Morton --- mm/rmap.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index e2a543f639ce..0f760b93fc0a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2435,6 +2435,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, struct page *page; swp_entry_t entry; pte_t swp_pte; + int ret; mmap_assert_locked(mm); addr = PAGE_ALIGN_DOWN(addr); @@ -2448,6 +2449,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, * fault will trigger a conversion to an ordinary * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. */ +retry: page = get_user_page_vma_remote(mm, addr, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, &vma); @@ -2460,9 +2462,10 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, return ERR_PTR(-EOPNOTSUPP); } - if (!folio_trylock(folio)) { + ret = folio_lock_killable(folio); + if (ret) { folio_put(folio); - return ERR_PTR(-EBUSY); + return ERR_PTR(ret); } /* @@ -2488,7 +2491,7 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, mmu_notifier_invalidate_range_end(&range); folio_unlock(folio); folio_put(folio); - return ERR_PTR(-EBUSY); + goto retry; } /* Nuke the page table entry so we get the uptodate dirty bit. */ From 350dce38eb6e221cca16940f3bd8d9947364f1ca Mon Sep 17 00:00:00 2001 From: Hao Zhang Date: Wed, 15 Jan 2025 09:58:29 +0800 Subject: [PATCH 036/431] mm/vmscan: extract calculated pressure balance as a function Extract pressure balance calculation into a function.This doesn't change current behaviour. [akpm@linux-foundation.org: 80-col wrapping] Link: https://lkml.kernel.org/r/tencent_735DB36A2306C08B8568049E4C0B99716C07@qq.com Signed-off-by: Hao Zhang Signed-off-by: Andrew Morton --- mm/vmscan.c | 68 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index fc4951d23b97..bc1826020159 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2400,6 +2400,43 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) } } +static inline void calculate_pressure_balance(struct scan_control *sc, + int swappiness, u64 *fraction, u64 *denominator) +{ + unsigned long anon_cost, file_cost, total_cost; + unsigned long ap, fp; + + /* + * Calculate the pressure balance between anon and file pages. + * + * The amount of pressure we put on each LRU is inversely + * proportional to the cost of reclaiming each list, as + * determined by the share of pages that are refaulting, times + * the relative IO cost of bringing back a swapped out + * anonymous page vs reloading a filesystem page (swappiness). + * + * Although we limit that influence to ensure no list gets + * left behind completely: at least a third of the pressure is + * applied, before swappiness. 
+ * + * With swappiness at 100, anon and file have equal IO cost. + */ + total_cost = sc->anon_cost + sc->file_cost; + anon_cost = total_cost + sc->anon_cost; + file_cost = total_cost + sc->file_cost; + total_cost = anon_cost + file_cost; + + ap = swappiness * (total_cost + 1); + ap /= anon_cost + 1; + + fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); + fp /= file_cost + 1; + + fraction[WORKINGSET_ANON] = ap; + fraction[WORKINGSET_FILE] = fp; + *denominator = ap + fp; +} + /* * Determine how aggressively the anon and file LRU lists should be * scanned. @@ -2412,12 +2449,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, { struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); - unsigned long anon_cost, file_cost, total_cost; int swappiness = sc_swappiness(sc, memcg); u64 fraction[ANON_AND_FILE]; u64 denominator = 0; /* gcc */ enum scan_balance scan_balance; - unsigned long ap, fp; enum lru_list lru; /* If we have no swap space, do not bother scanning anon folios. */ @@ -2466,35 +2501,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, } scan_balance = SCAN_FRACT; - /* - * Calculate the pressure balance between anon and file pages. - * - * The amount of pressure we put on each LRU is inversely - * proportional to the cost of reclaiming each list, as - * determined by the share of pages that are refaulting, times - * the relative IO cost of bringing back a swapped out - * anonymous page vs reloading a filesystem page (swappiness). - * - * Although we limit that influence to ensure no list gets - * left behind completely: at least a third of the pressure is - * applied, before swappiness. - * - * With swappiness at 100, anon and file have equal IO cost. - */ - total_cost = sc->anon_cost + sc->file_cost; - anon_cost = total_cost + sc->anon_cost; - file_cost = total_cost + sc->file_cost; - total_cost = anon_cost + file_cost; + calculate_pressure_balance(sc, swappiness, fraction, &denominator); - ap = swappiness * (total_cost + 1); - ap /= anon_cost + 1; - - fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); - fp /= file_cost + 1; - - fraction[0] = ap; - fraction[1] = fp; - denominator = ap + fp; out: for_each_evictable_lru(lru) { bool file = is_file_lru(lru); From 58ba73e521b3d3a7a7612fc200beba1544a5100c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 29 Jan 2025 18:06:31 +0000 Subject: [PATCH 037/431] mm: z3fold: remove z3fold Patch series "mm: zswap: remove z3fold and zbud", v2. After 2 cycles of deprecating z3fold, remove it as well as zbud (rationale in specific patches). This patch (of 2): Z3fold has been marked as deprecated for 2 cycles and no one complained, as expected. As there are no known users, remove the code now. 
Link: https://lkml.kernel.org/r/20250129180633.3501650-1-yosry.ahmed@linux.dev Link: https://lkml.kernel.org/r/20250129180633.3501650-2-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Acked-by: Johannes Weiner Reviewed-by: Shakeel Butt Acked-by: Nhat Pham Cc: Alexander Gordeev Cc: Chengming Zhou Cc: Christian Borntraeger Cc: Dan Streetman Cc: Heiko Carstens Cc: Huacai Chen Cc: Miaohe Lin Cc: Seth Jennings Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vitaly Wool Cc: Vlastimil Babka Cc: WANG Xuerui Signed-off-by: Andrew Morton --- CREDITS | 1 + Documentation/mm/index.rst | 1 - Documentation/mm/z3fold.rst | 28 - Documentation/translations/zh_CN/mm/index.rst | 1 - .../translations/zh_CN/mm/z3fold.rst | 31 - MAINTAINERS | 7 - mm/Kconfig | 29 - mm/Makefile | 1 - mm/z3fold.c | 1447 ----------------- 9 files changed, 1 insertion(+), 1545 deletions(-) delete mode 100644 Documentation/mm/z3fold.rst delete mode 100644 Documentation/translations/zh_CN/mm/z3fold.rst delete mode 100644 mm/z3fold.c diff --git a/CREDITS b/CREDITS index 53d11a46fd69..aeccd79f9778 100644 --- a/CREDITS +++ b/CREDITS @@ -4311,6 +4311,7 @@ S: England N: Vitaly Wool E: vitaly.wool@konsulko.com D: Maintenance and development of zswap +D: Maintenance and development of z3fold N: Chris Wright E: chrisw@sous-sol.org diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst index 0be1c7503a01..d3ada3e45e10 100644 --- a/Documentation/mm/index.rst +++ b/Documentation/mm/index.rst @@ -62,5 +62,4 @@ documentation, or deleted if it has served its purpose. unevictable-lru vmalloced-kernel-stacks vmemmap_dedup - z3fold zsmalloc diff --git a/Documentation/mm/z3fold.rst b/Documentation/mm/z3fold.rst deleted file mode 100644 index 25b5935d06c7..000000000000 --- a/Documentation/mm/z3fold.rst +++ /dev/null @@ -1,28 +0,0 @@ -====== -z3fold -====== - -z3fold is a special purpose allocator for storing compressed pages. -It is designed to store up to three compressed pages per physical page. -It is a zbud derivative which allows for higher compression -ratio keeping the simplicity and determinism of its predecessor. - -The main differences between z3fold and zbud are: - -* unlike zbud, z3fold allows for up to PAGE_SIZE allocations -* z3fold can hold up to 3 compressed pages in its page -* z3fold doesn't export any API itself and is thus intended to be used - via the zpool API. - -To keep the determinism and simplicity, z3fold, just like zbud, always -stores an integral number of compressed pages per page, but it can store -up to 3 pages unlike zbud which can store at most 2. Therefore the -compression ratio goes to around 2.7x while zbud's one is around 1.7x. - -Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not -return a dereferenceable pointer. Instead, it returns an unsigned long -handle which encodes actual location of the allocated object. - -Keeping effective compression ratio close to zsmalloc's, z3fold doesn't -depend on MMU enabled and provides more predictable reclaim behavior -which makes it a better fit for small and response-critical systems. 
diff --git a/Documentation/translations/zh_CN/mm/index.rst b/Documentation/translations/zh_CN/mm/index.rst index c8726bce8f74..a71116be058f 100644 --- a/Documentation/translations/zh_CN/mm/index.rst +++ b/Documentation/translations/zh_CN/mm/index.rst @@ -58,7 +58,6 @@ Linux内存管理文档 remap_file_pages split_page_table_lock vmalloced-kernel-stacks - z3fold zsmalloc TODOLIST: diff --git a/Documentation/translations/zh_CN/mm/z3fold.rst b/Documentation/translations/zh_CN/mm/z3fold.rst deleted file mode 100644 index 9569a6d88270..000000000000 --- a/Documentation/translations/zh_CN/mm/z3fold.rst +++ /dev/null @@ -1,31 +0,0 @@ -:Original: Documentation/mm/z3fold.rst - -:翻译: - - 司延腾 Yanteng Si - -:校译: - - -====== -z3fold -====== - -z3fold是一个专门用于存储压缩页的分配器。它被设计为每个物理页最多可以存储三个压缩页。 -它是zbud的衍生物,允许更高的压缩率,保持其前辈的简单性和确定性。 - -z3fold和zbud的主要区别是: - -* 与zbud不同的是,z3fold允许最大的PAGE_SIZE分配。 -* z3fold在其页面中最多可以容纳3个压缩页面 -* z3fold本身没有输出任何API,因此打算通过zpool的API来使用 - -为了保持确定性和简单性,z3fold,就像zbud一样,总是在每页存储一个整数的压缩页,但是 -它最多可以存储3页,不像zbud最多可以存储2页。因此压缩率达到2.7倍左右,而zbud的压缩 -率是1.7倍左右。 - -不像zbud(但也像zsmalloc),z3fold_alloc()那样不返回一个可重复引用的指针。相反,它 -返回一个无符号长句柄,它编码了被分配对象的实际位置。 - -保持有效的压缩率接近于zsmalloc,z3fold不依赖于MMU的启用,并提供更可预测的回收行 -为,这使得它更适合于小型和反应迅速的系统。 diff --git a/MAINTAINERS b/MAINTAINERS index ed7aa6867674..a65a73227e3d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26181,13 +26181,6 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* -Z3FOLD COMPRESSED PAGE ALLOCATOR -M: Vitaly Wool -R: Miaohe Lin -L: linux-mm@kvack.org -S: Maintained -F: mm/z3fold.c - Z8530 DRIVER FOR AX.25 M: Joerg Reuter L: linux-hams@vger.kernel.org diff --git a/mm/Kconfig b/mm/Kconfig index 1b501db06417..6fa19022c09b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -146,15 +146,6 @@ config ZSWAP_ZPOOL_DEFAULT_ZBUD help Use the zbud allocator as the default allocator. -config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED - bool "z3foldi (DEPRECATED)" - select Z3FOLD_DEPRECATED - help - Use the z3fold allocator as the default allocator. - - Deprecated and scheduled for removal in a few cycles, - see CONFIG_Z3FOLD_DEPRECATED. - config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC bool "zsmalloc" select ZSMALLOC @@ -166,7 +157,6 @@ config ZSWAP_ZPOOL_DEFAULT string depends on ZSWAP default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD - default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC default "" @@ -180,25 +170,6 @@ config ZBUD deterministic reclaim properties that make it preferable to a higher density approach when reclaim will be used. -config Z3FOLD_DEPRECATED - tristate "3:1 compression allocator (z3fold) (DEPRECATED)" - depends on ZSWAP - help - Deprecated and scheduled for removal in a few cycles. If you have - a good reason for using Z3FOLD over ZSMALLOC, please contact - linux-mm@kvack.org and the zswap maintainers. - - A special purpose allocator for storing compressed pages. - It is designed to store up to three compressed pages per physical - page. It is a ZBUD derivative so the simplicity and determinism are - still there. 
- -config Z3FOLD - tristate - default y if Z3FOLD_DEPRECATED=y - default m if Z3FOLD_DEPRECATED=m - depends on Z3FOLD_DEPRECATED - config ZSMALLOC tristate prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM) diff --git a/mm/Makefile b/mm/Makefile index 850386a67b3e..e4c03da3c084 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -115,7 +115,6 @@ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o -obj-$(CONFIG_Z3FOLD) += z3fold.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_NUMA) += numa.o diff --git a/mm/z3fold.c b/mm/z3fold.c deleted file mode 100644 index 379d24b4fef9..000000000000 --- a/mm/z3fold.c +++ /dev/null @@ -1,1447 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * z3fold.c - * - * Author: Vitaly Wool - * Copyright (C) 2016, Sony Mobile Communications Inc. - * - * This implementation is based on zbud written by Seth Jennings. - * - * z3fold is an special purpose allocator for storing compressed pages. It - * can store up to three compressed pages per page which improves the - * compression ratio of zbud while retaining its main concepts (e. g. always - * storing an integral number of objects per page) and simplicity. - * It still has simple and deterministic reclaim properties that make it - * preferable to a higher density approach (with no requirement on integral - * number of object per page) when reclaim is used. - * - * As in zbud, pages are divided into "chunks". The size of the chunks is - * fixed at compile time and is determined by NCHUNKS_ORDER below. - * - * z3fold doesn't export any API and is meant to be used via zpool API. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks - * in the beginning of an allocated page are occupied by z3fold header, so - * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), - * which shows the max number of free chunks in z3fold page, also there will - * be 63, or 62, respectively, freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) -#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) -#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) -#define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS) - -#define BUDDY_MASK (0x3) -#define BUDDY_SHIFT 2 -#define SLOTS_ALIGN (0x40) - -/***************** - * Structures -*****************/ -struct z3fold_pool; - -enum buddy { - HEADLESS = 0, - FIRST, - MIDDLE, - LAST, - BUDDIES_MAX = LAST -}; - -struct z3fold_buddy_slots { - /* - * we are using BUDDY_MASK in handle_to_buddy etc. 
so there should - * be enough slots to hold all possible variants - */ - unsigned long slot[BUDDY_MASK + 1]; - unsigned long pool; /* back link */ - rwlock_t lock; -}; -#define HANDLE_FLAG_MASK (0x03) - -/* - * struct z3fold_header - z3fold page metadata occupying first chunks of each - * z3fold page, except for HEADLESS pages - * @buddy: links the z3fold page into the relevant list in the - * pool - * @page_lock: per-page lock - * @refcount: reference count for the z3fold page - * @work: work_struct for page layout optimization - * @slots: pointer to the structure holding buddy slots - * @pool: pointer to the containing pool - * @cpu: CPU which this page "belongs" to - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @middle_chunks: the size of the middle buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - * @first_num: the starting number (for the first handle) - * @mapped_count: the number of objects currently mapped - */ -struct z3fold_header { - struct list_head buddy; - spinlock_t page_lock; - struct kref refcount; - struct work_struct work; - struct z3fold_buddy_slots *slots; - struct z3fold_pool *pool; - short cpu; - unsigned short first_chunks; - unsigned short middle_chunks; - unsigned short last_chunks; - unsigned short start_middle; - unsigned short first_num:2; - unsigned short mapped_count:2; - unsigned short foreign_handles:2; -}; - -/** - * struct z3fold_pool - stores metadata for each z3fold pool - * @name: pool name - * @lock: protects pool unbuddied lists - * @stale_lock: protects pool stale page list - * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- - * buddies; the list each z3fold page is added to depends on - * the size of its free region. - * @stale: list of pages marked for freeing - * @pages_nr: number of z3fold pages in the pool. - * @c_handle: cache for z3fold_buddy_slots allocation - * @compact_wq: workqueue for page layout background optimization - * @release_wq: workqueue for safe page release - * @work: work_struct for safe page release - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular z3fold pool. 
- */ -struct z3fold_pool { - const char *name; - spinlock_t lock; - spinlock_t stale_lock; - struct list_head __percpu *unbuddied; - struct list_head stale; - atomic64_t pages_nr; - struct kmem_cache *c_handle; - struct workqueue_struct *compact_wq; - struct workqueue_struct *release_wq; - struct work_struct work; -}; - -/* - * Internal z3fold page flags - */ -enum z3fold_page_flags { - PAGE_HEADLESS = 0, - MIDDLE_CHUNK_MAPPED, - NEEDS_COMPACTING, - PAGE_STALE, - PAGE_CLAIMED, /* by either reclaim or free */ - PAGE_MIGRATED, /* page is migrated and soon to be released */ -}; - -/* - * handle flags, go under HANDLE_FLAG_MASK - */ -enum z3fold_handle_flags { - HANDLES_NOFREE = 0, -}; - -/* - * Forward declarations - */ -static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool); -static void compact_page_work(struct work_struct *w); - -/***************** - * Helpers -*****************/ - -/* Converts an allocation size in bytes to size in z3fold chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, - gfp_t gfp) -{ - struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle, - gfp); - - if (slots) { - /* It will be freed separately in free_handle(). */ - kmemleak_not_leak(slots); - slots->pool = (unsigned long)pool; - rwlock_init(&slots->lock); - } - - return slots; -} - -static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s) -{ - return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); -} - -static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle) -{ - return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1)); -} - -/* Lock a z3fold page */ -static inline void z3fold_page_lock(struct z3fold_header *zhdr) -{ - spin_lock(&zhdr->page_lock); -} - -/* Try to lock a z3fold page */ -static inline int z3fold_page_trylock(struct z3fold_header *zhdr) -{ - return spin_trylock(&zhdr->page_lock); -} - -/* Unlock a z3fold page */ -static inline void z3fold_page_unlock(struct z3fold_header *zhdr) -{ - spin_unlock(&zhdr->page_lock); -} - -/* return locked z3fold page if it's not headless */ -static inline struct z3fold_header *get_z3fold_header(unsigned long handle) -{ - struct z3fold_buddy_slots *slots; - struct z3fold_header *zhdr; - int locked = 0; - - if (!(handle & (1 << PAGE_HEADLESS))) { - slots = handle_to_slots(handle); - do { - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - locked = z3fold_page_trylock(zhdr); - read_unlock(&slots->lock); - if (locked) { - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_MIGRATED, &page->private)) - break; - z3fold_page_unlock(zhdr); - } - cpu_relax(); - } while (true); - } else { - zhdr = (struct z3fold_header *)(handle & PAGE_MASK); - } - - return zhdr; -} - -static inline void put_z3fold_header(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (!test_bit(PAGE_HEADLESS, &page->private)) - z3fold_page_unlock(zhdr); -} - -static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) -{ - struct z3fold_buddy_slots *slots; - int i; - bool is_free; - - if (WARN_ON(*(unsigned long *)handle == 0)) - return; - - slots = handle_to_slots(handle); - write_lock(&slots->lock); - *(unsigned long 
*)handle = 0; - - if (test_bit(HANDLES_NOFREE, &slots->pool)) { - write_unlock(&slots->lock); - return; /* simple case, nothing else to do */ - } - - if (zhdr->slots != slots) - zhdr->foreign_handles--; - - is_free = true; - for (i = 0; i <= BUDDY_MASK; i++) { - if (slots->slot[i]) { - is_free = false; - break; - } - } - write_unlock(&slots->lock); - - if (is_free) { - struct z3fold_pool *pool = slots_to_pool(slots); - - if (zhdr->slots == slots) - zhdr->slots = NULL; - kmem_cache_free(pool->c_handle, slots); - } -} - -/* Initializes the z3fold header of a newly allocated z3fold page */ -static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, - struct z3fold_pool *pool, gfp_t gfp) -{ - struct z3fold_header *zhdr = page_address(page); - struct z3fold_buddy_slots *slots; - - clear_bit(PAGE_HEADLESS, &page->private); - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - clear_bit(PAGE_STALE, &page->private); - clear_bit(PAGE_CLAIMED, &page->private); - clear_bit(PAGE_MIGRATED, &page->private); - if (headless) - return zhdr; - - slots = alloc_slots(pool, gfp); - if (!slots) - return NULL; - - memset(zhdr, 0, sizeof(*zhdr)); - spin_lock_init(&zhdr->page_lock); - kref_init(&zhdr->refcount); - zhdr->cpu = -1; - zhdr->slots = slots; - zhdr->pool = pool; - INIT_LIST_HEAD(&zhdr->buddy); - INIT_WORK(&zhdr->work, compact_page_work); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_z3fold_page(struct page *page, bool headless) -{ - if (!headless) { - lock_page(page); - __ClearPageMovable(page); - unlock_page(page); - } - __free_page(page); -} - -/* Helper function to build the index */ -static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) -{ - return (bud + zhdr->first_num) & BUDDY_MASK; -} - -/* - * Encodes the handle of a particular buddy within a z3fold page. - * Zhdr->page_lock should be held as this function accesses first_num - * if bud != HEADLESS. - */ -static unsigned long __encode_handle(struct z3fold_header *zhdr, - struct z3fold_buddy_slots *slots, - enum buddy bud) -{ - unsigned long h = (unsigned long)zhdr; - int idx = 0; - - /* - * For a headless page, its handle is its pointer with the extra - * PAGE_HEADLESS bit set - */ - if (bud == HEADLESS) - return h | (1 << PAGE_HEADLESS); - - /* otherwise, return pointer to encoded handle */ - idx = __idx(zhdr, bud); - h += idx; - if (bud == LAST) - h |= (zhdr->last_chunks << BUDDY_SHIFT); - - write_lock(&slots->lock); - slots->slot[idx] = h; - write_unlock(&slots->lock); - return (unsigned long)&slots->slot[idx]; -} - -static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) -{ - return __encode_handle(zhdr, zhdr->slots, bud); -} - -/* only for LAST bud, returns zero otherwise */ -static unsigned short handle_to_chunks(unsigned long handle) -{ - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - return (addr & ~PAGE_MASK) >> BUDDY_SHIFT; -} - -/* - * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle - * but that doesn't matter. because the masking will result in the - * correct buddy number. 
- */ -static enum buddy handle_to_buddy(unsigned long handle) -{ - struct z3fold_header *zhdr; - struct z3fold_buddy_slots *slots = handle_to_slots(handle); - unsigned long addr; - - read_lock(&slots->lock); - WARN_ON(handle & (1 << PAGE_HEADLESS)); - addr = *(unsigned long *)handle; - read_unlock(&slots->lock); - zhdr = (struct z3fold_header *)(addr & PAGE_MASK); - return (addr - zhdr->first_num) & BUDDY_MASK; -} - -static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) -{ - return zhdr->pool; -} - -static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) -{ - struct page *page = virt_to_page(zhdr); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - WARN_ON(!list_empty(&zhdr->buddy)); - set_bit(PAGE_STALE, &page->private); - clear_bit(NEEDS_COMPACTING, &page->private); - spin_lock(&pool->lock); - spin_unlock(&pool->lock); - - if (locked) - z3fold_page_unlock(zhdr); - - spin_lock(&pool->stale_lock); - list_add(&zhdr->buddy, &pool->stale); - queue_work(pool->release_wq, &pool->work); - spin_unlock(&pool->stale_lock); - - atomic64_dec(&pool->pages_nr); -} - -static void release_z3fold_page_locked(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static void release_z3fold_page_locked_list(struct kref *ref) -{ - struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, - refcount); - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - WARN_ON(z3fold_page_trylock(zhdr)); - __release_z3fold_page(zhdr, true); -} - -static inline int put_z3fold_locked(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked); -} - -static inline int put_z3fold_locked_list(struct z3fold_header *zhdr) -{ - return kref_put(&zhdr->refcount, release_z3fold_page_locked_list); -} - -static void free_pages_work(struct work_struct *w) -{ - struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); - - spin_lock(&pool->stale_lock); - while (!list_empty(&pool->stale)) { - struct z3fold_header *zhdr = list_first_entry(&pool->stale, - struct z3fold_header, buddy); - struct page *page = virt_to_page(zhdr); - - list_del(&zhdr->buddy); - if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) - continue; - spin_unlock(&pool->stale_lock); - cancel_work_sync(&zhdr->work); - free_z3fold_page(page, false); - cond_resched(); - spin_lock(&pool->stale_lock); - } - spin_unlock(&pool->stale_lock); -} - -/* - * Returns the number of free chunks in a z3fold page. - * NB: can't be used with HEADLESS pages. - */ -static int num_free_chunks(struct z3fold_header *zhdr) -{ - int nfree; - /* - * If there is a middle object, pick up the bigger free space - * either before or after it. Otherwise just subtract the number - * of chunks occupied by the first and the last objects. - */ - if (zhdr->middle_chunks != 0) { - int nfree_before = zhdr->first_chunks ? - 0 : zhdr->start_middle - ZHDR_CHUNKS; - int nfree_after = zhdr->last_chunks ? 
- 0 : TOTAL_CHUNKS - - (zhdr->start_middle + zhdr->middle_chunks); - nfree = max(nfree_before, nfree_after); - } else - nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; - return nfree; -} - -/* Add to the appropriate unbuddied list */ -static inline void add_to_unbuddied(struct z3fold_pool *pool, - struct z3fold_header *zhdr) -{ - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || - zhdr->middle_chunks == 0) { - struct list_head *unbuddied; - int freechunks = num_free_chunks(zhdr); - - migrate_disable(); - unbuddied = this_cpu_ptr(pool->unbuddied); - spin_lock(&pool->lock); - list_add(&zhdr->buddy, &unbuddied[freechunks]); - spin_unlock(&pool->lock); - zhdr->cpu = smp_processor_id(); - migrate_enable(); - } -} - -static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) -{ - enum buddy bud = HEADLESS; - - if (zhdr->middle_chunks) { - if (!zhdr->first_chunks && - chunks <= zhdr->start_middle - ZHDR_CHUNKS) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - } else { - if (!zhdr->first_chunks) - bud = FIRST; - else if (!zhdr->last_chunks) - bud = LAST; - else - bud = MIDDLE; - } - - return bud; -} - -static inline void *mchunk_memmove(struct z3fold_header *zhdr, - unsigned short dst_chunk) -{ - void *beg = zhdr; - return memmove(beg + (dst_chunk << CHUNK_SHIFT), - beg + (zhdr->start_middle << CHUNK_SHIFT), - zhdr->middle_chunks << CHUNK_SHIFT); -} - -static inline bool buddy_single(struct z3fold_header *zhdr) -{ - return !((zhdr->first_chunks && zhdr->middle_chunks) || - (zhdr->first_chunks && zhdr->last_chunks) || - (zhdr->middle_chunks && zhdr->last_chunks)); -} - -static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - void *p = zhdr; - unsigned long old_handle = 0; - size_t sz = 0; - struct z3fold_header *new_zhdr = NULL; - int first_idx = __idx(zhdr, FIRST); - int middle_idx = __idx(zhdr, MIDDLE); - int last_idx = __idx(zhdr, LAST); - unsigned short *moved_chunks = NULL; - - /* - * No need to protect slots here -- all the slots are "local" and - * the page lock is already taken - */ - if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { - p += ZHDR_SIZE_ALIGNED; - sz = zhdr->first_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; - moved_chunks = &zhdr->first_chunks; - } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { - p += zhdr->start_middle << CHUNK_SHIFT; - sz = zhdr->middle_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; - moved_chunks = &zhdr->middle_chunks; - } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { - p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - sz = zhdr->last_chunks << CHUNK_SHIFT; - old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; - moved_chunks = &zhdr->last_chunks; - } - - if (sz > 0) { - enum buddy new_bud = HEADLESS; - short chunks = size_to_chunks(sz); - void *q; - - new_zhdr = __z3fold_alloc(pool, sz, false); - if (!new_zhdr) - return NULL; - - if (WARN_ON(new_zhdr == zhdr)) - goto out_fail; - - new_bud = get_free_buddy(new_zhdr, chunks); - q = new_zhdr; - switch (new_bud) { - case FIRST: - new_zhdr->first_chunks = chunks; - q += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - new_zhdr->middle_chunks = chunks; - new_zhdr->start_middle = - new_zhdr->first_chunks + ZHDR_CHUNKS; - q += new_zhdr->start_middle << CHUNK_SHIFT; - break; - case LAST: - new_zhdr->last_chunks = chunks; - q += PAGE_SIZE - (new_zhdr->last_chunks << 
CHUNK_SHIFT); - break; - default: - goto out_fail; - } - new_zhdr->foreign_handles++; - memcpy(q, p, sz); - write_lock(&zhdr->slots->lock); - *(unsigned long *)old_handle = (unsigned long)new_zhdr + - __idx(new_zhdr, new_bud); - if (new_bud == LAST) - *(unsigned long *)old_handle |= - (new_zhdr->last_chunks << BUDDY_SHIFT); - write_unlock(&zhdr->slots->lock); - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - - *moved_chunks = 0; - } - - return new_zhdr; - -out_fail: - if (new_zhdr && !put_z3fold_locked(new_zhdr)) { - add_to_unbuddied(pool, new_zhdr); - z3fold_page_unlock(new_zhdr); - } - return NULL; - -} - -#define BIG_CHUNK_GAP 3 -/* Has to be called with lock held */ -static int z3fold_compact_page(struct z3fold_header *zhdr) -{ - struct page *page = virt_to_page(zhdr); - - if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) - return 0; /* can't move middle chunk, it's used */ - - if (unlikely(PageIsolated(page))) - return 0; - - if (zhdr->middle_chunks == 0) - return 0; /* nothing to compact */ - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* move to the beginning */ - mchunk_memmove(zhdr, ZHDR_CHUNKS); - zhdr->first_chunks = zhdr->middle_chunks; - zhdr->middle_chunks = 0; - zhdr->start_middle = 0; - zhdr->first_num++; - return 1; - } - - /* - * moving data is expensive, so let's only do that if - * there's substantial gain (at least BIG_CHUNK_GAP chunks) - */ - if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && - zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= - BIG_CHUNK_GAP) { - mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - return 1; - } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && - TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle - + zhdr->middle_chunks) >= - BIG_CHUNK_GAP) { - unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - - zhdr->middle_chunks; - mchunk_memmove(zhdr, new_start); - zhdr->start_middle = new_start; - return 1; - } - - return 0; -} - -static void do_compact_page(struct z3fold_header *zhdr, bool locked) -{ - struct z3fold_pool *pool = zhdr_to_pool(zhdr); - struct page *page; - - page = virt_to_page(zhdr); - if (locked) - WARN_ON(z3fold_page_trylock(zhdr)); - else - z3fold_page_lock(zhdr); - if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { - z3fold_page_unlock(zhdr); - return; - } - spin_lock(&pool->lock); - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - if (put_z3fold_locked(zhdr)) - return; - - if (test_bit(PAGE_STALE, &page->private) || - test_and_set_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - return; - } - - if (!zhdr->foreign_handles && buddy_single(zhdr) && - zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { - if (!put_z3fold_locked(zhdr)) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - } - return; - } - - z3fold_compact_page(zhdr); - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); -} - -static void compact_page_work(struct work_struct *w) -{ - struct z3fold_header *zhdr = container_of(w, struct z3fold_header, - work); - - do_compact_page(zhdr, false); -} - -/* returns _locked_ z3fold page header or NULL */ -static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, - size_t size, bool can_sleep) -{ - struct z3fold_header *zhdr = NULL; - struct page *page; - struct list_head *unbuddied; - int chunks = size_to_chunks(size), i; - -lookup: - 
migrate_disable(); - /* First, try to find an unbuddied z3fold page. */ - unbuddied = this_cpu_ptr(pool->unbuddied); - for_each_unbuddied_list(i, chunks) { - struct list_head *l = &unbuddied[i]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr) - continue; - - /* Re-check under lock. */ - spin_lock(&pool->lock); - if (unlikely(zhdr != list_first_entry(READ_ONCE(l), - struct z3fold_header, buddy)) || - !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - migrate_enable(); - if (can_sleep) - cond_resched(); - goto lookup; - } - - /* - * this page could not be removed from its unbuddied - * list while pool lock was held, and then we've taken - * page lock so kref_put could not be called before - * we got here, so it's safe to just call kref_get() - */ - kref_get(&zhdr->refcount); - break; - } - migrate_enable(); - - if (!zhdr) { - int cpu; - - /* look for _exact_ match on other cpus' lists */ - for_each_online_cpu(cpu) { - struct list_head *l; - - unbuddied = per_cpu_ptr(pool->unbuddied, cpu); - spin_lock(&pool->lock); - l = &unbuddied[chunks]; - - zhdr = list_first_entry_or_null(READ_ONCE(l), - struct z3fold_header, buddy); - - if (!zhdr || !z3fold_page_trylock(zhdr)) { - spin_unlock(&pool->lock); - zhdr = NULL; - continue; - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - spin_unlock(&pool->lock); - - page = virt_to_page(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - if (can_sleep) - cond_resched(); - continue; - } - kref_get(&zhdr->refcount); - break; - } - } - - if (zhdr && !zhdr->slots) { - zhdr->slots = alloc_slots(pool, GFP_ATOMIC); - if (!zhdr->slots) - goto out_fail; - } - return zhdr; - -out_fail: - if (!put_z3fold_locked(zhdr)) { - add_to_unbuddied(pool, zhdr); - z3fold_page_unlock(zhdr); - } - return NULL; -} - -/* - * API Functions - */ - -/** - * z3fold_create_pool() - create a new z3fold pool - * @name: pool name - * @gfp: gfp flags when allocating the z3fold pool structure - * - * Return: pointer to the new z3fold pool or NULL if the metadata allocation - * failed. 
- */ -static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) -{ - struct z3fold_pool *pool = NULL; - int i, cpu; - - pool = kzalloc(sizeof(struct z3fold_pool), gfp); - if (!pool) - goto out; - pool->c_handle = kmem_cache_create("z3fold_handle", - sizeof(struct z3fold_buddy_slots), - SLOTS_ALIGN, 0, NULL); - if (!pool->c_handle) - goto out_c; - spin_lock_init(&pool->lock); - spin_lock_init(&pool->stale_lock); - pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, - __alignof__(struct list_head)); - if (!pool->unbuddied) - goto out_pool; - for_each_possible_cpu(cpu) { - struct list_head *unbuddied = - per_cpu_ptr(pool->unbuddied, cpu); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&unbuddied[i]); - } - INIT_LIST_HEAD(&pool->stale); - atomic64_set(&pool->pages_nr, 0); - pool->name = name; - pool->compact_wq = create_singlethread_workqueue(pool->name); - if (!pool->compact_wq) - goto out_unbuddied; - pool->release_wq = create_singlethread_workqueue(pool->name); - if (!pool->release_wq) - goto out_wq; - INIT_WORK(&pool->work, free_pages_work); - return pool; - -out_wq: - destroy_workqueue(pool->compact_wq); -out_unbuddied: - free_percpu(pool->unbuddied); -out_pool: - kmem_cache_destroy(pool->c_handle); -out_c: - kfree(pool); -out: - return NULL; -} - -/** - * z3fold_destroy_pool() - destroys an existing z3fold pool - * @pool: the z3fold pool to be destroyed - * - * The pool should be emptied before this function is called. - */ -static void z3fold_destroy_pool(struct z3fold_pool *pool) -{ - kmem_cache_destroy(pool->c_handle); - - /* - * We need to destroy pool->compact_wq before pool->release_wq, - * as any pending work on pool->compact_wq will call - * queue_work(pool->release_wq, &pool->work). - * - * There are still outstanding pages until both workqueues are drained, - * so we cannot unregister migration until then. - */ - - destroy_workqueue(pool->compact_wq); - destroy_workqueue(pool->release_wq); - free_percpu(pool->unbuddied); - kfree(pool); -} - -static const struct movable_operations z3fold_mops; - -/** - * z3fold_alloc() - allocates a region of a given size - * @pool: z3fold pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. 
- */ -static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks = size_to_chunks(size); - struct z3fold_header *zhdr = NULL; - struct page *page = NULL; - enum buddy bud; - bool can_sleep = gfpflags_allow_blocking(gfp); - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - - if (size > PAGE_SIZE) - return -ENOSPC; - - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - bud = HEADLESS; - else { -retry: - zhdr = __z3fold_alloc(pool, size, can_sleep); - if (zhdr) { - bud = get_free_buddy(zhdr, chunks); - if (bud == HEADLESS) { - if (!put_z3fold_locked(zhdr)) - z3fold_page_unlock(zhdr); - pr_err("No free chunks in unbuddied\n"); - WARN_ON(1); - goto retry; - } - page = virt_to_page(zhdr); - goto found; - } - bud = FIRST; - } - - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - - zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); - if (!zhdr) { - __free_page(page); - return -ENOMEM; - } - atomic64_inc(&pool->pages_nr); - - if (bud == HEADLESS) { - set_bit(PAGE_HEADLESS, &page->private); - goto headless; - } - if (can_sleep) { - lock_page(page); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } else { - WARN_ON(!trylock_page(page)); - __SetPageMovable(page, &z3fold_mops); - unlock_page(page); - } - z3fold_page_lock(zhdr); - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else if (bud == LAST) - zhdr->last_chunks = chunks; - else { - zhdr->middle_chunks = chunks; - zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; - } - add_to_unbuddied(pool, zhdr); - -headless: - spin_lock(&pool->lock); - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - if (bud != HEADLESS) - z3fold_page_unlock(zhdr); - - return 0; -} - -/** - * z3fold_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by z3fold_alloc() - * - * In the case that the z3fold page in which the allocation resides is under - * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function - * only sets the first|middle|last_chunks to 0. The page is actually freed - * once all buddies are evicted (see z3fold_reclaim_page() below). - */ -static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy bud; - bool page_claimed; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private); - - if (test_bit(PAGE_HEADLESS, &page->private)) { - /* if a headless page is under reclaim, just leave. - * NB: we use test_and_set_bit for a reason: if the bit - * has not been set before, we release this page - * immediately so we don't care about its value any more. 
- */ - if (!page_claimed) { - put_z3fold_header(zhdr); - free_z3fold_page(page, true); - atomic64_dec(&pool->pages_nr); - } - return; - } - - /* Non-headless case */ - bud = handle_to_buddy(handle); - - switch (bud) { - case FIRST: - zhdr->first_chunks = 0; - break; - case MIDDLE: - zhdr->middle_chunks = 0; - break; - case LAST: - zhdr->last_chunks = 0; - break; - default: - pr_err("%s: unknown bud %d\n", __func__, bud); - WARN_ON(1); - put_z3fold_header(zhdr); - return; - } - - if (!page_claimed) - free_handle(handle, zhdr); - if (put_z3fold_locked_list(zhdr)) - return; - if (page_claimed) { - /* the page has not been claimed by us */ - put_z3fold_header(zhdr); - return; - } - if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { - clear_bit(PAGE_CLAIMED, &page->private); - put_z3fold_header(zhdr); - return; - } - if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { - zhdr->cpu = -1; - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - do_compact_page(zhdr, true); - return; - } - kref_get(&zhdr->refcount); - clear_bit(PAGE_CLAIMED, &page->private); - queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); - put_z3fold_header(zhdr); -} - -/** - * z3fold_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * Extracts the buddy number from handle and constructs the pointer to the - * correct starting chunk within the page. - * - * Returns: a pointer to the mapped allocation - */ -static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - void *addr; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - addr = zhdr; - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - goto out; - - buddy = handle_to_buddy(handle); - switch (buddy) { - case FIRST: - addr += ZHDR_SIZE_ALIGNED; - break; - case MIDDLE: - addr += zhdr->start_middle << CHUNK_SHIFT; - set_bit(MIDDLE_CHUNK_MAPPED, &page->private); - break; - case LAST: - addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); - break; - default: - pr_err("unknown buddy id %d\n", buddy); - WARN_ON(1); - addr = NULL; - break; - } - - if (addr) - zhdr->mapped_count++; -out: - put_z3fold_header(zhdr); - return addr; -} - -/** - * z3fold_unmap() - unmaps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) -{ - struct z3fold_header *zhdr; - struct page *page; - enum buddy buddy; - - zhdr = get_z3fold_header(handle); - page = virt_to_page(zhdr); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return; - - buddy = handle_to_buddy(handle); - if (buddy == MIDDLE) - clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); - zhdr->mapped_count--; - put_z3fold_header(zhdr); -} - -/** - * z3fold_get_pool_pages() - gets the z3fold pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. 
- */ -static u64 z3fold_get_pool_pages(struct z3fold_pool *pool) -{ - return atomic64_read(&pool->pages_nr); -} - -static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) -{ - struct z3fold_header *zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(PageIsolated(page), page); - - if (test_bit(PAGE_HEADLESS, &page->private)) - return false; - - zhdr = page_address(page); - z3fold_page_lock(zhdr); - if (test_bit(NEEDS_COMPACTING, &page->private) || - test_bit(PAGE_STALE, &page->private)) - goto out; - - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) - goto out; - - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) - goto out; - pool = zhdr_to_pool(zhdr); - spin_lock(&pool->lock); - if (!list_empty(&zhdr->buddy)) - list_del_init(&zhdr->buddy); - spin_unlock(&pool->lock); - - kref_get(&zhdr->refcount); - z3fold_page_unlock(zhdr); - return true; - -out: - z3fold_page_unlock(zhdr); - return false; -} - -static int z3fold_page_migrate(struct page *newpage, struct page *page, - enum migrate_mode mode) -{ - struct z3fold_header *zhdr, *new_zhdr; - struct z3fold_pool *pool; - - VM_BUG_ON_PAGE(!PageIsolated(page), page); - VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page); - VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); - - zhdr = page_address(page); - pool = zhdr_to_pool(zhdr); - - if (!z3fold_page_trylock(zhdr)) - return -EAGAIN; - if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - return -EBUSY; - } - if (work_pending(&zhdr->work)) { - z3fold_page_unlock(zhdr); - return -EAGAIN; - } - new_zhdr = page_address(newpage); - memcpy(new_zhdr, zhdr, PAGE_SIZE); - newpage->private = page->private; - set_bit(PAGE_MIGRATED, &page->private); - z3fold_page_unlock(zhdr); - spin_lock_init(&new_zhdr->page_lock); - INIT_WORK(&new_zhdr->work, compact_page_work); - /* - * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, - * so we only have to reinitialize it. - */ - INIT_LIST_HEAD(&new_zhdr->buddy); - __ClearPageMovable(page); - - get_page(newpage); - z3fold_page_lock(new_zhdr); - if (new_zhdr->first_chunks) - encode_handle(new_zhdr, FIRST); - if (new_zhdr->last_chunks) - encode_handle(new_zhdr, LAST); - if (new_zhdr->middle_chunks) - encode_handle(new_zhdr, MIDDLE); - set_bit(NEEDS_COMPACTING, &newpage->private); - new_zhdr->cpu = smp_processor_id(); - __SetPageMovable(newpage, &z3fold_mops); - z3fold_page_unlock(new_zhdr); - - queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); - - /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. 
*/ - page->private = 0; - put_page(page); - return 0; -} - -static void z3fold_page_putback(struct page *page) -{ - struct z3fold_header *zhdr; - struct z3fold_pool *pool; - - zhdr = page_address(page); - pool = zhdr_to_pool(zhdr); - - z3fold_page_lock(zhdr); - if (!list_empty(&zhdr->buddy)) - list_del_init(&zhdr->buddy); - INIT_LIST_HEAD(&page->lru); - if (put_z3fold_locked(zhdr)) - return; - if (list_empty(&zhdr->buddy)) - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); -} - -static const struct movable_operations z3fold_mops = { - .isolate_page = z3fold_page_isolate, - .migrate_page = z3fold_page_migrate, - .putback_page = z3fold_page_putback, -}; - -/***************** - * zpool - ****************/ - -static void *z3fold_zpool_create(const char *name, gfp_t gfp) -{ - return z3fold_create_pool(name, gfp); -} - -static void z3fold_zpool_destroy(void *pool) -{ - z3fold_destroy_pool(pool); -} - -static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - return z3fold_alloc(pool, size, gfp, handle); -} -static void z3fold_zpool_free(void *pool, unsigned long handle) -{ - z3fold_free(pool, handle); -} - -static void *z3fold_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - return z3fold_map(pool, handle); -} -static void z3fold_zpool_unmap(void *pool, unsigned long handle) -{ - z3fold_unmap(pool, handle); -} - -static u64 z3fold_zpool_total_pages(void *pool) -{ - return z3fold_get_pool_pages(pool); -} - -static struct zpool_driver z3fold_zpool_driver = { - .type = "z3fold", - .sleep_mapped = true, - .owner = THIS_MODULE, - .create = z3fold_zpool_create, - .destroy = z3fold_zpool_destroy, - .malloc = z3fold_zpool_malloc, - .free = z3fold_zpool_free, - .map = z3fold_zpool_map, - .unmap = z3fold_zpool_unmap, - .total_pages = z3fold_zpool_total_pages, -}; - -MODULE_ALIAS("zpool-z3fold"); - -static int __init init_z3fold(void) -{ - /* - * Make sure the z3fold header is not larger than the page size and - * there has remaining spaces for its buddy. - */ - BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE); - zpool_register_driver(&z3fold_zpool_driver); - - return 0; -} - -static void __exit exit_z3fold(void) -{ - zpool_unregister_driver(&z3fold_zpool_driver); -} - -module_init(init_z3fold); -module_exit(exit_z3fold); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Vitaly Wool "); -MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages"); From 6df8bae8e851eacf2acf2237860213e002aba74f Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 29 Jan 2025 18:06:32 +0000 Subject: [PATCH 038/431] mm: zbud: remove zbud The zbud compressed pages allocator is rarely used, most users use zsmalloc. zbud consumes much more memory (only stores 1 or 2 compressed pages per physical page). The only advantage of zbud is a marginal performance improvement that by no means justify the memory overhead. Historically, zsmalloc had significantly worse latency than zbud and z3fold but offered better memory savings. This is no longer the case as shown by a simple recent analysis [1]. In a kernel build test on tmpfs in a limited cgroup, zbud 2-3% less time than zsmalloc, but at the cost of using ~32% more memory (1.5G vs 1.13G). The tradeoff does not make sense for zbud in any practical scenario. The only alleged advantage of zbud is not having the dependency on CONFIG_MMU, but CONFIG_SWAP already depends on CONFIG_MMU anyway, and zbud is only used by zswap. 
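As a rough, stand-alone illustration of where that memory overhead comes from (plain userspace C, not part of this patch; every number below is an assumption chosen only to make the bound visible):

/*
 * Illustration only: zbud never stores more than two compressed objects
 * per physical page, while a size-class allocator such as zsmalloc can
 * pack more when objects compress well.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_objs = 1000000;	/* compressed pages to store (assumed) */
	unsigned long page_sz = 4096;
	unsigned long avg_obj = 1200;		/* assumed average compressed size */

	unsigned long zbud_pages = (nr_objs + 1) / 2;		/* 2-per-page cap */

	unsigned long per_page = page_sz / avg_obj;		/* ~3 objects per page here */
	unsigned long packed_pages = (nr_objs + per_page - 1) / per_page;

	printf("zbud needs ~%lu pages, size-class packing ~%lu pages\n",
	       zbud_pages, packed_pages);
	return 0;
}

With objects that compress below a third of a page, the two-per-page cap alone costs zbud roughly 50% more pool pages in this toy setup, which is the same effect behind the ~32% figure quoted above.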
Remove zbud after z3fold's removal, leaving zsmalloc as the one and only zpool allocator. Leave the removal of the zpool API (and its associated config options) to a followup cleanup after no more allocators show up. Deprecating zbud for a few cycles before removing it was initially proposed [2], like z3fold was marked as deprecated for 2 cycles [3]. However, Johannes rightfully pointed out that the 2 cycles is too short for most downstream consumers, and z3fold was deprecated first only as a courtesy anyway. [1]https://lore.kernel.org/lkml/CAJD7tkbRF6od-2x_L8-A1QL3=2Ww13sCj4S3i4bNndqF+3+_Vg@mail.gmail.com/ [2]https://lore.kernel.org/lkml/Z5gdnSX5Lv-nfjQL@google.com/ [3]https://lore.kernel.org/lkml/20240904233343.933462-1-yosryahmed@google.com/ Link: https://lkml.kernel.org/r/20250129180633.3501650-3-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Reviewed-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Alexander Gordeev Cc: Chengming Zhou Cc: Christian Borntraeger Cc: Dan Streetman Cc: Heiko Carstens Cc: Huacai Chen Cc: Miaohe Lin Cc: Seth Jennings Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vitaly Wool Cc: Vlastimil Babka Cc: WANG Xuerui Signed-off-by: Andrew Morton --- CREDITS | 2 + Documentation/admin-guide/mm/zswap.rst | 10 +- MAINTAINERS | 7 - arch/loongarch/configs/loongson3_defconfig | 3 +- arch/s390/configs/debug_defconfig | 2 +- arch/s390/configs/defconfig | 2 +- include/linux/zpool.h | 5 +- mm/Kconfig | 18 - mm/Makefile | 1 - mm/zbud.c | 455 --------------------- mm/zpool.c | 4 +- 11 files changed, 13 insertions(+), 496 deletions(-) delete mode 100644 mm/zbud.c diff --git a/CREDITS b/CREDITS index aeccd79f9778..5e65bf8553ba 100644 --- a/CREDITS +++ b/CREDITS @@ -1895,6 +1895,7 @@ S: Czech Republic N: Seth Jennings E: sjenning@redhat.com D: Creation and maintenance of zswap +D: Creation and maintenace of the zbud allocator N: Jeremy Kerr D: Maintainer of SPU File System @@ -3788,6 +3789,7 @@ N: Dan Streetman E: ddstreet@ieee.org D: Maintenance and development of zswap D: Creation and maintenance of the zpool API +D: Maintenace of the zbud allocator N: Drew Sullivan E: drew@ss.org diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst index 3598dcd7dbe7..fd3370aa43fe 100644 --- a/Documentation/admin-guide/mm/zswap.rst +++ b/Documentation/admin-guide/mm/zswap.rst @@ -60,15 +60,13 @@ accessed. The compressed memory pool grows on demand and shrinks as compressed pages are freed. The pool is not preallocated. By default, a zpool of type selected in ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created, but it can be overridden at boot time by setting the ``zpool`` attribute, -e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs +e.g. ``zswap.zpool=zsmalloc``. It can also be changed at runtime using the sysfs ``zpool`` attribute, e.g.:: - echo zbud > /sys/module/zswap/parameters/zpool + echo zsmalloc > /sys/module/zswap/parameters/zpool -The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which -means the compression ratio will always be 2:1 or worse (because of half-full -zbud pages). The zsmalloc type zpool has a more complex compressed page -storage method, and it can achieve greater storage densities. +The zsmalloc type zpool has a complex compressed page storage method, and it +can achieve great storage densities. 
When a swap page is passed from swapout to zswap, zswap maintains a mapping of the swap entry, a combination of the swap type and swap offset, to the zpool diff --git a/MAINTAINERS b/MAINTAINERS index a65a73227e3d..60421d3e48a8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26191,13 +26191,6 @@ F: Documentation/networking/device_drivers/hamradio/z8530drv.rst F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h -ZBUD COMPRESSED PAGE ALLOCATOR -M: Seth Jennings -M: Dan Streetman -L: linux-mm@kvack.org -S: Maintained -F: mm/zbud.c - ZD1211RW WIRELESS DRIVER L: linux-wireless@vger.kernel.org S: Orphan diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 73c77500ac46..7ce5beb3cbf3 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -109,8 +109,7 @@ CONFIG_BINFMT_MISC=m CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y # CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 80bdfbae6e5b..074df6328376 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -92,7 +92,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y CONFIG_SLUB_STATS=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 449a0e996b96..ac68e663dd4d 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -86,7 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y # CONFIG_COMPAT_BRK is not set diff --git a/include/linux/zpool.h b/include/linux/zpool.h index a67d62b79698..5e6dc46b8cc4 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -4,9 +4,8 @@ * * Copyright (C) 2014 Dan Streetman * - * This is a common frontend for the zbud and zsmalloc memory - * storage pool implementations. Typically, this is used to - * store compressed memory. + * This is a common frontend for the zswap compressed memory storage + * implementations. */ #ifndef _ZPOOL_H_ diff --git a/mm/Kconfig b/mm/Kconfig index 6fa19022c09b..fba9757e5814 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -129,7 +129,6 @@ choice prompt "Default allocator" depends on ZSWAP default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU - default ZSWAP_ZPOOL_DEFAULT_ZBUD help Selects the default allocator for the compressed cache for swap pages. @@ -140,12 +139,6 @@ choice The selection made here can be overridden by using the kernel command line 'zswap.zpool=' option. -config ZSWAP_ZPOOL_DEFAULT_ZBUD - bool "zbud" - select ZBUD - help - Use the zbud allocator as the default allocator. - config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC bool "zsmalloc" select ZSMALLOC @@ -156,20 +149,9 @@ endchoice config ZSWAP_ZPOOL_DEFAULT string depends on ZSWAP - default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC default "" -config ZBUD - tristate "2:1 compression allocator (zbud)" - depends on ZSWAP - help - A special purpose allocator for storing compressed pages. - It is designed to store up to two compressed pages per physical - page. 
While this design limits storage density, it has simple and - deterministic reclaim properties that make it preferable to a higher - density approach when reclaim will be used. - config ZSMALLOC tristate prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM) diff --git a/mm/Makefile b/mm/Makefile index e4c03da3c084..53392d2af3a5 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -113,7 +113,6 @@ obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o obj-$(CONFIG_PAGE_OWNER) += page_owner.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o -obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o diff --git a/mm/zbud.c b/mm/zbud.c deleted file mode 100644 index e9836fff9438..000000000000 --- a/mm/zbud.c +++ /dev/null @@ -1,455 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * zbud.c - * - * Copyright (C) 2013, Seth Jennings, IBM - * - * Concepts based on zcache internal zbud allocator by Dan Magenheimer. - * - * zbud is an special purpose allocator for storing compressed pages. Contrary - * to what its name may suggest, zbud is not a buddy allocator, but rather an - * allocator that "buddies" two compressed pages together in a single memory - * page. - * - * While this design limits storage density, it has simple and deterministic - * reclaim properties that make it preferable to a higher density approach when - * reclaim will be used. - * - * zbud works by storing compressed pages, or "zpages", together in pairs in a - * single memory page called a "zbud page". The first buddy is "left - * justified" at the beginning of the zbud page, and the last buddy is "right - * justified" at the end of the zbud page. The benefit is that if either - * buddy is freed, the freed buddy space, coalesced with whatever slack space - * that existed between the buddies, results in the largest possible free region - * within the zbud page. - * - * zbud also provides an attractive lower bound on density. The ratio of zpages - * to zbud pages can not be less than 1. This ensures that zbud can never "do - * harm" by using more pages to store zpages than the uncompressed zpages would - * have used on their own. - * - * zbud pages are divided into "chunks". The size of the chunks is fixed at - * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages - * into chunks allows organizing unbuddied zbud pages into a manageable number - * of unbuddied lists according to the number of free chunks available in the - * zbud page. - * - * The zbud API differs from that of conventional allocators in that the - * allocation function, zbud_alloc(), returns an opaque handle to the user, - * not a dereferenceable pointer. The user must map the handle using - * zbud_map() in order to get a usable pointer by which to access the - * allocation data and unmap the handle with zbud_unmap() when operations - * on the allocation data are complete. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -/***************** - * Structures -*****************/ -/* - * NCHUNKS_ORDER determines the internal allocation granularity, effectively - * adjusting internal fragmentation. It also determines the number of - * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the - * allocation granularity will be in chunks of size PAGE_SIZE/64. 
As one chunk - * in allocated page is occupied by zbud header, NCHUNKS will be calculated to - * 63 which shows the max number of free chunks in zbud page, also there will be - * 63 freelists per pool. - */ -#define NCHUNKS_ORDER 6 - -#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define ZHDR_SIZE_ALIGNED CHUNK_SIZE -#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) - -struct zbud_pool; - -/** - * struct zbud_pool - stores metadata for each zbud pool - * @lock: protects all pool fields and first|last_chunk fields of any - * zbud page in the pool - * @unbuddied: array of lists tracking zbud pages that only contain one buddy; - * the lists each zbud page is added to depends on the size of - * its free region. - * @buddied: list tracking the zbud pages that contain two buddies; - * these zbud pages are full - * @pages_nr: number of zbud pages in the pool. - * - * This structure is allocated at pool creation time and maintains metadata - * pertaining to a particular zbud pool. - */ -struct zbud_pool { - spinlock_t lock; - union { - /* - * Reuse unbuddied[0] as buddied on the ground that - * unbuddied[0] is unused. - */ - struct list_head buddied; - struct list_head unbuddied[NCHUNKS]; - }; - u64 pages_nr; -}; - -/* - * struct zbud_header - zbud page metadata occupying the first chunk of each - * zbud page. - * @buddy: links the zbud page into the unbuddied/buddied lists in the pool - * @first_chunks: the size of the first buddy in chunks, 0 if free - * @last_chunks: the size of the last buddy in chunks, 0 if free - */ -struct zbud_header { - struct list_head buddy; - unsigned int first_chunks; - unsigned int last_chunks; -}; - -/***************** - * Helpers -*****************/ -/* Just to make the code easier to read */ -enum buddy { - FIRST, - LAST -}; - -/* Converts an allocation size in bytes to size in zbud chunks */ -static int size_to_chunks(size_t size) -{ - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -#define for_each_unbuddied_list(_iter, _begin) \ - for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) - -/* Initializes the zbud header of a newly allocated zbud page */ -static struct zbud_header *init_zbud_page(struct page *page) -{ - struct zbud_header *zhdr = page_address(page); - zhdr->first_chunks = 0; - zhdr->last_chunks = 0; - INIT_LIST_HEAD(&zhdr->buddy); - return zhdr; -} - -/* Resets the struct page fields and frees the page */ -static void free_zbud_page(struct zbud_header *zhdr) -{ - __free_page(virt_to_page(zhdr)); -} - -/* - * Encodes the handle of a particular buddy within a zbud page - * Pool lock should be held as this function accesses first|last_chunks - */ -static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) -{ - unsigned long handle; - - /* - * For now, the encoded handle is actually just the pointer to the data - * but this might not always be the case. A little information hiding. - * Add CHUNK_SIZE to the handle if it is the first allocation to jump - * over the zbud header in the first chunk. 
- */ - handle = (unsigned long)zhdr; - if (bud == FIRST) - /* skip over zbud header */ - handle += ZHDR_SIZE_ALIGNED; - else /* bud == LAST */ - handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); - return handle; -} - -/* Returns the zbud page where a given handle is stored */ -static struct zbud_header *handle_to_zbud_header(unsigned long handle) -{ - return (struct zbud_header *)(handle & PAGE_MASK); -} - -/* Returns the number of free chunks in a zbud page */ -static int num_free_chunks(struct zbud_header *zhdr) -{ - /* - * Rather than branch for different situations, just use the fact that - * free buddies have a length of zero to simplify everything. - */ - return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; -} - -/***************** - * API Functions -*****************/ -/** - * zbud_create_pool() - create a new zbud pool - * @gfp: gfp flags when allocating the zbud pool structure - * - * Return: pointer to the new zbud pool or NULL if the metadata allocation - * failed. - */ -static struct zbud_pool *zbud_create_pool(gfp_t gfp) -{ - struct zbud_pool *pool; - int i; - - pool = kzalloc(sizeof(struct zbud_pool), gfp); - if (!pool) - return NULL; - spin_lock_init(&pool->lock); - for_each_unbuddied_list(i, 0) - INIT_LIST_HEAD(&pool->unbuddied[i]); - INIT_LIST_HEAD(&pool->buddied); - pool->pages_nr = 0; - return pool; -} - -/** - * zbud_destroy_pool() - destroys an existing zbud pool - * @pool: the zbud pool to be destroyed - * - * The pool should be emptied before this function is called. - */ -static void zbud_destroy_pool(struct zbud_pool *pool) -{ - kfree(pool); -} - -/** - * zbud_alloc() - allocates a region of a given size - * @pool: zbud pool from which to allocate - * @size: size in bytes of the desired allocation - * @gfp: gfp flags used if the pool needs to grow - * @handle: handle of the new allocation - * - * This function will attempt to find a free region in the pool large enough to - * satisfy the allocation request. A search of the unbuddied lists is - * performed first. If no suitable free region is found, then a new page is - * allocated and added to the pool to satisfy the request. - * - * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used - * as zbud pool pages. - * - * Return: 0 if success and handle is set, otherwise -EINVAL if the size or - * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate - * a new page. - */ -static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - int chunks, i, freechunks; - struct zbud_header *zhdr = NULL; - enum buddy bud; - struct page *page; - - if (!size || (gfp & __GFP_HIGHMEM)) - return -EINVAL; - if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) - return -ENOSPC; - chunks = size_to_chunks(size); - spin_lock(&pool->lock); - - /* First, try to find an unbuddied zbud page. 
*/ - for_each_unbuddied_list(i, chunks) { - if (!list_empty(&pool->unbuddied[i])) { - zhdr = list_first_entry(&pool->unbuddied[i], - struct zbud_header, buddy); - list_del(&zhdr->buddy); - if (zhdr->first_chunks == 0) - bud = FIRST; - else - bud = LAST; - goto found; - } - } - - /* Couldn't find unbuddied zbud page, create new one */ - spin_unlock(&pool->lock); - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - spin_lock(&pool->lock); - pool->pages_nr++; - zhdr = init_zbud_page(page); - bud = FIRST; - -found: - if (bud == FIRST) - zhdr->first_chunks = chunks; - else - zhdr->last_chunks = chunks; - - if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } else { - /* Add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } - - *handle = encode_handle(zhdr, bud); - spin_unlock(&pool->lock); - - return 0; -} - -/** - * zbud_free() - frees the allocation associated with the given handle - * @pool: pool in which the allocation resided - * @handle: handle associated with the allocation returned by zbud_alloc() - */ -static void zbud_free(struct zbud_pool *pool, unsigned long handle) -{ - struct zbud_header *zhdr; - int freechunks; - - spin_lock(&pool->lock); - zhdr = handle_to_zbud_header(handle); - - /* If first buddy, handle will be page aligned */ - if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) - zhdr->last_chunks = 0; - else - zhdr->first_chunks = 0; - - /* Remove from existing buddy list */ - list_del(&zhdr->buddy); - - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* zbud page is empty, free */ - free_zbud_page(zhdr); - pool->pages_nr--; - } else { - /* Add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } - - spin_unlock(&pool->lock); -} - -/** - * zbud_map() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be mapped - * - * While trivial for zbud, the mapping functions for others allocators - * implementing this allocation API could have more complex information encoded - * in the handle and could create temporary mappings to make the data - * accessible to the user. - * - * Returns: a pointer to the mapped allocation - */ -static void *zbud_map(struct zbud_pool *pool, unsigned long handle) -{ - return (void *)(handle); -} - -/** - * zbud_unmap() - maps the allocation associated with the given handle - * @pool: pool in which the allocation resides - * @handle: handle associated with the allocation to be unmapped - */ -static void zbud_unmap(struct zbud_pool *pool, unsigned long handle) -{ -} - -/** - * zbud_get_pool_pages() - gets the zbud pool size in pages - * @pool: pool whose size is being queried - * - * Returns: size in pages of the given pool. The pool lock need not be - * taken to access pages_nr. 
- */ -static u64 zbud_get_pool_pages(struct zbud_pool *pool) -{ - return pool->pages_nr; -} - -/***************** - * zpool - ****************/ - -static void *zbud_zpool_create(const char *name, gfp_t gfp) -{ - return zbud_create_pool(gfp); -} - -static void zbud_zpool_destroy(void *pool) -{ - zbud_destroy_pool(pool); -} - -static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, - unsigned long *handle) -{ - return zbud_alloc(pool, size, gfp, handle); -} -static void zbud_zpool_free(void *pool, unsigned long handle) -{ - zbud_free(pool, handle); -} - -static void *zbud_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - return zbud_map(pool, handle); -} -static void zbud_zpool_unmap(void *pool, unsigned long handle) -{ - zbud_unmap(pool, handle); -} - -static u64 zbud_zpool_total_pages(void *pool) -{ - return zbud_get_pool_pages(pool); -} - -static struct zpool_driver zbud_zpool_driver = { - .type = "zbud", - .sleep_mapped = true, - .owner = THIS_MODULE, - .create = zbud_zpool_create, - .destroy = zbud_zpool_destroy, - .malloc = zbud_zpool_malloc, - .free = zbud_zpool_free, - .map = zbud_zpool_map, - .unmap = zbud_zpool_unmap, - .total_pages = zbud_zpool_total_pages, -}; - -MODULE_ALIAS("zpool-zbud"); - -static int __init init_zbud(void) -{ - /* Make sure the zbud header will fit in one chunk */ - BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); - pr_info("loaded\n"); - - zpool_register_driver(&zbud_zpool_driver); - - return 0; -} - -static void __exit exit_zbud(void) -{ - zpool_unregister_driver(&zbud_zpool_driver); - pr_info("unloaded\n"); -} - -module_init(init_zbud); -module_exit(exit_zbud); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Seth Jennings "); -MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages"); diff --git a/mm/zpool.c b/mm/zpool.c index b9fda1fa857d..4bbd12d4b659 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -95,7 +95,7 @@ static void zpool_put_driver(struct zpool_driver *driver) /** * zpool_has_pool() - Check if the pool driver is available - * @type: The type of the zpool to check (e.g. zbud, zsmalloc) + * @type: The type of the zpool to check (e.g. zsmalloc) * * This checks if the @type pool driver is available. This will try to load * the requested module, if needed, but there is no guarantee the module will @@ -130,7 +130,7 @@ EXPORT_SYMBOL(zpool_has_pool); /** * zpool_create_pool() - Create a new zpool - * @type: The type of the zpool to create (e.g. zbud, zsmalloc) + * @type: The type of the zpool to create (e.g. zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. * From 3a75ccba047b11a4e8c437e8347b2d1746f1d808 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:49 +0000 Subject: [PATCH 039/431] mm: simplify vma merge structure and expand comments Patch series "mm: further simplify VMA merge operation", v3. While significant efforts have been made to improve the VMA merge operation, there remains remnants of the bad (or rather confusing) old days, which make the code difficult to understand, more bug prone and thus harder to modify. This series attempts to significantly improve matters in a number of respects - with a focus on simplifying the commit_merge() function which actually actions the merge operation - and importantly, adjusting the two most confusing merge cases - those in which we 'adjust' the VMA immediately adjacent to the one being merged. 
One source of confusion is the VMAs being threaded through the operation themselves - vmg->prev, vmg->vma and vmg->next.

At the start of the operation, vmg->vma is either NULL if a new VMA is proposed to be added, or if not then a pointer to an existing VMA being modified, and prev/next are (perhaps not present) VMAs sitting immediately before and after the range specified in vmg->start, end, respectively.

However, during the VMA merge operation, we change vmg->start, end and pgoff to span the newly merged range and vmg->vma to either be:

a. The ultimately returned VMA (in most cases) or
b. A VMA which we will manipulate, but ultimately instead return vmg->next.

Case b. especially here is confusing for somebody reading this code, but the fact we update this state, along with vmg->start, end, pgoff, only makes matters worse.

We simplify things by replacing vmg->vma with vmg->middle and never changing it - this is always either NULL (for a new VMA) or the VMA being modified between vmg->prev and vmg->next.

We further simplify by placing the merged VMA in a new vmg->target field - whether case b. above is the case or not. The reader of the code can now simply rely on vmg->middle being the middle VMA and vmg->target being the ultimately merged VMA.

We additionally tackle the confusing cases where we 'adjust' VMAs other than the one we ultimately return as the merged VMA (this includes case b. above). These are:

(1) merge

      <----------->
|------||--------|           |------------|---|
| prev || middle |     ->    |   target   | m |
|------||--------|           |------------|---|

In which case middle must be adjusted so middle->vm_start is increased as well as performing the merge.

(2) (equivalent to case b. above)

           <------------->
|---------||------|          |---|-------------|
| middle  || next |    ->    | m |   target    |
|---------||------|          |---|-------------|

In which case next must be adjusted so next->vm_start is decreased as well as performing the merge.

These cases have previously been handled by calculating and passing around a dubious and confusing 'adj_start' parameter alongside a pointer to an 'adjust' VMA indicating which VMA requires additional adjustment (middle in case 1 and next in case 2).

With the VMG structure in place we are able to avoid this by simply setting a merge flag to describe each case:

(1) Sets the vmg->__adjust_middle_start flag
(2) Sets the vmg->__adjust_next_start flag

By doing so it turns out we can vastly simplify the logic and calculate what is required to perform the operation.

Taken together, the refactorings make it far easier to understand what is being done even in these more confusing cases, make the code far more maintainable, debuggable and testable, providing more internal state indicating what is happening in the merge operation.

The changes have no functional net impact on the merge operation and everything should still behave as it did before.

This patch (of 5):

The merge code, while much improved, still has a number of points of confusion. As part of a broader series cleaning this up to make this more maintainable, we start by addressing some confusion around vma_merge_struct fields.

So far, the caller either provides no vmg->vma (a new VMA) or supplies the existing VMA which is being altered, setting vmg->start, end, pgoff to the proposed VMA dimensions. vmg->vma is then updated, as are vmg->start, end, pgoff as the merge process proceeds and the appropriate merge strategy is determined.
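To make that pre-series flow concrete, a reduced stand-alone mock follows (plain userspace C, not kernel code; mock_vma, mock_vmg and mock_merge are invented for illustration). It shows the caller handing in the existing VMA via vmg->vma along with the proposed range, and the merge step then overwriting both:

/* Stand-alone mock of the old calling convention; not kernel code. */
#include <stdio.h>

struct mock_vma { unsigned long vm_start, vm_end; };

struct mock_vmg {
	struct mock_vma *prev, *vma, *next;	/* vma: the "middle" VMA      */
	unsigned long start, end, pgoff;	/* proposed range, then mutated */
};

/* Pretend merge: extend prev over the proposed range. */
static struct mock_vma *mock_merge(struct mock_vmg *vmg)
{
	vmg->vma = vmg->prev;			/* vma silently becomes the target */
	vmg->start = vmg->prev->vm_start;	/* range rewritten to merged span  */
	return vmg->vma;
}

int main(void)
{
	struct mock_vma prev = { 0x1000, 0x3000 }, middle = { 0x3000, 0x6000 };
	struct mock_vmg vmg = { .prev = &prev, .vma = &middle,
				.start = 0x3000, .end = 0x6000 };

	struct mock_vma *merged = mock_merge(&vmg);

	/* vmg.vma no longer points at the VMA the caller passed in. */
	printf("merged spans [%lx,%lx), vmg.vma == prev? %d\n",
	       vmg.start, vmg.end, merged == &prev);
	return 0;
}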
This is rather confusing, as vmg->vma starts off as the 'middle' VMA between vmg->prev,next, but becomes the 'target' VMA, except in one specific edge case (merge next, shrink middle).

In this patch we introduce vmg->middle to describe the VMA that is between vmg->prev and vmg->next, and does NOT change during the merge operation.

We replace vmg->vma with vmg->target, and use this only during the merge operation itself. Aside from the merge right, shrink middle case, this becomes the VMA that forms the basis of the VMA that is returned. This edge case can be addressed in a future commit.

We also add a number of comments to explain what is going on.

Finally, we adjust the ASCII diagrams showing each merge case in vma_merge_existing_range() to be clearer - the arrow range previously showed the vmg->start, end spanned area, but it is clearer to change this to show the final merged VMA.

This patch has no change in functional behaviour.

Link: https://lkml.kernel.org/r/cover.1738326519.git.lorenzo.stoakes@oracle.com
Link: https://lkml.kernel.org/r/4dfe60f1419d55e5d0516f56349695d73a57184c.1738326519.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes
Reviewed-by: Vlastimil Babka
Cc: Jann Horn
Cc: Liam Howlett
Signed-off-by: Andrew Morton
---
 mm/debug.c              |  18 ++---
 mm/mmap.c               |   2 +-
 mm/vma.c                | 166 +++++++++++++++++++++-------------
 mm/vma.h                |  42 ++++++++--
 tools/testing/vma/vma.c |  52 ++++++-------
 5 files changed, 159 insertions(+), 121 deletions(-)

diff --git a/mm/debug.c b/mm/debug.c
index 8d2acf432385..c9e07651677b 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -261,7 +261,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
 	pr_warn("vmg %px state: mm %px pgoff %lx\n"
 		"vmi %px [%lx,%lx)\n"
-		"prev %px next %px vma %px\n"
+		"prev %px middle %px next %px target %px\n"
 		"start %lx end %lx flags %lx\n"
 		"file %px anon_vma %px policy %px\n"
 		"uffd_ctx %px\n"
@@ -270,7 +270,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
 		vmg, vmg->mm, vmg->pgoff,
 		vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
 		vmg->vmi ?
vma_iter_end(vmg->vmi) : 0, - vmg->prev, vmg->next, vmg->vma, + vmg->prev, vmg->middle, vmg->next, vmg->target, vmg->start, vmg->end, vmg->flags, vmg->file, vmg->anon_vma, vmg->policy, #ifdef CONFIG_USERFAULTFD @@ -288,13 +288,6 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px mm: (NULL)\n", vmg); } - if (vmg->vma) { - pr_warn("vmg %px vma:\n", vmg); - dump_vma(vmg->vma); - } else { - pr_warn("vmg %px vma: (NULL)\n", vmg); - } - if (vmg->prev) { pr_warn("vmg %px prev:\n", vmg); dump_vma(vmg->prev); @@ -302,6 +295,13 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) pr_warn("vmg %px prev: (NULL)\n", vmg); } + if (vmg->middle) { + pr_warn("vmg %px middle:\n", vmg); + dump_vma(vmg->middle); + } else { + pr_warn("vmg %px middle: (NULL)\n", vmg); + } + if (vmg->next) { pr_warn("vmg %px next:\n", vmg); dump_vma(vmg->next); diff --git a/mm/mmap.c b/mm/mmap.c index cda01071c7b1..6401a1d73f4a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1707,7 +1707,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) /* * cover the whole range: [new_start, old_end) */ - vmg.vma = vma; + vmg.middle = vma; if (vma_expand(&vmg)) return -ENOMEM; diff --git a/mm/vma.c b/mm/vma.c index 71ca012c616c..7fca21dee7b3 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -52,7 +52,7 @@ struct mmap_state { .pgoff = (map_)->pgoff, \ .file = (map_)->file, \ .prev = (map_)->prev, \ - .vma = vma_, \ + .middle = vma_, \ .next = (vma_) ? NULL : (map_)->next, \ .state = VMA_MERGE_START, \ .merge_flags = VMG_FLAG_DEFAULT, \ @@ -639,7 +639,7 @@ static int commit_merge(struct vma_merge_struct *vmg, { struct vma_prepare vp; - init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2); + init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && vp.anon_vma != adjust->anon_vma); @@ -652,15 +652,15 @@ static int commit_merge(struct vma_merge_struct *vmg, adjust->vm_end); } - if (vma_iter_prealloc(vmg->vmi, vmg->vma)) + if (vma_iter_prealloc(vmg->vmi, vmg->target)) return -ENOMEM; vma_prepare(&vp); - vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start); - vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); + vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); + vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); if (expanded) - vma_iter_store(vmg->vmi, vmg->vma); + vma_iter_store(vmg->vmi, vmg->target); if (adj_start) { adjust->vm_start += adj_start; @@ -671,7 +671,7 @@ static int commit_merge(struct vma_merge_struct *vmg, } } - vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm); + vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); return 0; } @@ -694,8 +694,9 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * identical properties. * * This function checks for the existence of any such mergeable VMAs and updates - * the maple tree describing the @vmg->vma->vm_mm address space to account for - * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge. + * the maple tree describing the @vmg->middle->vm_mm address space to account + * for this, as well as any VMAs shrunk/expanded/deleted as a result of this + * merge. * * As part of this operation, if a merge occurs, the @vmg object will have its * vma, start, end, and pgoff fields modified to execute the merge. Subsequent @@ -704,45 +705,47 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * Returns: The merged VMA if merge succeeds, or NULL otherwise. 
* * ASSUMPTIONS: - * - The caller must assign the VMA to be modifed to @vmg->vma. + * - The caller must assign the VMA to be modifed to @vmg->middle. * - The caller must have set @vmg->prev to the previous VMA, if there is one. * - The caller must not set @vmg->next, as we determine this. * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. - * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end). + * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end). */ static __must_check struct vm_area_struct *vma_merge_existing_range( struct vma_merge_struct *vmg) { - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next, *res; struct vm_area_struct *anon_dup = NULL; struct vm_area_struct *adjust = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; - bool left_side = vma && start == vma->vm_start; - bool right_side = vma && end == vma->vm_end; + bool left_side = middle && start == middle->vm_start; + bool right_side = middle && end == middle->vm_end; int err = 0; long adj_start = 0; - bool merge_will_delete_vma, merge_will_delete_next; + bool merge_will_delete_middle, merge_will_delete_next; bool merge_left, merge_right, merge_both; bool expanded; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */ + VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */ VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg); VM_WARN_ON_VMG(start >= end, vmg); /* - * If vma == prev, then we are offset into a VMA. Otherwise, if we are + * If middle == prev, then we are offset into a VMA. Otherwise, if we are * not, we must span a portion of the VMA. */ - VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) || - vmg->end > vma->vm_end), vmg); - /* The vmi must be positioned within vmg->vma. */ - VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && - vma_iter_addr(vmg->vmi) < vma->vm_end), vmg); + VM_WARN_ON_VMG(middle && + ((middle != prev && vmg->start != middle->vm_start) || + vmg->end > middle->vm_end), vmg); + /* The vmi must be positioned within vmg->middle. */ + VM_WARN_ON_VMG(middle && + !(vma_iter_addr(vmg->vmi) >= middle->vm_start && + vma_iter_addr(vmg->vmi) < middle->vm_end), vmg); vmg->state = VMA_MERGE_NOMERGE; @@ -776,13 +779,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = merge_left && merge_right; /* If we span the entire VMA, a merge implies it will be deleted. */ - merge_will_delete_vma = left_side && right_side; + merge_will_delete_middle = left_side && right_side; /* - * If we need to remove vma in its entirety but are unable to do so, + * If we need to remove middle in its entirety but are unable to do so, * we have no sensible recourse but to abort the merge. */ - if (merge_will_delete_vma && !can_merge_remove_vma(vma)) + if (merge_will_delete_middle && !can_merge_remove_vma(middle)) return NULL; /* @@ -793,7 +796,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* * If we cannot delete next, then we can reduce the operation to merging - * prev and vma (thereby deleting vma). + * prev and middle (thereby deleting middle). 
*/ if (merge_will_delete_next && !can_merge_remove_vma(next)) { merge_will_delete_next = false; @@ -801,8 +804,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = false; } - /* No matter what happens, we will be adjusting vma. */ - vma_start_write(vma); + /* No matter what happens, we will be adjusting middle. */ + vma_start_write(middle); if (merge_left) vma_start_write(prev); @@ -812,13 +815,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (merge_both) { /* - * |<----->| - * |-------*********-------| - * prev vma next - * extend delete delete + * |<-------------------->| + * |-------********-------| + * prev middle next + * extend delete delete */ - vmg->vma = prev; + vmg->target = prev; vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; @@ -826,78 +829,79 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* * We already ensured anon_vma compatibility above, so now it's * simply a case of, if prev has no anon_vma object, which of - * next or vma contains the anon_vma we must duplicate. + * next or middle contains the anon_vma we must duplicate. */ - err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup); + err = dup_anon_vma(prev, next->anon_vma ? next : middle, + &anon_dup); } else if (merge_left) { /* - * |<----->| OR - * |<--------->| + * |<------------>| OR + * |<----------------->| * |-------************* - * prev vma + * prev middle * extend shrink/delete */ - vmg->vma = prev; + vmg->target = prev; vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - if (!merge_will_delete_vma) { - adjust = vma; - adj_start = vmg->end - vma->vm_start; + if (!merge_will_delete_middle) { + adjust = middle; + adj_start = vmg->end - middle->vm_start; } - err = dup_anon_vma(prev, vma, &anon_dup); + err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ /* - * |<----->| OR - * |<--------->| + * |<------------->| OR + * |<----------------->| * *************-------| - * vma next + * middle next * shrink/delete extend */ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); VM_WARN_ON_VMG(!merge_right, vmg); - /* If we are offset into a VMA, then prev must be vma. */ - VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg); + /* If we are offset into a VMA, then prev must be middle. */ + VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); - if (merge_will_delete_vma) { - vmg->vma = next; + if (merge_will_delete_middle) { + vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { /* - * We shrink vma and expand next. + * We shrink middle and expand next. * * IMPORTANT: This is the ONLY case where the final - * merged VMA is NOT vmg->vma, but rather vmg->next. + * merged VMA is NOT vmg->target, but rather vmg->next. */ - - vmg->start = vma->vm_start; + vmg->target = middle; + vmg->start = middle->vm_start; vmg->end = start; - vmg->pgoff = vma->vm_pgoff; + vmg->pgoff = middle->vm_pgoff; adjust = next; - adj_start = -(vma->vm_end - start); + adj_start = -(middle->vm_end - start); } - err = dup_anon_vma(next, vma, &anon_dup); + err = dup_anon_vma(next, middle, &anon_dup); } if (err) goto abort; /* - * In nearly all cases, we expand vmg->vma. There is one exception - + * In nearly all cases, we expand vmg->middle. There is one exception - * merge_right where we partially span the VMA. In this case we shrink - * the end of vmg->vma and adjust the start of vmg->next accordingly. 
+ * the end of vmg->middle and adjust the start of vmg->next accordingly. */ - expanded = !merge_right || merge_will_delete_vma; + expanded = !merge_right || merge_will_delete_middle; if (commit_merge(vmg, adjust, - merge_will_delete_vma ? vma : NULL, + merge_will_delete_middle ? middle : NULL, merge_will_delete_next ? next : NULL, adj_start, expanded)) { if (anon_dup) @@ -973,7 +977,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON_VMG(vmg->vma, vmg); + VM_WARN_ON_VMG(vmg->middle, vmg); /* vmi must point at or before the gap. */ VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); @@ -989,13 +993,13 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { vmg->end = next->vm_end; - vmg->vma = next; + vmg->middle = next; } /* If we can merge with the previous VMA, adjust vmg accordingly. */ if (can_merge_left) { vmg->start = prev->vm_start; - vmg->vma = prev; + vmg->middle = prev; vmg->pgoff = prev->vm_pgoff; /* @@ -1017,10 +1021,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * Now try to expand adjacent VMA(s). This takes care of removing the * following VMA if we have VMAs on both sides. */ - if (vmg->vma && !vma_expand(vmg)) { - khugepaged_enter_vma(vmg->vma, vmg->flags); + if (vmg->middle && !vma_expand(vmg)) { + khugepaged_enter_vma(vmg->middle, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return vmg->vma; + return vmg->middle; } return NULL; @@ -1032,44 +1036,46 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) * @vmg: Describes a VMA expansion operation. * * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. - * Will expand over vmg->next if it's different from vmg->vma and vmg->end == - * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with + * Will expand over vmg->next if it's different from vmg->middle and vmg->end == + * vmg->next->vm_end. Checking if the vmg->middle can expand and merge with * vmg->next needs to be handled by the caller. * * Returns: 0 on success. * * ASSUMPTIONS: - * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock. - * - The caller must have set @vmg->vma and @vmg->next. + * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock. + * - The caller must have set @vmg->middle and @vmg->next. */ int vma_expand(struct vma_merge_struct *vmg) { struct vm_area_struct *anon_dup = NULL; bool remove_next = false; - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *next = vmg->next; mmap_assert_write_locked(vmg->mm); - vma_start_write(vma); - if (next && (vma != next) && (vmg->end == next->vm_end)) { + vma_start_write(middle); + if (next && (middle != next) && (vmg->end == next->vm_end)) { int ret; remove_next = true; /* This should already have been checked by this point. */ VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg); vma_start_write(next); - ret = dup_anon_vma(vma, next, &anon_dup); + ret = dup_anon_vma(middle, next, &anon_dup); if (ret) return ret; } /* Not merging but overwriting any part of next is not handled. 
*/ VM_WARN_ON_VMG(next && !remove_next && - next != vma && vmg->end > next->vm_start, vmg); + next != middle && vmg->end > next->vm_start, vmg); /* Only handles expanding */ - VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg); + VM_WARN_ON_VMG(middle->vm_start < vmg->start || + middle->vm_end > vmg->end, vmg); + vmg->target = middle; if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) goto nomem; @@ -1508,7 +1514,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, */ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { - struct vm_area_struct *vma = vmg->vma; + struct vm_area_struct *vma = vmg->middle; unsigned long start = vmg->start; unsigned long end = vmg->end; struct vm_area_struct *merged; @@ -1609,7 +1615,7 @@ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); vmg.next = vma_iter_next_rewind(vmi, NULL); - vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ + vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */ return vma_merge_new_range(&vmg); } @@ -1730,7 +1736,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ - vmg.vma = NULL; /* New VMA range. */ + vmg.middle = NULL; /* New VMA range. */ vmg.pgoff = pgoff; vmg.next = vma_iter_next_rewind(&vmi, NULL); new_vma = vma_merge_new_range(&vmg); diff --git a/mm/vma.h b/mm/vma.h index a2e8710b8c47..5b5dd07e478c 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -69,16 +69,48 @@ enum vma_merge_flags { VMG_FLAG_JUST_EXPAND = 1 << 0, }; -/* Represents a VMA merge operation. */ +/* + * Describes a VMA merge operation and is threaded throughout it. + * + * Any of the fields may be mutated by the merge operation, so no guarantees are + * made to the contents of this structure after a merge operation has completed. + */ struct vma_merge_struct { struct mm_struct *mm; struct vma_iterator *vmi; - pgoff_t pgoff; + /* + * Adjacent VMAs, any of which may be NULL if not present: + * + * |------|--------|------| + * | prev | middle | next | + * |------|--------|------| + * + * middle may not yet exist in the case of a proposed new VMA being + * merged, or it may be an existing VMA. + * + * next may be assigned by the caller. + */ struct vm_area_struct *prev; - struct vm_area_struct *next; /* Modified by vma_merge(). */ - struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */ + struct vm_area_struct *middle; + struct vm_area_struct *next; + /* + * This is the VMA we ultimately target to become the merged VMA, except + * for the one exception of merge right, shrink next (for details of + * this scenario see vma_merge_existing_range()). + */ + struct vm_area_struct *target; + /* + * Initially, the start, end, pgoff fields are provided by the caller + * and describe the proposed new VMA range, whether modifying an + * existing VMA (which will be 'middle'), or adding a new one. + * + * During the merge process these fields are updated to describe the new + * range _including those VMAs which will be merged_. 
+ */ unsigned long start; unsigned long end; + pgoff_t pgoff; + unsigned long flags; struct file *file; struct anon_vma *anon_vma; @@ -118,8 +150,8 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .mm = vma_->vm_mm, \ .vmi = vmi_, \ .prev = prev_, \ + .middle = vma_, \ .next = NULL, \ - .vma = vma_, \ .start = start_, \ .end = end_, \ .flags = vma_->vm_flags, \ diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 04ab45e27fb8..3c0572120e94 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -147,8 +147,8 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vma_iter_set(vmg->vmi, start); vmg->prev = NULL; + vmg->middle = NULL; vmg->next = NULL; - vmg->vma = NULL; vmg->start = start; vmg->end = end; @@ -338,7 +338,7 @@ static bool test_simple_expand(void) VMA_ITERATOR(vmi, &mm, 0); struct vma_merge_struct vmg = { .vmi = &vmi, - .vma = vma, + .middle = vma, .start = 0, .end = 0x3000, .pgoff = 0, @@ -631,7 +631,7 @@ static bool test_vma_merge_special_flags(void) */ vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags); ASSERT_NE(vma, NULL); - vmg.vma = vma; + vmg.middle = vma; for (i = 0; i < ARRAY_SIZE(special_flags); i++) { vm_flags_t special_flag = special_flags[i]; @@ -760,7 +760,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; /* * The VMA being modified in a way that would otherwise merge should @@ -787,7 +787,7 @@ static bool test_vma_merge_with_close(void) vma->vm_ops = &vm_ops; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); /* * Initially this is misapprehended as an out of memory report, as the @@ -817,7 +817,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -843,7 +843,7 @@ static bool test_vma_merge_with_close(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -940,7 +940,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); - vmg.vma = vma; + vmg.middle = vma; vmg.prev = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); @@ -973,7 +973,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags); vma_next->vm_ops = &vm_ops; /* This should have no impact. */ vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags); - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1003,7 +1003,7 @@ static bool test_merge_existing(void) vma->vm_ops = &vm_ops; /* This should have no impact. 
*/ vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); @@ -1037,7 +1037,7 @@ static bool test_merge_existing(void) vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1067,7 +1067,7 @@ static bool test_merge_existing(void) vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags); vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; vma->anon_vma = &dummy_anon_vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1102,37 +1102,37 @@ static bool test_merge_existing(void) vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), NULL); ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE); @@ -1197,7 +1197,7 @@ static bool test_anon_vma_non_mergeable(void) vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1277,7 +1277,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 0, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma_next; ASSERT_EQ(expand_existing(&vmg), 0); @@ -1309,7 +1309,7 @@ static bool test_dup_anon_vma(void) vma_next->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1338,7 +1338,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1366,7 +1366,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; ASSERT_EQ(merge_existing(&vmg), vma_prev); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1394,7 +1394,7 @@ static bool test_dup_anon_vma(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma; - vmg.vma = vma; + vmg.middle = vma; 
ASSERT_EQ(merge_existing(&vmg), vma_next); ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS); @@ -1432,7 +1432,7 @@ static bool test_vmi_prealloc_fail(void) vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.vma = vma; + vmg.middle = vma; fail_prealloc = true; @@ -1458,7 +1458,7 @@ static bool test_vmi_prealloc_fail(void) vma->anon_vma = &dummy_anon_vma; vmg_set_range(&vmg, 0, 0x5000, 3, flags); - vmg.vma = vma_prev; + vmg.middle = vma_prev; vmg.next = vma; fail_prealloc = true; From 6ab2d9c7c680c0a041477126722cebe7dc57f713 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:50 +0000 Subject: [PATCH 040/431] mm: further refactor commit_merge() The current VMA merge mechanism contains a number of confusing mechanisms around removal of VMAs on merge and the shrinking of the VMA adjacent to vma->target in the case of merges which result in a partial merge with that adjacent VMA. Since we now have a STABLE set of VMAs - prev, middle, next - we are now able to have the caller of commit_merge() explicitly tell us which VMAs need deleting, using newly introduced internal VMA merge flags. Doing so allows us to embed this state within the VMG and remove the confusing remove, remove2 parameters from commit_merge(). We additionally are able to eliminate the highly confusing and misleading 'expanded' parameter - a parameter that in reality refers to whether or not the return VMA is the target one or the one immediately adjacent. We can infer which is the case from whether or not the adj_start parameter is negative. This also allows us to simplify further logic around iterator configuration and VMA iterator stores. Doing so means we can also eliminate the adjust parameter, as we are able to infer which VMA ought to be adjusted from adj_start - a positive value implies we adjust the start of 'middle', a negative one implies we adjust the start of 'next'. We are then able to have commit_merge() explicitly return the target VMA, or NULL on inability to pre-allocate memory. Errors were previously filtered so behaviour does not change. We additionally move from the slightly odd use of a bitwise-flag enum vmg->merge_flags field to vmg bitfields. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/7bf2ed24af68aac18672b7acebbd9102f48c5b03.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/debug.c | 6 ++- mm/vma.c | 101 +++++++++++++++++++++------------------- mm/vma.h | 38 +++++++++------ tools/testing/vma/vma.c | 9 +++- 4 files changed, 87 insertions(+), 67 deletions(-) diff --git a/mm/debug.c b/mm/debug.c index c9e07651677b..60c6f1134383 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -266,7 +266,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) "file %px anon_vma %px policy %px\n" "uffd_ctx %px\n" "anon_name %px\n" - "merge_flags %x state %x\n", + "state %x\n" + "just_expand %d __remove_middle %d __remove_next %d\n", vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ? 
vma_iter_end(vmg->vmi) : 0, @@ -279,7 +280,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) (void *)0, #endif vmg->anon_name, - (int)vmg->merge_flags, (int)vmg->state); + (int)vmg->state, + vmg->just_expand, vmg->__remove_middle, vmg->__remove_next); if (vmg->mm) { pr_warn("vmg %px mm:\n", vmg); diff --git a/mm/vma.c b/mm/vma.c index 7fca21dee7b3..dd9fdf5c9429 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -55,7 +55,6 @@ struct mmap_state { .middle = vma_, \ .next = (vma_) ? NULL : (map_)->next, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) @@ -120,8 +119,8 @@ static void init_multi_vma_prep(struct vma_prepare *vp, memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; - vp->remove = remove; - vp->remove2 = remove2; + vp->remove = remove ? remove : remove2; + vp->remove2 = remove ? remove2 : NULL; vp->adj_next = next; if (!vp->anon_vma && next) vp->anon_vma = next->anon_vma; @@ -129,7 +128,6 @@ static void init_multi_vma_prep(struct vma_prepare *vp, vp->file = vma->vm_file; if (vp->file) vp->mapping = vma->vm_file->f_mapping; - } /* @@ -629,22 +627,40 @@ void validate_mm(struct mm_struct *mm) } #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ -/* Actually perform the VMA merge operation. */ -static int commit_merge(struct vma_merge_struct *vmg, - struct vm_area_struct *adjust, - struct vm_area_struct *remove, - struct vm_area_struct *remove2, - long adj_start, - bool expanded) +/* + * Actually perform the VMA merge operation. + * + * On success, returns the merged VMA. Otherwise returns NULL. + */ +static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg, + long adj_start) { struct vma_prepare vp; + struct vm_area_struct *remove = NULL; + struct vm_area_struct *remove2 = NULL; + struct vm_area_struct *adjust = NULL; + /* + * In all cases but that of merge right, shrink next, we write + * vmg->target to the maple tree and return this as the merged VMA. + */ + bool merge_target = adj_start >= 0; + + if (vmg->__remove_middle) + remove = vmg->middle; + if (vmg->__remove_next) + remove2 = vmg->next; + + if (adj_start > 0) + adjust = vmg->middle; + else if (adj_start < 0) + adjust = vmg->next; init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && vp.anon_vma != adjust->anon_vma); - if (expanded) { + if (merge_target) { /* Note: vma iterator must be pointing to 'start'. */ vma_iter_config(vmg->vmi, vmg->start, vmg->end); } else { @@ -653,27 +669,26 @@ static int commit_merge(struct vma_merge_struct *vmg, } if (vma_iter_prealloc(vmg->vmi, vmg->target)) - return -ENOMEM; + return NULL; vma_prepare(&vp); vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); - if (expanded) + if (merge_target) vma_iter_store(vmg->vmi, vmg->target); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += PHYS_PFN(adj_start); - if (adj_start < 0) { - WARN_ON(expanded); + + if (!merge_target) vma_iter_store(vmg->vmi, adjust); - } } vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); - return 0; + return merge_target ? vmg->target : vmg->next; } /* We can only remove VMAs when merging if they do not have a close hook. 
*/ @@ -718,16 +733,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next, *res; struct vm_area_struct *anon_dup = NULL; - struct vm_area_struct *adjust = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; bool left_side = middle && start == middle->vm_start; bool right_side = middle && end == middle->vm_end; int err = 0; long adj_start = 0; - bool merge_will_delete_middle, merge_will_delete_next; bool merge_left, merge_right, merge_both; - bool expanded; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ @@ -779,27 +791,27 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( merge_both = merge_left && merge_right; /* If we span the entire VMA, a merge implies it will be deleted. */ - merge_will_delete_middle = left_side && right_side; + vmg->__remove_middle = left_side && right_side; /* * If we need to remove middle in its entirety but are unable to do so, * we have no sensible recourse but to abort the merge. */ - if (merge_will_delete_middle && !can_merge_remove_vma(middle)) + if (vmg->__remove_middle && !can_merge_remove_vma(middle)) return NULL; /* * If we merge both VMAs, then next is also deleted. This implies * merge_will_delete_vma also. */ - merge_will_delete_next = merge_both; + vmg->__remove_next = merge_both; /* * If we cannot delete next, then we can reduce the operation to merging * prev and middle (thereby deleting middle). */ - if (merge_will_delete_next && !can_merge_remove_vma(next)) { - merge_will_delete_next = false; + if (vmg->__remove_next && !can_merge_remove_vma(next)) { + vmg->__remove_next = false; merge_right = false; merge_both = false; } @@ -846,10 +858,11 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - if (!merge_will_delete_middle) { - adjust = middle; + /* + * We both expand prev and shrink middle. + */ + if (!vmg->__remove_middle) adj_start = vmg->end - middle->vm_start; - } err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ @@ -867,7 +880,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* If we are offset into a VMA, then prev must be middle. */ VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); - if (merge_will_delete_middle) { + if (vmg->__remove_middle) { vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; @@ -883,7 +896,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->end = start; vmg->pgoff = middle->vm_pgoff; - adjust = next; adj_start = -(middle->vm_end - start); } @@ -893,17 +905,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - /* - * In nearly all cases, we expand vmg->middle. There is one exception - - * merge_right where we partially span the VMA. In this case we shrink - * the end of vmg->middle and adjust the start of vmg->next accordingly. - */ - expanded = !merge_right || merge_will_delete_middle; - - if (commit_merge(vmg, adjust, - merge_will_delete_middle ? middle : NULL, - merge_will_delete_next ? next : NULL, - adj_start, expanded)) { + res = commit_merge(vmg, adj_start); + if (!res) { if (anon_dup) unlink_anon_vmas(anon_dup); @@ -911,9 +914,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( return NULL; } - res = merge_left ? 
prev : next; khugepaged_enter_vma(res, vmg->flags); - vmg->state = VMA_MERGE_SUCCESS; return res; @@ -974,7 +975,6 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) struct vm_area_struct *next = vmg->next; unsigned long end = vmg->end; bool can_merge_left, can_merge_right; - bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(vmg->middle, vmg); @@ -988,7 +988,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) return NULL; can_merge_left = can_vma_merge_left(vmg); - can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left); + can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left); /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { @@ -1011,7 +1011,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) vmg->end = end; /* In expand-only case we are already positioned at prev. */ - if (!just_expand) { + if (!vmg->just_expand) { /* Equivalent to going to the previous range. */ vma_prev(vmg->vmi); } @@ -1076,7 +1076,10 @@ int vma_expand(struct vma_merge_struct *vmg) middle->vm_end > vmg->end, vmg); vmg->target = middle; - if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) + if (remove_next) + vmg->__remove_next = true; + + if (!commit_merge(vmg, 0)) goto nomem; return 0; @@ -2593,7 +2596,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, vmg.prev = vma; /* vmi is positioned at prev, which this mode expects. */ - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; if (vma_merge_new_range(&vmg)) goto out; diff --git a/mm/vma.h b/mm/vma.h index 5b5dd07e478c..7935681a2db8 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -58,17 +58,6 @@ enum vma_merge_state { VMA_MERGE_SUCCESS, }; -enum vma_merge_flags { - VMG_FLAG_DEFAULT = 0, - /* - * If we can expand, simply do so. We know there is nothing to merge to - * the right. Does not reset state upon failure to merge. The VMA - * iterator is assumed to be positioned at the previous VMA, rather than - * at the gap. - */ - VMG_FLAG_JUST_EXPAND = 1 << 0, -}; - /* * Describes a VMA merge operation and is threaded throughout it. * @@ -117,8 +106,31 @@ struct vma_merge_struct { struct mempolicy *policy; struct vm_userfaultfd_ctx uffd_ctx; struct anon_vma_name *anon_name; - enum vma_merge_flags merge_flags; enum vma_merge_state state; + + /* Flags which callers can use to modify merge behaviour: */ + + /* + * If we can expand, simply do so. We know there is nothing to merge to + * the right. Does not reset state upon failure to merge. The VMA + * iterator is assumed to be positioned at the previous VMA, rather than + * at the gap. + */ + bool just_expand :1; + + /* Internal flags set during merge process: */ + + /* + * Internal flag used during the merge operation to indicate we will + * remove vmg->middle. + */ + bool __remove_middle :1; + /* + * Internal flag used during the merge operation to indicate we will + * remove vmg->next. 
+ */ + bool __remove_next :1; + }; static inline bool vmg_nomem(struct vma_merge_struct *vmg) @@ -142,7 +154,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .flags = flags_, \ .pgoff = pgoff_, \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \ @@ -162,7 +173,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, .uffd_ctx = vma_->vm_userfaultfd_ctx, \ .anon_name = anon_vma_name(vma_), \ .state = VMA_MERGE_START, \ - .merge_flags = VMG_FLAG_DEFAULT, \ } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 3c0572120e94..7728498b2f7e 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -149,11 +149,16 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vmg->prev = NULL; vmg->middle = NULL; vmg->next = NULL; + vmg->target = NULL; vmg->start = start; vmg->end = end; vmg->pgoff = pgoff; vmg->flags = flags; + + vmg->just_expand = false; + vmg->__remove_middle = false; + vmg->__remove_next = false; } /* @@ -1546,7 +1551,7 @@ static bool test_expand_only_mode(void) /* * Place a VMA prior to the one we're expanding so we assert that we do * not erroneously try to traverse to the previous VMA even though we - * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not + * have, through the use of the just_expand flag, indicated we do not * need to do so. */ alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); @@ -1558,7 +1563,7 @@ static bool test_expand_only_mode(void) vma_iter_set(&vmi, 0x3000); vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); vmg.prev = vma_prev; - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + vmg.just_expand = true; vma = vma_merge_new_range(&vmg); ASSERT_NE(vma, NULL); From fe3e9cf0d7a28d333523189d1405770d980b07d6 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:51 +0000 Subject: [PATCH 041/431] mm: eliminate adj_start parameter from commit_merge() Introduce internal vmg->__adjust_middle_start and vmg->__adjust_next_start merge flags, enabling us to indicate to commit_merge() that we are performing a merge which either spans only part of vmg->middle, or part of vmg->next respectively. In the former instance, we change the start of vmg->middle to match the attributes of vmg->prev, without spanning all of vmg->middle. This implies that vmg->prev->vm_end and vmg->middle->vm_start are both increased to form the new merged VMA (vmg->prev) and the new subsequent VMA (vmg->middle). In the latter case, we change the end of vmg->middle to match the attributes of vmg->next, without spanning all of vmg->next. This implies that vmg->middle->vm_end and vmg->next->vm_start are both decreased to form the new merged VMA (vmg->next) and the new prior VMA (vmg->middle). Since we now have a stable set of prev, middle, next VMAs threaded through vmg and with these flags set know what is happening, we can perform the calculation in commit_merge() instead. This allows us to drop the confusing adj_start parameter and instead pass semantic information to commit_merge(). In the latter case the -(middle->vm_end - start) calculation becomes -(middle->vm-end - vmg->end), however this is correct as vmg->end is set to the start parameter. This is because in this case (rather confusingly), we manipulate vmg->middle, but ultimately return vmg->next, whose range will be correctly specified. 
At this point vmg->start, end is the new range for the prior VMA rather than the merged one. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/bcec0cd980b373a5eb02236cb033034ce1effe42.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/debug.c | 8 +++++-- mm/vma.c | 50 ++++++++++++++++++++++++----------------- mm/vma.h | 10 +++++++++ tools/testing/vma/vma.c | 2 ++ 4 files changed, 48 insertions(+), 22 deletions(-) diff --git a/mm/debug.c b/mm/debug.c index 60c6f1134383..e1282b85a877 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -267,7 +267,9 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) "uffd_ctx %px\n" "anon_name %px\n" "state %x\n" - "just_expand %d __remove_middle %d __remove_next %d\n", + "just_expand %d\n" + "__adjust_middle_start %d __adjust_next_start %d\n" + "__remove_middle %d __remove_next %d\n", vmg, vmg->mm, vmg->pgoff, vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, vmg->vmi ? vma_iter_end(vmg->vmi) : 0, @@ -281,7 +283,9 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) #endif vmg->anon_name, (int)vmg->state, - vmg->just_expand, vmg->__remove_middle, vmg->__remove_next); + vmg->just_expand, + vmg->__adjust_middle_start, vmg->__adjust_next_start, + vmg->__remove_middle, vmg->__remove_next); if (vmg->mm) { pr_warn("vmg %px mm:\n", vmg); diff --git a/mm/vma.c b/mm/vma.c index dd9fdf5c9429..bac0e72ccb62 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -632,29 +632,44 @@ void validate_mm(struct mm_struct *mm) * * On success, returns the merged VMA. Otherwise returns NULL. */ -static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg, - long adj_start) +static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) { - struct vma_prepare vp; struct vm_area_struct *remove = NULL; struct vm_area_struct *remove2 = NULL; + struct vma_prepare vp; struct vm_area_struct *adjust = NULL; + long adj_start; + bool merge_target; + /* - * In all cases but that of merge right, shrink next, we write - * vmg->target to the maple tree and return this as the merged VMA. + * If modifying an existing VMA and we don't remove vmg->middle, then we + * shrink the adjacent VMA. */ - bool merge_target = adj_start >= 0; + if (vmg->__adjust_middle_start) { + adjust = vmg->middle; + /* The POSITIVE value by which we offset vmg->middle->vm_start. */ + adj_start = vmg->end - vmg->middle->vm_start; + merge_target = true; + } else if (vmg->__adjust_next_start) { + adjust = vmg->next; + /* The NEGATIVE value by which we offset vmg->next->vm_start. */ + adj_start = -(vmg->middle->vm_end - vmg->end); + /* + * In all cases but this - merge right, shrink next - we write + * vmg->target to the maple tree and return this as the merged VMA. 
+ */ + merge_target = false; + } else { + adjust = NULL; + adj_start = 0; + merge_target = true; + } if (vmg->__remove_middle) remove = vmg->middle; if (vmg->__remove_next) remove2 = vmg->next; - if (adj_start > 0) - adjust = vmg->middle; - else if (adj_start < 0) - adjust = vmg->next; - init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && @@ -738,7 +753,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( bool left_side = middle && start == middle->vm_start; bool right_side = middle && end == middle->vm_end; int err = 0; - long adj_start = 0; bool merge_left, merge_right, merge_both; mmap_assert_write_locked(vmg->mm); @@ -858,11 +872,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; - /* - * We both expand prev and shrink middle. - */ if (!vmg->__remove_middle) - adj_start = vmg->end - middle->vm_start; + vmg->__adjust_middle_start = true; err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ @@ -891,12 +902,11 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * IMPORTANT: This is the ONLY case where the final * merged VMA is NOT vmg->target, but rather vmg->next. */ + vmg->__adjust_next_start = true; vmg->target = middle; vmg->start = middle->vm_start; vmg->end = start; vmg->pgoff = middle->vm_pgoff; - - adj_start = -(middle->vm_end - start); } err = dup_anon_vma(next, middle, &anon_dup); @@ -905,7 +915,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - res = commit_merge(vmg, adj_start); + res = commit_merge(vmg); if (!res) { if (anon_dup) unlink_anon_vmas(anon_dup); @@ -1079,7 +1089,7 @@ int vma_expand(struct vma_merge_struct *vmg) if (remove_next) vmg->__remove_next = true; - if (!commit_merge(vmg, 0)) + if (!commit_merge(vmg)) goto nomem; return 0; diff --git a/mm/vma.h b/mm/vma.h index 7935681a2db8..e18487797fa4 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -120,6 +120,16 @@ struct vma_merge_struct { /* Internal flags set during merge process: */ + /* + * Internal flag indicating the merge increases vmg->middle->vm_start + * (and thereby, vmg->prev->vm_end). + */ + bool __adjust_middle_start :1; + /* + * Internal flag indicating the merge decreases vmg->next->vm_start + * (and thereby, vmg->middle->vm_end). + */ + bool __adjust_next_start :1; /* * Internal flag used during the merge operation to indicate we will * remove vmg->middle. diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 7728498b2f7e..c7ffa71841ca 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -159,6 +159,8 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start, vmg->just_expand = false; vmg->__remove_middle = false; vmg->__remove_next = false; + vmg->__adjust_middle_start = false; + vmg->__adjust_next_start = false; } /* From 0e5ffe9b2bd6d9ab7bf45f512c016e4710bf6d5d Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:52 +0000 Subject: [PATCH 042/431] mm: make vmg->target consistent and further simplify commit_merge() It is confusing for vmg->target to sometimes be the target merged VMA and in one case not. Fix this by having commit_merge() use its awareness of the vmg->_adjust_next_start case to know that it is manipulating a separate vma, abstracted in the 'vma' local variable. 
Place removal and adjust VMA determination logic into init_multi_vma_prep(), as the flags give us enough information to do so, and since this is the function that sets up the vma_prepare struct it makes sense to do so here. Doing this significantly simplifies commit_merge(), allowing us to eliminate the 'merge_target' handling, initialise the VMA iterator in a more sensible place and simply return vmg->target consistently. This also allows us to simplify setting vmg->target in vma_merge_existing_range() since we are then left only with two cases - merge left (or both) where the target is vmg->prev or merge right in which the target is vmg->next. This makes it easy for somebody reading the code to know what VMA will actually be the one returned and merged into and removes a great deal of the confusing 'adjust' nonsense. This patch has no change in functional behaviour. Link: https://lkml.kernel.org/r/50f96e31ab1980eaaf1006e34a4f6e6dad9320b8.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- mm/vma.c | 119 ++++++++++++++++++++++++++++--------------------------- mm/vma.h | 6 +-- 2 files changed, 62 insertions(+), 63 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index bac0e72ccb62..b10625e8fc99 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -106,24 +106,40 @@ static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1, * init_multi_vma_prep() - Initializer for struct vma_prepare * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked - * @next: The next vma if it is to be adjusted - * @remove: The first vma to be removed - * @remove2: The second vma to be removed + * @vmg: The merge state that will be used to determine adjustment and VMA + * removal. */ static void init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, - struct vm_area_struct *next, - struct vm_area_struct *remove, - struct vm_area_struct *remove2) + struct vma_merge_struct *vmg) { + struct vm_area_struct *adjust; + struct vm_area_struct **remove = &vp->remove; + memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; - vp->remove = remove ? remove : remove2; - vp->remove2 = remove ? 
remove2 : NULL; - vp->adj_next = next; - if (!vp->anon_vma && next) - vp->anon_vma = next->anon_vma; + + if (vmg && vmg->__remove_middle) { + *remove = vmg->middle; + remove = &vp->remove2; + } + if (vmg && vmg->__remove_next) + *remove = vmg->next; + + if (vmg && vmg->__adjust_middle_start) + adjust = vmg->middle; + else if (vmg && vmg->__adjust_next_start) + adjust = vmg->next; + else + adjust = NULL; + + vp->adj_next = adjust; + if (!vp->anon_vma && adjust) + vp->anon_vma = adjust->anon_vma; + + VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma && + vp->anon_vma != adjust->anon_vma); vp->file = vma->vm_file; if (vp->file) @@ -360,7 +376,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, */ static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) { - init_multi_vma_prep(vp, vma, NULL, NULL, NULL); + init_multi_vma_prep(vp, vma, NULL); } /* @@ -634,76 +650,63 @@ void validate_mm(struct mm_struct *mm) */ static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) { - struct vm_area_struct *remove = NULL; - struct vm_area_struct *remove2 = NULL; + struct vm_area_struct *vma; struct vma_prepare vp; - struct vm_area_struct *adjust = NULL; + struct vm_area_struct *adjust; long adj_start; - bool merge_target; /* * If modifying an existing VMA and we don't remove vmg->middle, then we * shrink the adjacent VMA. */ if (vmg->__adjust_middle_start) { + vma = vmg->target; adjust = vmg->middle; /* The POSITIVE value by which we offset vmg->middle->vm_start. */ adj_start = vmg->end - vmg->middle->vm_start; - merge_target = true; + + /* Note: vma iterator must be pointing to 'start'. */ + vma_iter_config(vmg->vmi, vmg->start, vmg->end); } else if (vmg->__adjust_next_start) { + /* + * In this case alone, the VMA we manipulate is vmg->middle, but + * we ultimately return vmg->next. + */ + vma = vmg->middle; adjust = vmg->next; /* The NEGATIVE value by which we offset vmg->next->vm_start. */ adj_start = -(vmg->middle->vm_end - vmg->end); - /* - * In all cases but this - merge right, shrink next - we write - * vmg->target to the maple tree and return this as the merged VMA. - */ - merge_target = false; + + vma_iter_config(vmg->vmi, vmg->next->vm_start + adj_start, + vmg->next->vm_end); } else { + vma = vmg->target; adjust = NULL; adj_start = 0; - merge_target = true; - } - if (vmg->__remove_middle) - remove = vmg->middle; - if (vmg->__remove_next) - remove2 = vmg->next; - - init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2); - - VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && - vp.anon_vma != adjust->anon_vma); - - if (merge_target) { - /* Note: vma iterator must be pointing to 'start'. */ + /* Note: vma iterator must be pointing to 'start'. 
*/ vma_iter_config(vmg->vmi, vmg->start, vmg->end); - } else { - vma_iter_config(vmg->vmi, adjust->vm_start + adj_start, - adjust->vm_end); } - if (vma_iter_prealloc(vmg->vmi, vmg->target)) + init_multi_vma_prep(&vp, vma, vmg); + + if (vma_iter_prealloc(vmg->vmi, vma)) return NULL; vma_prepare(&vp); - vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start); - vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff); - - if (merge_target) - vma_iter_store(vmg->vmi, vmg->target); + vma_adjust_trans_huge(vma, vmg->start, vmg->end, adj_start); + vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += PHYS_PFN(adj_start); - - if (!merge_target) - vma_iter_store(vmg->vmi, adjust); } - vma_complete(&vp, vmg->vmi, vmg->target->vm_mm); + vma_iter_store(vmg->vmi, vmg->target); - return merge_target ? vmg->target : vmg->next; + vma_complete(&vp, vmg->vmi, vma->vm_mm); + + return vmg->target; } /* We can only remove VMAs when merging if they do not have a close hook. */ @@ -833,11 +836,15 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( /* No matter what happens, we will be adjusting middle. */ vma_start_write(middle); - if (merge_left) - vma_start_write(prev); - - if (merge_right) + if (merge_right) { vma_start_write(next); + vmg->target = next; + } + + if (merge_left) { + vma_start_write(prev); + vmg->target = prev; + } if (merge_both) { /* @@ -847,7 +854,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * extend delete delete */ - vmg->target = prev; vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; @@ -868,7 +874,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * extend shrink/delete */ - vmg->target = prev; vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; @@ -892,7 +897,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); if (vmg->__remove_middle) { - vmg->target = next; vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { @@ -903,7 +907,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( * merged VMA is NOT vmg->target, but rather vmg->next. */ vmg->__adjust_next_start = true; - vmg->target = middle; vmg->start = middle->vm_start; vmg->end = start; vmg->pgoff = middle->vm_pgoff; diff --git a/mm/vma.h b/mm/vma.h index e18487797fa4..e55e68abfbe3 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -82,11 +82,7 @@ struct vma_merge_struct { struct vm_area_struct *prev; struct vm_area_struct *middle; struct vm_area_struct *next; - /* - * This is the VMA we ultimately target to become the merged VMA, except - * for the one exception of merge right, shrink next (for details of - * this scenario see vma_merge_existing_range()). - */ + /* This is the VMA we ultimately target to become the merged VMA. */ struct vm_area_struct *target; /* * Initially, the start, end, pgoff fields are provided by the caller From c372473a545edff2fdbc002fc67c181e17c7557b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 31 Jan 2025 12:31:53 +0000 Subject: [PATCH 043/431] mm: completely abstract unnecessary adj_start calculation The adj_start calculation has been a constant source of confusion in the VMA merge code. There are two cases to consider, one where we adjust the start of the vmg->middle VMA (i.e. 
the vmg->__adjust_middle_start merge flag is set), in which case adj_start
is calculated as:

	(1) adj_start = vmg->end - vmg->middle->vm_start

And the case where we adjust the start of the vmg->next VMA (i.e. the
vmg->__adjust_next_start merge flag is set), in which case adj_start is
calculated as:

	(2) adj_start = -(vmg->middle->vm_end - vmg->end)

We apply (1) thusly:

	vmg->middle->vm_start =
		vmg->middle->vm_start + vmg->end - vmg->middle->vm_start

Which simplifies to:

	vmg->middle->vm_start = vmg->end

Similarly, we apply (2) as:

	vmg->next->vm_start =
		vmg->next->vm_start + -(vmg->middle->vm_end - vmg->end)

Noting that for these VMAs to be mergeable vmg->middle->vm_end ==
vmg->next->vm_start and so this simplifies to:

	vmg->next->vm_start =
		vmg->next->vm_start + -(vmg->next->vm_start - vmg->end)

Which simplifies to:

	vmg->next->vm_start = vmg->end

Therefore in each case, we simply need to adjust the start of the VMA to
vmg->end (!) and can do away with this adj_start calculation. The only
caveat is that we must ensure we update the vm_pgoff field correctly.

We therefore abstract this entire calculation to a new function
vmg_adjust_set_range() which performs this calculation and sets the
adjusted VMA's new range using the general vma_set_range() function.

We also must update vma_adjust_trans_huge() which expects the
now-abstracted adj_start parameter. It turns out this is wholly
unnecessary. In vma_adjust_trans_huge() the relevant code is:

	if (adjust_next > 0) {
		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
		unsigned long nstart = next->vm_start;
		nstart += adjust_next;
		split_huge_pmd_if_needed(next, nstart);
	}

The only case where this is relevant is when vmg->__adjust_middle_start is
specified (in which case adj_next would have been positive), i.e. the one
in which the vma specified is vmg->prev and thus the sought 'next' VMA
would be vmg->middle.

We can therefore eliminate the find_vma() invocation altogether and simply
provide the vmg->middle VMA in this instance, or NULL otherwise.

Again we have an adj_next offset calculation:

	next->vm_start + vmg->end - vmg->middle->vm_start

Where next == vmg->middle this simplifies to vmg->end as previously
demonstrated. Therefore nstart is equal to vmg->end, which is already
passed to vma_adjust_trans_huge() via the 'end' parameter and so this code
(rather delightfully) simplifies to:

	if (next)
		split_huge_pmd_if_needed(next, end);

With these changes in place, it becomes silly for commit_merge() to return
vmg->target, as it is always the same and threaded through vmg, so we
finally change commit_merge() to return an error value once again.

This patch has no change in functional behaviour.
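To make the above algebra concrete, consider a standalone sketch (plain C
with hypothetical addresses, not kernel code) in which middle spans
[0x3000, 0x6000), next begins at 0x6000 and vmg->end is 0x4000; both
offsets land the adjusted VMA's start exactly on vmg->end:

	#include <assert.h>

	int main(void)
	{
		/* Hypothetical layout: middle is [0x3000, 0x6000), next starts at 0x6000. */
		unsigned long middle_start = 0x3000, middle_end = 0x6000;
		unsigned long next_start = middle_end;	/* mergeable => contiguous */
		unsigned long vmg_end = 0x4000;		/* new boundary inside middle */

		/* Case (1): offset applied to middle->vm_start. */
		long adj1 = (long)(vmg_end - middle_start);
		assert(middle_start + adj1 == vmg_end);

		/* Case (2): offset applied to next->vm_start. */
		long adj2 = -(long)(middle_end - vmg_end);
		assert(next_start + adj2 == vmg_end);

		return 0;
	}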
Link: https://lkml.kernel.org/r/7bce2cd4b5afb56211822835d145471280c3dccc.1738326519.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Jann Horn Cc: Liam Howlett Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 4 +- mm/huge_memory.c | 19 ++---- mm/vma.c | 101 +++++++++++++++---------------- tools/testing/vma/vma_internal.h | 4 +- 4 files changed, 58 insertions(+), 70 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 93e509b6c00e..e1bea54820ff 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -404,7 +404,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end); void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, - unsigned long end, long adjust_next); + unsigned long end, struct vm_area_struct *next); spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); @@ -571,7 +571,7 @@ static inline int madvise_collapse(struct vm_area_struct *vma, static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { } static inline int is_swap_pmd(pmd_t pmd) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e33da765c428..e7ac4f0dc21d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3017,9 +3017,9 @@ static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned } void vma_adjust_trans_huge(struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - long adjust_next) + unsigned long start, + unsigned long end, + struct vm_area_struct *next) { /* Check if we need to split start first. */ split_huge_pmd_if_needed(vma, start); @@ -3027,16 +3027,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, /* Check if we need to split end next. */ split_huge_pmd_if_needed(vma, end); - /* - * If we're also updating the next vma vm_start, - * check if we need to split it. - */ - if (adjust_next > 0) { - struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); - unsigned long nstart = next->vm_start; - nstart += adjust_next; - split_huge_pmd_if_needed(next, nstart); - } + /* If we're incrementing next->vm_start, we might need to split it. */ + if (next) + split_huge_pmd_if_needed(next, end); } static void unmap_folio(struct folio *folio) diff --git a/mm/vma.c b/mm/vma.c index b10625e8fc99..603e538a093f 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -513,7 +513,7 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); - vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); + vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); if (new_below) { vma->vm_start = addr; @@ -643,47 +643,45 @@ void validate_mm(struct mm_struct *mm) } #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ +/* + * Based on the vmg flag indicating whether we need to adjust the vm_start field + * for the middle or next VMA, we calculate what the range of the newly adjusted + * VMA ought to be, and set the VMA's range accordingly. 
+ */ +static void vmg_adjust_set_range(struct vma_merge_struct *vmg) +{ + struct vm_area_struct *adjust; + pgoff_t pgoff; + + if (vmg->__adjust_middle_start) { + adjust = vmg->middle; + pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start); + } else if (vmg->__adjust_next_start) { + adjust = vmg->next; + pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end); + } else { + return; + } + + vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff); +} + /* * Actually perform the VMA merge operation. * - * On success, returns the merged VMA. Otherwise returns NULL. + * Returns 0 on success, or an error value on failure. */ -static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) +static int commit_merge(struct vma_merge_struct *vmg) { struct vm_area_struct *vma; struct vma_prepare vp; - struct vm_area_struct *adjust; - long adj_start; - /* - * If modifying an existing VMA and we don't remove vmg->middle, then we - * shrink the adjacent VMA. - */ - if (vmg->__adjust_middle_start) { - vma = vmg->target; - adjust = vmg->middle; - /* The POSITIVE value by which we offset vmg->middle->vm_start. */ - adj_start = vmg->end - vmg->middle->vm_start; - - /* Note: vma iterator must be pointing to 'start'. */ - vma_iter_config(vmg->vmi, vmg->start, vmg->end); - } else if (vmg->__adjust_next_start) { - /* - * In this case alone, the VMA we manipulate is vmg->middle, but - * we ultimately return vmg->next. - */ + if (vmg->__adjust_next_start) { + /* We manipulate middle and adjust next, which is the target. */ vma = vmg->middle; - adjust = vmg->next; - /* The NEGATIVE value by which we offset vmg->next->vm_start. */ - adj_start = -(vmg->middle->vm_end - vmg->end); - - vma_iter_config(vmg->vmi, vmg->next->vm_start + adj_start, - vmg->next->vm_end); + vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end); } else { vma = vmg->target; - adjust = NULL; - adj_start = 0; - /* Note: vma iterator must be pointing to 'start'. */ vma_iter_config(vmg->vmi, vmg->start, vmg->end); } @@ -691,22 +689,22 @@ static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg) init_multi_vma_prep(&vp, vma, vmg); if (vma_iter_prealloc(vmg->vmi, vma)) - return NULL; + return -ENOMEM; vma_prepare(&vp); - vma_adjust_trans_huge(vma, vmg->start, vmg->end, adj_start); + /* + * THP pages may need to do additional splits if we increase + * middle->vm_start. + */ + vma_adjust_trans_huge(vma, vmg->start, vmg->end, + vmg->__adjust_middle_start ? vmg->middle : NULL); vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); - - if (adj_start) { - adjust->vm_start += adj_start; - adjust->vm_pgoff += PHYS_PFN(adj_start); - } - + vmg_adjust_set_range(vmg); vma_iter_store(vmg->vmi, vmg->target); vma_complete(&vp, vmg->vmi, vma->vm_mm); - return vmg->target; + return 0; } /* We can only remove VMAs when merging if they do not have a close hook. */ @@ -749,7 +747,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( { struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *prev = vmg->prev; - struct vm_area_struct *next, *res; + struct vm_area_struct *next; struct vm_area_struct *anon_dup = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; @@ -900,12 +898,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { - /* - * We shrink middle and expand next. - * - * IMPORTANT: This is the ONLY case where the final - * merged VMA is NOT vmg->target, but rather vmg->next. 
- */ + /* We shrink middle and expand next. */ vmg->__adjust_next_start = true; vmg->start = middle->vm_start; vmg->end = start; @@ -918,8 +911,10 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( if (err) goto abort; - res = commit_merge(vmg); - if (!res) { + err = commit_merge(vmg); + if (err) { + VM_WARN_ON(err != -ENOMEM); + if (anon_dup) unlink_anon_vmas(anon_dup); @@ -927,9 +922,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( return NULL; } - khugepaged_enter_vma(res, vmg->flags); + khugepaged_enter_vma(vmg->target, vmg->flags); vmg->state = VMA_MERGE_SUCCESS; - return res; + return vmg->target; abort: vma_iter_set(vmg->vmi, start); @@ -1092,7 +1087,7 @@ int vma_expand(struct vma_merge_struct *vmg) if (remove_next) vmg->__remove_next = true; - if (!commit_merge(vmg)) + if (commit_merge(vmg)) goto nomem; return 0; @@ -1132,7 +1127,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, init_vma_prep(&vp, vma); vma_prepare(&vp); - vma_adjust_trans_huge(vma, start, end, 0); + vma_adjust_trans_huge(vma, start, end, NULL); vma_iter_clear(vmi); vma_set_range(vma, start, end, pgoff); diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 1eae23039854..bb273927af0f 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -796,12 +796,12 @@ static inline void vma_start_write(struct vm_area_struct *vma) static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, - long adjust_next) + struct vm_area_struct *next) { (void)vma; (void)start; (void)end; - (void)adjust_next; + (void)next; } static inline void vma_iter_free(struct vma_iterator *vmi) From 51ff4d7486f0c0b4110a6da4af805b179dd7b11e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:00 -0800 Subject: [PATCH 044/431] mm: avoid extra mem_alloc_profiling_enabled() checks Refactor code to avoid extra mem_alloc_profiling_enabled() checks inside pgalloc_tag_get() function which is often called after that check was already done. 
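In outline, the change hoists the mem_alloc_profiling_enabled() check to
the call sites that have not already performed it and lets the getter
assume it was done. A minimal standalone sketch of the pattern
(illustrative names only; a plain boolean stands in for the kernel's
static key, 4096 for PAGE_SIZE):

	#include <stdbool.h>
	#include <stddef.h>

	static bool profiling_enabled;	/* stand-in for mem_alloc_profiling_enabled() */

	struct tag_stub { long bytes; };

	/* Caller must already have checked profiling_enabled (cf. __pgalloc_tag_get()). */
	static struct tag_stub *__tag_get(const void *page)
	{
		(void)page;
		return NULL;	/* placeholder for the real tag lookup */
	}

	/* The enabled check happens once here, not again inside the getter. */
	static void tag_sub_pages(const void *page, unsigned int nr)
	{
		struct tag_stub *tag;

		if (!profiling_enabled)
			return;

		tag = __tag_get(page);
		if (tag)
			tag->bytes -= 4096L * nr;
	}

	int main(void)
	{
		tag_sub_pages(NULL, 1);	/* no-op while profiling is disabled */
		return 0;
	}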
Link: https://lkml.kernel.org/r/20250201231803.2661189-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Steven Rostedt Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Vlastimil Babka Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- include/linux/pgalloc_tag.h | 33 ++++++++++++++++++--------------- lib/alloc_tag.c | 6 +++--- mm/page_alloc.c | 3 +-- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 3469c4b20105..4a82b6b4820e 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -205,28 +205,32 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) } } -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) { struct alloc_tag *tag = NULL; + union pgtag_ref_handle handle; + union codetag_ref ref; - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub_check(&ref); - if (ref.ct) - tag = ct_to_alloc_tag(ref.ct); - put_page_tag_ref(handle); - } + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub_check(&ref); + if (ref.ct) + tag = ct_to_alloc_tag(ref.ct); + put_page_tag_ref(handle); } return tag; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) { - if (mem_alloc_profiling_enabled() && tag) + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = __pgalloc_tag_get(page); + if (tag) this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); } @@ -241,8 +245,7 @@ static inline void clear_page_tag_ref(struct page *page) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } -static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 19b45617bdcf..1d893e313614 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) if (!mem_alloc_profiling_enabled()) return; - tag = pgalloc_tag_get(&folio->page); + tag = __pgalloc_tag_get(&folio->page); if (!tag) return; @@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old) if (!mem_alloc_profiling_enabled()) return; - tag_old = pgalloc_tag_get(&old->page); + tag_old = __pgalloc_tag_get(&old->page); if (!tag_old) return; - tag_new = pgalloc_tag_get(&new->page); + tag_new = __pgalloc_tag_get(&new->page); if (!tag_new) return; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 542d25f77be8..38c2b2d20b1d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4833,12 +4833,11 @@ void __free_pages(struct page *page, unsigned int order) { /* get 
PageHead before we drop reference */ int head = PageHead(page); - struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) free_frozen_pages(page, order); else if (!head) { - pgalloc_tag_sub_pages(tag, (1 << order) - 1); + pgalloc_tag_sub_pages(page, (1 << order) - 1); while (order-- > 0) free_frozen_pages(page + (1 << order), order); } From a642b27b991fd663de1676bf91583d1e2397d93d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:01 -0800 Subject: [PATCH 045/431] alloc_tag: uninline code gated by mem_alloc_profiling_key in slab allocator When a sizable code section is protected by a disabled static key, that code gets into the instruction cache even though it's not executed and consumes the cache, increasing cache misses. This can be remedied by moving such code into a separate uninlined function. On a Pixel6 phone, slab allocation profiling overhead measured with CONFIG_MEM_ALLOC_PROFILING=y and profiling disabled is: baseline modified Big core 3.31% 0.17% Medium core 3.79% 0.57% Little core 6.68% 1.28% This improvement comes at the expense of the configuration when profiling gets enabled, since there is now an additional function call. The overhead from this additional call on Pixel6 is: Big core 0.66% Middle core 1.23% Little core 2.42% However this is negligible when compared with the overall overhead of the memory allocation profiling when it is enabled. On x86 this patch does not make noticeable difference because the overhead with mem_alloc_profiling_key disabled is much lower (under 1%) to start with, so any improvement is less visible and hard to distinguish from the noise. The overhead from additional call when profiling is enabled is also within noise levels. Link: https://lkml.kernel.org/r/20250201231803.2661189-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Steven Rostedt Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- mm/slub.c | 51 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 1f50129dcfb3..184fd2b14758 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2000,7 +2000,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, return 0; } -static inline void free_slab_obj_exts(struct slab *slab) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void free_slab_obj_exts(struct slab *slab) { struct slabobj_ext *obj_exts; @@ -2077,33 +2078,37 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) return slab_obj_exts(slab) + obj_to_index(s, slab, p); } -static inline void -alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) { - if (need_slab_obj_ext()) { - struct slabobj_ext *obj_exts; + struct slabobj_ext *obj_exts; - obj_exts = prepare_slab_obj_exts_hook(s, flags, object); - /* - * Currently obj_exts is used only for allocation profiling. - * If other users appear then mem_alloc_profiling_enabled() - * check should be added before alloc_tag_add(). 
- */ - if (likely(obj_exts)) - alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); - } + obj_exts = prepare_slab_obj_exts_hook(s, flags, object); + /* + * Currently obj_exts is used only for allocation profiling. + * If other users appear then mem_alloc_profiling_enabled() + * check should be added before alloc_tag_add(). + */ + if (likely(obj_exts)) + alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); } static inline void -alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, - int objects) +alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) +{ + if (need_slab_obj_ext()) + __alloc_tagging_slab_alloc_hook(s, object, flags); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline void +__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) { struct slabobj_ext *obj_exts; int i; - if (!mem_alloc_profiling_enabled()) - return; - /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) return; @@ -2119,6 +2124,14 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, } } +static inline void +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, + int objects) +{ + if (mem_alloc_profiling_enabled()) + __alloc_tagging_slab_free_hook(s, slab, p, objects); +} + #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void From 93d5440ece3c0aa341fb02e3a44a1b7ab44304c8 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 1 Feb 2025 15:18:02 -0800 Subject: [PATCH 046/431] alloc_tag: uninline code gated by mem_alloc_profiling_key in page allocator When a sizable code section is protected by a disabled static key, that code gets into the instruction cache even though it's not executed and consumes the cache, increasing cache misses. This can be remedied by moving such code into a separate uninlined function. On a Pixel6 phone, page allocation profiling overhead measured with CONFIG_MEM_ALLOC_PROFILING=y and profiling disabled is: baseline modified Big core 4.93% 1.53% Medium core 4.39% 1.41% Little core 1.02% 0.36% This improvement comes at the expense of the configuration when profiling gets enabled, since there is now an additional function call. The overhead from this additional call on Pixel6 is: Big core 0.24% Middle core 0.63% Little core 1.1% However this is negligible when compared with the overall overhead of the memory allocation profiling when it is enabled. On x86 this patch does not make noticeable difference because the overhead with mem_alloc_profiling_key disabled is much lower (under 1%) to start with, so any improvement is less visible and hard to distinguish from the noise. The overhead from additional call when profiling is enabled is also within noise levels. 
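As a rough sketch of the pattern applied in both the slab and page allocator patches (illustrative names only, not the kernel code itself), the static-branch test stays in a tiny inline wrapper while the cold body is forced out of line, so the bulky code leaves the caller's instruction cache whenever the key is disabled:

#include <linux/jump_label.h>
#include <linux/compiler.h>

DEFINE_STATIC_KEY_FALSE(profiling_key);		/* hypothetical static key */

/* cold path: emitted once, out of line, only reached when the key is enabled */
static noinline void __profile_event(unsigned long nr)
{
	/* bulky accounting work lives here */
}

/* hot path: callers inline only the patched branch plus one call */
static inline void profile_event(unsigned long nr)
{
	if (static_branch_unlikely(&profiling_key))
		__profile_event(nr);
}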
Link: https://lkml.kernel.org/r/20250201231803.2661189-3-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Minchan Kim Cc: Pasha Tatashin Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Steven Rostedt Cc: Vlastimil Babka Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- include/linux/pgalloc_tag.h | 60 +++------------------------- mm/page_alloc.c | 78 +++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 55 deletions(-) diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 4a82b6b4820e..c74077977830 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -162,47 +162,13 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code } } +/* Should be called only if mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page); + static inline void clear_page_tag_ref(struct page *page) { - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - set_codetag_empty(&ref); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} - -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } -} - -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) -{ - if (mem_alloc_profiling_enabled()) { - union pgtag_ref_handle handle; - union codetag_ref ref; - - if (get_page_tag_ref(page, &ref, &handle)) { - alloc_tag_sub(&ref, PAGE_SIZE * nr); - update_page_tag_ref(handle, &ref); - put_page_tag_ref(handle); - } - } + if (mem_alloc_profiling_enabled()) + __clear_page_tag_ref(page); } /* Should be called only if mem_alloc_profiling_enabled() */ @@ -222,18 +188,6 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) return tag; } -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) -{ - struct alloc_tag *tag; - - if (!mem_alloc_profiling_enabled()) - return; - - tag = __pgalloc_tag_get(page); - if (tag) - this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); -} - void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); void pgalloc_tag_swap(struct folio *new, struct folio *old); @@ -242,10 +196,6 @@ void __init alloc_tag_sec_init(void); #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void clear_page_tag_ref(struct page *page) {} -static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, - unsigned int nr) {} -static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 38c2b2d20b1d..d875f055aa53 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1041,6 +1041,84 @@ static void kernel_init_pages(struct page *page, int numpages) kasan_enable_current(); } +#ifdef CONFIG_MEM_ALLOC_PROFILING + +/* Should be called only if 
mem_alloc_profiling_enabled() */ +void __clear_page_tag_ref(struct page *page) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + set_codetag_empty(&ref); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_add(page, task, nr); +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static noinline +void __pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub(&ref, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); + } +} + +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) + __pgalloc_tag_sub(page, nr); +} + +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) +{ + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = __pgalloc_tag_get(page); + if (tag) + this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) {} +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + __always_inline bool free_pages_prepare(struct page *page, unsigned int order) { From 023fff71d893d9aa7814078f24d730afccbab9b9 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 4 Feb 2025 22:53:43 +0000 Subject: [PATCH 047/431] selftests/mm: fix thuge-gen test name uniqueness The thuge-gen test_mmap() and test_shmget() tests are repeatedly run for a variety of sizes but always report the result of their test with the same name, meaning that automated sysetms running the tests are unable to distinguish between the various tests. Add the supplied sizes to the logged test names to distinguish between runs. My test automation was getting pretty confused about what was going on - the test names are a pretty important external interface. 
Link: https://lkml.kernel.org/r/20250204-kselftest-mm-fix-dups-v1-1-6afe417ef4bb@kernel.org Fixes: b38bd9b2c448 ("selftests/mm: thuge-gen: conform to TAP format output") Signed-off-by: Mark Brown Reviewed-by: Dev Jain Cc: Muhammad Usama Anjum Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/thuge-gen.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c index e4370b79b62f..cd5174d735be 100644 --- a/tools/testing/selftests/mm/thuge-gen.c +++ b/tools/testing/selftests/mm/thuge-gen.c @@ -127,7 +127,7 @@ void test_mmap(unsigned long size, unsigned flags) show(size); ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, - "%s mmap\n", __func__); + "%s mmap %lu\n", __func__, size); if (munmap(map, size * NUM_PAGES)) ksft_exit_fail_msg("%s: unmap %s\n", __func__, strerror(errno)); @@ -165,7 +165,7 @@ void test_shmget(unsigned long size, unsigned flags) show(size); ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, - "%s: mmap\n", __func__); + "%s: mmap %lu\n", __func__, size); if (shmdt(map)) ksft_exit_fail_msg("%s: shmdt: %s\n", __func__, strerror(errno)); } From 4cc39f91ef6c6f876651eb231974a59ffbcb3a21 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Feb 2025 22:15:14 -0800 Subject: [PATCH 048/431] mm/madvise: split out mmap locking operations for madvise() Patch series "mm/madvise: remove redundant mmap_lock operations from process_madvise()". process_madvise() calls do_madvise() for each address range. Then, each do_madvise() invocation holds and releases same mmap_lock. Optimize the redundant lock operations by splitting do_madvise() internal logic including the mmap_lock operations, and calling the small logic directly from process_madvise() in a sequence that removes the redundant locking. As a result of this change, process_madvise() becomes more efficient and less racy in terms of its results and latency. Note that the potential downside of this series is that other mmap_lock holders may take more time due to the increased length of mmap_lock critical section for process_madvise() calls. But there is maximum limit in the kernel space (IOV_MAX), and userspace can control the critical section length by setting the request size. Hence, the downside would be limited and controllable. Evaluation ========== I measured the time to apply MADV_DONTNEED advice to 256 MiB memory using multiple madvise() calls, 4 KiB per each call. I also do the same with process_madvise(), but with varying batch size (vlen) from 1 to 1024. The source code for the measurement is available at GitHub[1]. Because the microbenchmark result is not that stable, I ran each configuration five times and use the average. The measurement results are as below. 'sz_batches' column shows the batch size of process_madvise() calls. '0' batch size is for madvise() calls case. 'before' and 'after' columns are the measured time to apply MADV_DONTNEED to the 256 MiB memory buffer in nanoseconds, on kernels that built without and with the last patch of this series, respectively. So lower value means better efficiency. 'after/before' column is the ratio of 'after' to 'before'. 
sz_batches before after after/before 0 146294215.2 121280536.2 0.829017989769427 1 165851018.8 136305598.2 0.821855658085351 2 129469321.2 103740383.6 0.801273866569094 4 110369232.4 87835896.2 0.795836795182785 8 102906232.4 77420920.2 0.752344327397609 16 97551017.4 74959714.4 0.768415506038587 32 94809848.2 71200848.4 0.750985786305689 64 96087575.6 72593180 0.755489765942227 128 96154163.8 68517055.4 0.712575022154163 256 92901257.6 69054216.6 0.743307662177439 512 93646170.8 67053296.2 0.716028168874151 1024 92663219.2 70168196.8 0.75723892830177 Despite the unstable nature of the test program, the trend is as we expect. The measurement shows this patchset reduces the process_madvise() latency, proportional to the batching size. The latency gain was about 20% with the batch size 2, and it has increased to about 28% with the batch size 512, since more number of mmap locking is reduced with larger batch size. Note that the standard devitation of the measurements for each sz_batches configuration ranged from 1.9% to 7.2%. That is, this result is not very stable. The average of the standard deviations for different batch sizes were 4.62% and 4.70% for the 'before' and 'after' kernel measurements. Also note that this patch has somehow decreased latencies of madvise() and single batch size process_madvise(). Seems this code path is small enough to significantly be affected by compiler optimizations including inlining of split-out functions. Please focus on only the improvement amount that changed by the batch size. [1] https://github.com/sjp38/eval_proc_madvise This patch (of 4): Split out the madvise behavior-dependent mmap_lock operations from do_madvise(), for easier reuse of the logic in an upcoming change. [lorenzo.stoakes@oracle.com: fix madvise_[un]lock() issue] Link: https://lkml.kernel.org/r/2f448f7b-1da7-4099-aa9e-0179d47fde40@lucifer.local [akpm@linux-foundation.org: coding-style cleanups] Link: https://lkml.kernel.org/r/20250206061517.2958-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250206061517.2958-2-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Shakeel Butt Reviewed-by: Lorenzo Stoakes Reviewed-by: Davidlohr Bueso Reviewed-by: Liam R. Howlett Cc: David Hildenbrand Cc: SeongJae Park Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 62 +++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 08b207f8e61e..fa5dae5a7723 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1574,6 +1574,50 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, madvise_vma_anon_name); } #endif /* CONFIG_ANON_VMA_NAME */ + +#ifdef CONFIG_MEMORY_FAILURE +static bool is_memory_failure(int behavior) +{ + switch (behavior) { + case MADV_HWPOISON: + case MADV_SOFT_OFFLINE: + return true; + default: + return false; + } +} +#else +static bool is_memory_failure(int behavior) +{ + return false; +} +#endif + +static int madvise_lock(struct mm_struct *mm, int behavior) +{ + if (is_memory_failure(behavior)) + return 0; + + if (madvise_need_mmap_write(behavior)) { + if (mmap_write_lock_killable(mm)) + return -EINTR; + } else { + mmap_read_lock(mm); + } + return 0; +} + +static void madvise_unlock(struct mm_struct *mm, int behavior) +{ + if (is_memory_failure(behavior)) + return; + + if (madvise_need_mmap_write(behavior)) + mmap_write_unlock(mm); + else + mmap_read_unlock(mm); +} + /* * The madvise(2) system call. 
* @@ -1650,7 +1694,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh { unsigned long end; int error; - int write; size_t len; struct blk_plug plug; @@ -1672,19 +1715,15 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh if (end == start) return 0; + error = madvise_lock(mm, behavior); + if (error) + return error; + #ifdef CONFIG_MEMORY_FAILURE if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) return madvise_inject_error(behavior, start, start + len_in); #endif - write = madvise_need_mmap_write(behavior); - if (write) { - if (mmap_write_lock_killable(mm)) - return -EINTR; - } else { - mmap_read_lock(mm); - } - start = untagged_addr_remote(mm, start); end = start + len; @@ -1701,10 +1740,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh } blk_finish_plug(&plug); - if (write) - mmap_write_unlock(mm); - else - mmap_read_unlock(mm); + madvise_unlock(mm, behavior); return error; } From dbb0020bbc2c9f563d68564b36d6e8d32f82008b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Feb 2025 22:15:15 -0800 Subject: [PATCH 049/431] mm/madvise: split out madvise input validity check Split out the madvise parameters validation logic from do_madvise(), for easy reuse of the logic from a future change. Link: https://lkml.kernel.org/r/20250206061517.2958-3-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Shakeel Butt Reviewed-by: Lorenzo Stoakes Reviewed-by: Davidlohr Bueso Reviewed-by: Liam R. Howlett Cc: David Hildenbrand Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index fa5dae5a7723..ca858b8a837b 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1618,6 +1618,27 @@ static void madvise_unlock(struct mm_struct *mm, int behavior) mmap_read_unlock(mm); } +static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) +{ + size_t len; + + if (!madvise_behavior_valid(behavior)) + return false; + + if (!PAGE_ALIGNED(start)) + return false; + len = PAGE_ALIGN(len_in); + + /* Check to see whether len was rounded up from small -ve to zero */ + if (len_in && !len) + return false; + + if (start + len < start) + return false; + + return true; +} + /* * The madvise(2) system call. * @@ -1697,20 +1718,11 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh size_t len; struct blk_plug plug; - if (!madvise_behavior_valid(behavior)) + if (!is_valid_madvise(start, len_in, behavior)) return -EINVAL; - if (!PAGE_ALIGNED(start)) - return -EINVAL; len = PAGE_ALIGN(len_in); - - /* Check to see whether len was rounded up from small -ve to zero */ - if (len_in && !len) - return -EINVAL; - end = start + len; - if (end < start) - return -EINVAL; if (end == start) return 0; From 457753da6462024ad821bcb4df2d828cf2ef18be Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Feb 2025 22:15:16 -0800 Subject: [PATCH 050/431] mm/madvise: split out madvise() behavior execution Split out the madvise behavior applying logic from do_madvise() to make it easier to reuse from the following change. Link: https://lkml.kernel.org/r/20250206061517.2958-4-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Shakeel Butt Reviewed-by: Lorenzo Stoakes Reviewed-by: Liam R. 
Howlett Cc: David Hildenbrand Cc: Davidlohr Bueso Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 53 +++++++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index ca858b8a837b..6e31e3202d71 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1639,6 +1639,35 @@ static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) return true; } +static int madvise_do_behavior(struct mm_struct *mm, + unsigned long start, size_t len_in, size_t len, int behavior) +{ + struct blk_plug plug; + unsigned long end; + int error; + +#ifdef CONFIG_MEMORY_FAILURE + if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) + return madvise_inject_error(behavior, start, start + len_in); +#endif + start = untagged_addr_remote(mm, start); + end = start + len; + + blk_start_plug(&plug); + switch (behavior) { + case MADV_POPULATE_READ: + case MADV_POPULATE_WRITE: + error = madvise_populate(mm, start, end, behavior); + break; + default: + error = madvise_walk_vmas(mm, start, end, behavior, + madvise_vma_behavior); + break; + } + blk_finish_plug(&plug); + return error; +} + /* * The madvise(2) system call. * @@ -1716,7 +1745,6 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh unsigned long end; int error; size_t len; - struct blk_plug plug; if (!is_valid_madvise(start, len_in, behavior)) return -EINVAL; @@ -1730,28 +1758,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh error = madvise_lock(mm, behavior); if (error) return error; - -#ifdef CONFIG_MEMORY_FAILURE - if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) - return madvise_inject_error(behavior, start, start + len_in); -#endif - - start = untagged_addr_remote(mm, start); - end = start + len; - - blk_start_plug(&plug); - switch (behavior) { - case MADV_POPULATE_READ: - case MADV_POPULATE_WRITE: - error = madvise_populate(mm, start, end, behavior); - break; - default: - error = madvise_walk_vmas(mm, start, end, behavior, - madvise_vma_behavior); - break; - } - blk_finish_plug(&plug); - + error = madvise_do_behavior(mm, start, len_in, len, behavior); madvise_unlock(mm, behavior); return error; From 4000e3d0a367c5ff2035a0394b01b93974be6cb1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Feb 2025 22:15:17 -0800 Subject: [PATCH 051/431] mm/madvise: remove redundant mmap_lock operations from process_madvise() Optimize redundant mmap lock operations from process_madvise() by directly doing the mmap locking first, and then the remaining works for all ranges in the loop. [akpm@linux-foundation.org: update comment, per Lorenzo] Link: https://lkml.kernel.org/r/20250206061517.2958-5-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Shakeel Butt Reviewed-by: Liam R. 
Howlett Reviewed-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Davidlohr Bueso Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 6e31e3202d71..6ecead476a80 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1778,16 +1778,33 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, total_len = iov_iter_count(iter); + ret = madvise_lock(mm, behavior); + if (ret) + return ret; + while (iov_iter_count(iter)) { - ret = do_madvise(mm, (unsigned long)iter_iov_addr(iter), - iter_iov_len(iter), behavior); + unsigned long start = (unsigned long)iter_iov_addr(iter); + size_t len_in = iter_iov_len(iter); + size_t len; + + if (!is_valid_madvise(start, len_in, behavior)) { + ret = -EINVAL; + break; + } + + len = PAGE_ALIGN(len_in); + if (start + len == start) + ret = 0; + else + ret = madvise_do_behavior(mm, start, len_in, len, + behavior); /* * An madvise operation is attempting to restart the syscall, * but we cannot proceed as it would not be correct to repeat * the operation in aggregate, and would be surprising to the * user. * - * As we have already dropped locks, it is safe to just loop and + * We drop and reacquire locks so it is safe to just loop and * try again. We check for fatal signals in case we need exit * early anyway. */ @@ -1796,12 +1813,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, ret = -EINTR; break; } + + /* Drop and reacquire lock to unwind race. */ + madvise_unlock(mm, behavior); + madvise_lock(mm, behavior); continue; } if (ret < 0) break; iov_iter_advance(iter, iter_iov_len(iter)); } + madvise_unlock(mm, behavior); ret = (total_len - iov_iter_count(iter)) ? : ret; From 33c9b01ed2fcbc101cdfeb497f4581e981e7c1e7 Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Thu, 6 Feb 2025 14:09:58 +0800 Subject: [PATCH 052/431] mm/memfd: fix spelling and grammatical issues The comment "If a private mapping then writability is irrelevant" contains a typo. It should be "If a private mapping then writability is irrelevant". The comment "SEAL_EXEC implys SEAL_WRITE, making W^X from the start." contains a typo. It should be "SEAL_EXEC implies SEAL_WRITE, making W^X from the start." Link: https://lkml.kernel.org/r/20250206060958.98010-1-liuye@kylinos.cn Signed-off-by: Liu Ye Signed-off-by: Andrew Morton --- mm/memfd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memfd.c b/mm/memfd.c index 37f7be57c2f5..c64df1343059 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -259,7 +259,7 @@ static int memfd_add_seals(struct file *file, unsigned int seals) } /* - * SEAL_EXEC implys SEAL_WRITE, making W^X from the start. + * SEAL_EXEC implies SEAL_WRITE, making W^X from the start. */ if (seals & F_SEAL_EXEC && inode->i_mode & 0111) seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE; @@ -337,7 +337,7 @@ static int check_write_seal(unsigned long *vm_flags_ptr) unsigned long vm_flags = *vm_flags_ptr; unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE); - /* If a private matting then writability is irrelevant. */ + /* If a private mapping then writability is irrelevant. */ if (!(mask & VM_SHARED)) return 0; From 81fe88a946051f1dceef72fb3f87bb0880392464 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:10 +0800 Subject: [PATCH 053/431] mm/swap_state.c: fix the obsolete code comment Patch series "Tiny cleanup and improvements about SWAP code". 
These are all made during review and from reading the patchset "[PATCH v3 00/13] mm, swap: rework of swap allocator locks" from Kairui. This patch (of 12): Since commit 85a1333417a7 ("mm/swap: use dedicated entry for swap in folio"), there's a dedicated field in folio for swap entry. Let's update the code comment above add_to_swap_cache() accordingly. Link: https://lkml.kernel.org/r/20250205092721.9395-1-bhe@redhat.com Link: https://lkml.kernel.org/r/20250205092721.9395-2-bhe@redhat.com Signed-off-by: Baoquan He Reviewed-by: Kairui Song Cc: Baoquan he Cc: Chris Li (Google) Signed-off-by: Andrew Morton --- mm/swap_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swap_state.c b/mm/swap_state.c index 2e1acb210e57..aabde86d1f47 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -85,7 +85,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry) /* * add_to_swap_cache resembles filemap_add_folio on swapper_space, - * but sets SwapCache flag and private instead of mapping and index. + * but sets SwapCache flag and 'swap' instead of mapping and index. */ int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp) From cd57a3fb37f91ef6106bc970d2b5c5878d3b5627 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:11 +0800 Subject: [PATCH 054/431] mm/swap_state.c: optimize the code in clear_shadow_from_swap_cache() Use ALIGN to achieve the same effect and simplify the code. Link: https://lkml.kernel.org/r/20250205092721.9395-3-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swap_state.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/swap_state.c b/mm/swap_state.c index aabde86d1f47..8a84371980e9 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -270,9 +270,7 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, xa_unlock_irq(&address_space->i_pages); /* search the next swapcache until we meet end */ - curr >>= SWAP_ADDRESS_SPACE_SHIFT; - curr++; - curr <<= SWAP_ADDRESS_SPACE_SHIFT; + curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES); if (curr > end) break; } From 0eb7d2c337f9df3b6adbcbdce213595d94781e05 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:12 +0800 Subject: [PATCH 055/431] mm/swap: remove SWAP_FLAG_PRIO_SHIFT It doesn't make sense to have a zero value of shift. Remove it to avoid confusion. 
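As a tiny standalone illustration (userspace C, not kernel code) of why removing the zero shift is purely cosmetic: a right shift by zero is a no-op, so masking alone yields exactly the same priority value.

#include <assert.h>

#define SWAP_FLAG_PREFER	0x8000
#define SWAP_FLAG_PRIO_MASK	0x7fff

int main(void)
{
	int swap_flags = SWAP_FLAG_PREFER | 42;		/* priority 42 requested */
	int old_prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> 0;
	int new_prio = swap_flags & SWAP_FLAG_PRIO_MASK;

	assert(old_prio == new_prio && new_prio == 42);
	return 0;
}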
Link: https://lkml.kernel.org/r/20250205092721.9395-4-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 - mm/swapfile.c | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 9a48e79a0a52..bbd06cbd1f2b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -24,7 +24,6 @@ struct pagevec; #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff -#define SWAP_FLAG_PRIO_SHIFT 0 #define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */ #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */ #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */ diff --git a/mm/swapfile.c b/mm/swapfile.c index df7c4e8b089c..3a17e40f4c95 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3453,8 +3453,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) mutex_lock(&swapon_mutex); prio = -1; if (swap_flags & SWAP_FLAG_PREFER) - prio = - (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; + prio = swap_flags & SWAP_FLAG_PRIO_MASK; enable_swap_info(si, prio, swap_map, cluster_info, zeromap); pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n", From 9b9cba7289ba7e5076fbfe913860306b4672631c Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:13 +0800 Subject: [PATCH 056/431] mm/swap: skip scanning cluster range if it's empty cluster Since ci->lock has been taken when isolating a cluster from si->free_clusters or taking si->percpu_cluster->next[order], it's unnecessary to scan and check the cluster range availability if it's an empty cluster, and this can accelerate huge page swapping. Link: https://lkml.kernel.org/r/20250205092721.9395-5-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/swapfile.c b/mm/swapfile.c index 3a17e40f4c95..0a0c4118759e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -730,6 +730,9 @@ static bool cluster_scan_range(struct swap_info_struct *si, unsigned long offset, end = start + nr_pages; unsigned char *map = si->swap_map; + if (cluster_is_empty(ci)) + return true; + for (offset = start; offset < end; offset++) { switch (READ_ONCE(map[offset])) { case 0: From b4735d94c29f6a65362d3cce0d55aa65e0e319e3 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:14 +0800 Subject: [PATCH 057/431] mm/swap: rename swap_is_has_cache() to swap_only_has_cache() There are two predicates in the name of swap_is_has_cache(), which is confusing. Rename it to remove the confusion and to better reflect its functionality.
Link: https://lkml.kernel.org/r/20250205092721.9395-6-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 0a0c4118759e..0a9c1efeffd6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -161,7 +161,7 @@ static long swap_usage_in_pages(struct swap_info_struct *si) /* Reclaim directly, bypass the slot cache and don't touch device lock */ #define TTRS_DIRECT 0x8 -static bool swap_is_has_cache(struct swap_info_struct *si, +static bool swap_only_has_cache(struct swap_info_struct *si, unsigned long offset, int nr_pages) { unsigned char *map = si->swap_map + offset; @@ -243,7 +243,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, * reference or pending writeback, and can't be allocated to others. */ ci = lock_cluster(si, offset); - need_reclaim = swap_is_has_cache(si, offset, nr_pages); + need_reclaim = swap_only_has_cache(si, offset, nr_pages); unlock_cluster(ci); if (!need_reclaim) goto out_unlock; @@ -1577,7 +1577,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) return; ci = lock_cluster(si, offset); - if (swap_is_has_cache(si, offset, size)) + if (swap_only_has_cache(si, offset, size)) swap_entry_range_free(si, ci, entry, size); else { for (int i = 0; i < size; i++, entry.val++) { From f80ddc148ca6505060fbef6f8365ca151103e16f Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:15 +0800 Subject: [PATCH 058/431] mm/swapfile.c: update the code comment above swap_count_continued() Now, swap_count_continued() has two callers, __swap_duplicate() and __swap_entry_free_locked(), so the relevant code comment is stale. Update it to reflect the current situation. [bhe@redhat.com: v2] Link: https://lkml.kernel.org/r/Z6V0/UvG1fvkQ4t/@fedora Link: https://lkml.kernel.org/r/20250205092721.9395-7-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 0a9c1efeffd6..08debaacbbc0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3797,8 +3797,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) * into, carry if so, or else fail until a new continuation page is allocated; * when the original swap_map count is decremented from 0 with continuation, * borrow from the continuation and report whether it still holds more. - * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster - * lock. + * Called while __swap_duplicate() or caller of __swap_entry_free_locked() + * holds cluster lock. */ static bool swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) From a46a6bc21c223b852ff324d7c7ef3da868b90fd0 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:16 +0800 Subject: [PATCH 059/431] mm/swapfile.c: optimize code in setup_clusters() In the last 'for' loop inside setup_clusters(), using the two local variables 'k' and 'j' is obviously redundant. Using 'j' alone is enough and simpler. Also move the SWAP_CLUSTER_COLS macro close to its only user, setup_clusters().
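A small standalone check (illustration only, with an arbitrary column count) of the equivalence this cleanup relies on: when 'k' ranges over [0, SWAP_CLUSTER_COLS), 'k % SWAP_CLUSTER_COLS' is simply 'k', so the second loop variable carries no extra information.

#include <assert.h>

#define SWAP_CLUSTER_COLS 16	/* any positive column count behaves the same */

int main(void)
{
	for (unsigned long k = 0; k < SWAP_CLUSTER_COLS; k++) {
		unsigned long j = k % SWAP_CLUSTER_COLS;

		assert(j == k);		/* 'j' never differs from 'k' */
	}
	return 0;
}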
Link: https://lkml.kernel.org/r/20250205092721.9395-8-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 08debaacbbc0..cbee03aa74b8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3127,13 +3127,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si, return maxpages; } -#define SWAP_CLUSTER_INFO_COLS \ - DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) -#define SWAP_CLUSTER_SPACE_COLS \ - DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) -#define SWAP_CLUSTER_COLS \ - max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) - static int setup_swap_map_and_extents(struct swap_info_struct *si, union swap_header *swap_header, unsigned char *swap_map, @@ -3173,13 +3166,20 @@ static int setup_swap_map_and_extents(struct swap_info_struct *si, return nr_extents; } +#define SWAP_CLUSTER_INFO_COLS \ + DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) +#define SWAP_CLUSTER_SPACE_COLS \ + DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) +#define SWAP_CLUSTER_COLS \ + max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) + static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, union swap_header *swap_header, unsigned long maxpages) { unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); struct swap_cluster_info *cluster_info; - unsigned long i, j, k, idx; + unsigned long i, j, idx; int cpu, err = -ENOMEM; cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); @@ -3240,8 +3240,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, * Reduce false cache line sharing between cluster_info and * sharing same address space. */ - for (k = 0; k < SWAP_CLUSTER_COLS; k++) { - j = k % SWAP_CLUSTER_COLS; + for (j = 0; j < SWAP_CLUSTER_COLS; j++) { for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; From e89c45c700e7d9ab4913ec98f1fe9f23d46c1397 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:17 +0800 Subject: [PATCH 060/431] mm/swap_state.c: remove the meaningless code comment Since commit 8d93b41c09d1 ("mm: Convert add_to_swap_cache to XArray"), there's no returned _EEXIT, so the code comment doesn't make sense any more. Link: https://lkml.kernel.org/r/20250205092721.9395-9-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swap_state.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/swap_state.c b/mm/swap_state.c index 8a84371980e9..718a8de0c07d 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -203,10 +203,6 @@ bool add_to_swap(struct folio *folio) err = add_to_swap_cache(folio, entry, __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL); if (err) - /* - * add_to_swap_cache() doesn't return -EEXIST, so we can safely - * clear SWAP_HAS_CACHE flag. - */ goto fail; /* * Normally the folio will be dirtied in unmap because its From ac2d3268284ba4e254e4ca346bf6d63fb8a17518 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:18 +0800 Subject: [PATCH 061/431] mm/swapfile.c: remove the unneeded checking In free_swap_and_cache_nr(), invocation of get_swap_device() has done the checking if it's a swap entry. So remove the redundant checking here. 
Link: https://lkml.kernel.org/r/20250205092721.9395-10-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index cbee03aa74b8..45d25f170660 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1789,9 +1789,6 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr) bool any_only_cache = false; unsigned long offset; - if (non_swap_entry(entry)) - return; - si = get_swap_device(entry); if (!si) return; From c523aa890760b004eea47a0e00b5750a528f3ced Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:19 +0800 Subject: [PATCH 062/431] mm/swap: rename swap_swapcount() to swap_entry_swapped() The new function name can reflect the real behaviour of the function more clearly and more accurately. And the renaming avoids the confusion between swap_swapcount() and swp_swapcount(). Link: https://lkml.kernel.org/r/20250205092721.9395-11-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- include/linux/swap.h | 6 +++--- mm/swap_state.c | 2 +- mm/swapfile.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index bbd06cbd1f2b..2fe91c293636 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -499,7 +499,7 @@ int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); -extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry); +extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); struct swap_info_struct *swp_swap_info(swp_entry_t entry); struct backing_dev_info; @@ -582,9 +582,9 @@ static inline int __swap_count(swp_entry_t entry) return 0; } -static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { - return 0; + return false; } static inline int swp_swapcount(swp_entry_t entry) diff --git a/mm/swap_state.c b/mm/swap_state.c index 718a8de0c07d..a54b035d6a6c 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -457,7 +457,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * as SWAP_HAS_CACHE. That's done in later part of code or * else swap_off will be aborted if we return NULL. */ - if (!swap_swapcount(si, entry) && swap_slot_cache_enabled) + if (!swap_entry_swapped(si, entry) && swap_slot_cache_enabled) goto put_and_return; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 45d25f170660..d45349ed3ee1 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1620,7 +1620,7 @@ int __swap_count(swp_entry_t entry) * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. 
*/ -int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; @@ -1629,7 +1629,7 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); unlock_cluster(ci); - return count; + return !!count; } /* @@ -1715,7 +1715,7 @@ static bool folio_swapped(struct folio *folio) return false; if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) - return swap_swapcount(si, entry) != 0; + return swap_entry_swapped(si, entry); return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } From 4ccd4154fafff5516103051bc9162c33d832b880 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:20 +0800 Subject: [PATCH 063/431] mm/swapfile.c: remove the incorrect code comment Since commit eb085574a752 ("mm, swap: fix race between swapoff and some swap operations"), the non_swap_entry() checking has been taken off from function __swap_duplicate(). Hence, in the kernel-doc comment, the line 'swp_entry is migration entry -> EINVAL' is obsolete. Remove that line to avoid misleading people. Link: https://lkml.kernel.org/r/20250205092721.9395-12-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index d45349ed3ee1..c73e258bb588 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3529,7 +3529,6 @@ void si_swapinfo(struct sysinfo *val) * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL - * - swp_entry is migration entry -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM From 1d212293ffd145eebd795d5969a81a3b59f71bcb Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 5 Feb 2025 17:27:21 +0800 Subject: [PATCH 064/431] mm/swapfile.c: open code cluster_alloc_swap() It's only called in scan_swap_map_slots(). And also remove the stale code comment in scan_swap_map_slots() because it's not fit for the current cluster allocation mechanism. Link: https://lkml.kernel.org/r/20250205092721.9395-13-bhe@redhat.com Signed-off-by: Baoquan He Cc: Chris Li Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 38 ++++++++++---------------------------- 1 file changed, 10 insertions(+), 28 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index c73e258bb588..cab68e57f4cc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1163,39 +1163,13 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, swap_usage_sub(si, nr_entries); } -static int cluster_alloc_swap(struct swap_info_struct *si, - unsigned char usage, int nr, - swp_entry_t slots[], int order) -{ - int n_ret = 0; - - while (n_ret < nr) { - unsigned long offset = cluster_alloc_swap_entry(si, order, usage); - - if (!offset) - break; - slots[n_ret++] = swp_entry(si->type, offset); - } - - return n_ret; -} - static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, swp_entry_t slots[], int order) { unsigned int nr_pages = 1 << order; + int n_ret = 0; - /* - * We try to cluster swap pages by allocating them sequentially - * in swap. 
Once we've allocated SWAPFILE_CLUSTER pages this - * way, however, we resort to first-free allocation, starting - * a new cluster. This prevents us from scattering swap pages - * all over the entire swap partition, so that we reduce - * overall disk seek times between swap pages. -- sct - * But we do now try to find an empty cluster. -Andrea - * And we let swap pages go all over an SSD partition. Hugh - */ if (order > 0) { /* * Should not even be attempting large allocations when huge @@ -1215,7 +1189,15 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return 0; } - return cluster_alloc_swap(si, usage, nr, slots, order); + while (n_ret < nr) { + unsigned long offset = cluster_alloc_swap_entry(si, order, usage); + + if (!offset) + break; + slots[n_ret++] = swp_entry(si->type, offset); + } + + return n_ret; } static bool get_swap_device_info(struct swap_info_struct *si) From 9a5b183941b52f84c0f9e5f27ce44e99318c9e0f Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Thu, 6 Feb 2025 13:26:33 +0100 Subject: [PATCH 065/431] mm, percpu: do not consider sleepable allocations atomic 28307d938fb2 ("percpu: make pcpu_alloc() aware of current gfp context") has fixed a reclaim recursion for scoped GFP_NOFS context. It has done that by avoiding taking pcpu_alloc_mutex. This is a correct solution, as the worker context, which has full GFP_KERNEL allocation/reclaim power and uses the same lock, cannot block the NOFS pcpu_alloc caller. On the other hand this is a very conservative approach that could lead to failures because the lockless pcpu_alloc implementation is quite limited. We have a bug report about premature failures when a scsi array of 193 devices is scanned. Sometimes (not consistently) the scanning aborts because the iscsid daemon fails to create the queue for a random scsi device during the scan. iscsid itself is running with PR_SET_IO_FLUSHER set so all allocations from this process context are GFP_NOIO. This in turn makes any pcpu_alloc lockless (without pcpu_alloc_mutex), which leads to premature failures. It has turned out that iscsid has worked around this by dropping PR_SET_IO_FLUSHER (https://github.com/open-iscsi/open-iscsi/pull/382) when scanning the host. But we can do better in this case on the kernel side and use pcpu_alloc_mutex for NOIO and NOFS constrained allocation scopes too. We just need the WQ worker to never trigger IO/FS reclaim. Achieve that by enforcing scoped GFP_NOIO for the whole execution of pcpu_balance_workfn (this will imply the NOFS constraint as well). This will remove the dependency chain and preserve the full allocation power of the pcpu_alloc call. While at it, make is_atomic really test for blockable allocations.
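A minimal sketch of the scoped-constraint pattern the fix relies on (worker and lock names are made up, this is not the percpu code itself): every allocation between memalloc_noio_save() and memalloc_noio_restore() is implicitly limited to GFP_NOIO via current_gfp_context(), so the worker can safely share a mutex with NOIO/NOFS-constrained allocators without forming a reclaim dependency.

#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(shared_alloc_mutex);	/* hypothetical lock shared with constrained callers */

static void balance_worker(struct work_struct *work)
{
	unsigned int noio_flags = memalloc_noio_save();

	mutex_lock(&shared_alloc_mutex);
	/* allocations issued here cannot recurse into IO/FS reclaim */
	mutex_unlock(&shared_alloc_mutex);

	memalloc_noio_restore(noio_flags);
}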
Link: https://lkml.kernel.org/r/20250206122633.167896-1-mhocko@kernel.org Fixes: 28307d938fb2 ("percpu: make pcpu_alloc() aware of current gfp context") Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Dennis Zhou Cc: Filipe David Manana Cc: Tejun Heo Signed-off-by: Andrew Morton --- mm/percpu.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mm/percpu.c b/mm/percpu.c index ac61e3fc5f15..027fb6497495 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1745,7 +1745,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, gfp = current_gfp_context(gfp); /* whitelisted flags that can be passed to the backing allocators */ pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; + is_atomic = !gfpflags_allow_blocking(gfp); do_warn = !(gfp & __GFP_NOWARN); /* @@ -2191,7 +2191,12 @@ static void pcpu_balance_workfn(struct work_struct *work) * to grow other chunks. This then gives pcpu_reclaim_populated() time * to move fully free chunks to the active list to be freed if * appropriate. + * + * Enforce GFP_NOIO allocations because we have pcpu_alloc users + * constrained to GFP_NOIO/NOFS contexts and they could form lock + * dependency through pcpu_alloc_mutex */ + unsigned int flags = memalloc_noio_save(); mutex_lock(&pcpu_alloc_mutex); spin_lock_irq(&pcpu_lock); @@ -2202,6 +2207,7 @@ static void pcpu_balance_workfn(struct work_struct *work) spin_unlock_irq(&pcpu_lock); mutex_unlock(&pcpu_alloc_mutex); + memalloc_noio_restore(flags); } /** From 7ddeb91f5b03f25995861e9b2e1eb766aa528892 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 6 Feb 2025 11:45:36 +0000 Subject: [PATCH 066/431] mm: kmemleak: add support for dumping physical and __percpu object info Patch series "mm: kmemleak: Usability improvements". Following a recent false positive tracking that led to commit 488b5b9eca68 ("mm: kmemleak: fix upper boundary check for physical address objects"), I needed kmemleak to give me more debug information about the objects it is tracking. This lead to the first patch of this series. The second patch changes the kmemleak-test module to show the raw pointers for debugging purposes. This patch (of 2): Currently, echo dump=... > /sys/kernel/debug/kmemleak only looks up the main virtual address object tree. However, for debugging, it's useful to dump information about physical address and __percpu objects. Search all three object trees for the dump= command and also print the type of the object if not virtual: "(phys)" or "(percpu)". In addition, allow search by alias (pointer within the object). Link: https://lkml.kernel.org/r/20250206114537.2597764-1-catalin.marinas@arm.com Link: https://lkml.kernel.org/r/20250206114537.2597764-2-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Signed-off-by: Andrew Morton --- mm/kmemleak.c | 54 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index c6ed68604136..c12cef3eeb32 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -352,6 +352,15 @@ static bool unreferenced_object(struct kmemleak_object *object) jiffies_last_scan); } +static const char *__object_type_str(struct kmemleak_object *object) +{ + if (object->flags & OBJECT_PHYS) + return " (phys)"; + if (object->flags & OBJECT_PERCPU) + return " (percpu)"; + return ""; +} + /* * Printing of the unreferenced objects information to the seq file. 
The * print_unreferenced function must be called with the object->lock held. @@ -364,8 +373,9 @@ static void print_unreferenced(struct seq_file *seq, unsigned int nr_entries; nr_entries = stack_depot_fetch(object->trace_handle, &entries); - warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n", - object->pointer, object->size); + warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n", + __object_type_str(object), + object->pointer, object->size); warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n", object->comm, object->pid, object->jiffies); hex_dump_object(seq, object); @@ -384,10 +394,10 @@ static void print_unreferenced(struct seq_file *seq, */ static void dump_object_info(struct kmemleak_object *object) { - pr_notice("Object 0x%08lx (size %zu):\n", - object->pointer, object->size); + pr_notice("Object%s 0x%08lx (size %zu):\n", + __object_type_str(object), object->pointer, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", - object->comm, object->pid, object->jiffies); + object->comm, object->pid, object->jiffies); pr_notice(" min_count = %d\n", object->min_count); pr_notice(" count = %d\n", object->count); pr_notice(" flags = 0x%x\n", object->flags); @@ -1998,25 +2008,41 @@ static int kmemleak_open(struct inode *inode, struct file *file) return seq_open(file, &kmemleak_seq_ops); } -static int dump_str_object_info(const char *str) +static bool __dump_str_object_info(unsigned long addr, unsigned int objflags) { unsigned long flags; struct kmemleak_object *object; - unsigned long addr; - if (kstrtoul(str, 0, &addr)) - return -EINVAL; - object = find_and_get_object(addr, 0); - if (!object) { - pr_info("Unknown object at 0x%08lx\n", addr); - return -EINVAL; - } + object = __find_and_get_object(addr, 1, objflags); + if (!object) + return false; raw_spin_lock_irqsave(&object->lock, flags); dump_object_info(object); raw_spin_unlock_irqrestore(&object->lock, flags); put_object(object); + + return true; +} + +static int dump_str_object_info(const char *str) +{ + unsigned long addr; + bool found = false; + + if (kstrtoul(str, 0, &addr)) + return -EINVAL; + + found |= __dump_str_object_info(addr, 0); + found |= __dump_str_object_info(addr, OBJECT_PHYS); + found |= __dump_str_object_info(addr, OBJECT_PERCPU); + + if (!found) { + pr_info("Unknown object at 0x%08lx\n", addr); + return -EINVAL; + } + return 0; } From fe1136b4ccbfac9b8e72d4551d1ce788a67d59cb Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 6 Feb 2025 11:45:37 +0000 Subject: [PATCH 067/431] samples: kmemleak: print the raw pointers for debugging purposes The kmemleak-test.c module is meant to leak some pointers for debugging the kmemleak detection, pointer information dumping. It's no use if it prints the hashed values of such pointers. Change the printk() format from %p to %px. While at it, also display the raw __percpu pointer rather than this_cpu_ptr() since kmemleak now tracks such pointers independently of the standard allocations. 
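For context, a short sketch (not the test module itself) of what the format change buys: by default %p prints a hashed value, while %px prints the raw address, which is what a later kmemleak lookup by address has to match.

#include <linux/slab.h>
#include <linux/printk.h>

static void show_pointer_formats(void)
{
	void *obj = kmalloc(32, GFP_KERNEL);

	if (!obj)
		return;
	pr_info("hashed: %p\n", obj);	/* obfuscated by default */
	pr_info("raw:    0x%px\n", obj);	/* actual kernel address */
	kfree(obj);
}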
Link: https://lkml.kernel.org/r/20250206114537.2597764-3-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Signed-off-by: Andrew Morton --- samples/kmemleak/kmemleak-test.c | 36 ++++++++++++++++---------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/samples/kmemleak/kmemleak-test.c b/samples/kmemleak/kmemleak-test.c index 544c36d51d56..8609812a37eb 100644 --- a/samples/kmemleak/kmemleak-test.c +++ b/samples/kmemleak/kmemleak-test.c @@ -40,25 +40,25 @@ static int kmemleak_test_init(void) pr_info("Kmemleak testing\n"); /* make some orphan objects */ - pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); - pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); - pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); - pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); - pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); - pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); - pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); - pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); + pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL)); + pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL)); #ifndef CONFIG_MODULES - pr_info("kmem_cache_alloc(files_cachep) = %p\n", + pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); - pr_info("kmem_cache_alloc(files_cachep) = %p\n", + pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); #endif - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); - pr_info("vmalloc(64) = %p\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); + pr_info("vmalloc(64) = 0x%px\n", vmalloc(64)); /* * Add elements to a list. They should only appear as orphan @@ -66,7 +66,7 @@ static int kmemleak_test_init(void) */ for (i = 0; i < 10; i++) { elem = kzalloc(sizeof(*elem), GFP_KERNEL); - pr_info("kzalloc(sizeof(*elem)) = %p\n", elem); + pr_info("kzalloc(sizeof(*elem)) = 0x%px\n", elem); if (!elem) return -ENOMEM; INIT_LIST_HEAD(&elem->list); @@ -75,11 +75,11 @@ static int kmemleak_test_init(void) for_each_possible_cpu(i) { per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); - pr_info("kmalloc(129) = %p\n", + pr_info("kmalloc(129) = 0x%px\n", per_cpu(kmemleak_test_pointer, i)); } - pr_info("__alloc_percpu(64, 4) = %p\n", __alloc_percpu(64, 4)); + pr_info("__alloc_percpu(64, 4) = 0x%px\n", __alloc_percpu(64, 4)); return 0; } From 3a06696305e757f652dd0dcf4dfa2272eda39434 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Fri, 7 Feb 2025 13:20:32 -0800 Subject: [PATCH 068/431] mm/damon/ops: have damon_get_folio return folio even for tail pages Patch series "mm/damon/paddr: fix large folios access and schemes handling". DAMON operations set for physical address space, namely 'paddr', treats tail pages as unaccessed always. 
It can also apply a DAMOS action to a large folio multiple times within a single DAMOS regions walk. As a result, the monitoring output has poor quality and DAMOS works in unexpected ways when large folios are being used. Fix those. The patches were parts of Usama's hugepage_size DAMOS filter patch series[1]. The first fix has been collected from there with a slight commit message change for the subject prefix. The second fix was re-written by SJ and posted as an RFC [2] before this series. The second one also got a slight commit message change for the subject prefix. [1] https://lore.kernel.org/20250203225604.44742-1-usamaarif642@gmail.com [2] https://lore.kernel.org/20250206231103.38298-1-sj@kernel.org This patch (of 2): This effectively adds support for large folios in damon for paddr, as damon_pa_mkold/young won't get a null folio from this function and won't ignore it, hence access will be checked and reported. This also means that larger folios will be considered for different DAMOS actions like pageout, prioritization and migration. As these DAMOS actions will consider larger folios, iterate through the region at folio_size and not PAGE_SIZE intervals. This should not have an effect on vaddr, as damon_young_pmd_entry considers pmd entries. Link: https://lkml.kernel.org/r/20250207212033.45269-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250207212033.45269-2-sj@kernel.org Fixes: a28397beb55b ("mm/damon: implement primitives for physical address space monitoring") Signed-off-by: Usama Arif Signed-off-by: SeongJae Park Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 2 +- mm/damon/paddr.c | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index 86a50e8fbc80..0db1fc70c84d 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -26,7 +26,7 @@ struct folio *damon_get_folio(unsigned long pfn) struct page *page = pfn_to_online_page(pfn); struct folio *folio; - if (!page || PageTail(page)) + if (!page) return NULL; folio = page_folio(page); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 1a89920efce9..2ac19ebc7076 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -277,11 +277,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, damos_add_filter(s, filter); } - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -297,6 +300,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, else list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } if (install_young_filter) @@ -312,11 +316,14 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( { unsigned long addr, applied = 0; - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -329,6 +336,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( folio_deactivate(folio); applied += folio_nr_pages(folio); put_folio: + addr += folio_size(folio); folio_put(folio); } return applied * PAGE_SIZE; @@ -475,11 +483,14 @@
static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, unsigned long addr, applied; LIST_HEAD(folio_list); - for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + addr = r->ar.start; + while (addr < r->ar.end) { struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - if (!folio) + if (!folio) { + addr += PAGE_SIZE; continue; + } if (damos_pa_filter_out(s, folio)) goto put_folio; @@ -490,6 +501,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, goto put_folio; list_add(&folio->lru, &folio_list); put_folio: + addr += folio_size(folio); folio_put(folio); } applied = damon_pa_migrate_pages(&folio_list, s->target_nid); From 94ba17adaba0f651fdcf745c8891a88e2e028cfa Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 7 Feb 2025 13:20:33 -0800 Subject: [PATCH 069/431] mm/damon: avoid applying DAMOS action to same entity multiple times 'paddr' DAMON operations set can apply a DAMOS scheme's action to a large folio multiple times in single DAMOS-regions-walk if the folio is laid on multiple DAMON regions. Add a field for DAMOS scheme object that can be used by the underlying ops to know what was the last entity that the scheme's action has applied. The core layer unsets the field when each DAMOS-regions-walk is done for the given scheme. And update 'paddr' ops to use the infrastructure to avoid the problem. Link: https://lkml.kernel.org/r/20250207212033.45269-3-sj@kernel.org Fixes: 57223ac29584 ("mm/damon/paddr: support the pageout scheme") Signed-off-by: SeongJae Park Reported-by: Usama Arif Closes: https://lore.kernel.org/20250203225604.44742-3-usamaarif642@gmail.com Cc: Signed-off-by: Andrew Morton --- include/linux/damon.h | 11 +++++++++++ mm/damon/core.c | 1 + mm/damon/paddr.c | 39 +++++++++++++++++++++++++++------------ 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index c9074d569596..b4d37d9b9221 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -432,6 +432,7 @@ struct damos_access_pattern { * @wmarks: Watermarks for automated (in)activation of this scheme. * @target_nid: Destination node if @action is "migrate_{hot,cold}". * @filters: Additional set of &struct damos_filter for &action. + * @last_applied: Last @action applied ops-managing entity. * @stat: Statistics of this scheme. * @list: List head for siblings. * @@ -454,6 +455,15 @@ struct damos_access_pattern { * implementation could check pages of the region and skip &action to respect * &filters * + * The minimum entity that @action can be applied depends on the underlying + * &struct damon_operations. Since it may not be aligned with the core layer + * abstract, namely &struct damon_region, &struct damon_operations could apply + * @action to same entity multiple times. Large folios that underlying on + * multiple &struct damon region objects could be such examples. The &struct + * damon_operations can use @last_applied to avoid that. DAMOS core logic + * unsets @last_applied when each regions walking for applying the scheme is + * finished. + * * After applying the &action to each region, &stat_count and &stat_sz is * updated to reflect the number of regions and total size of regions that the * &action is applied. 
@@ -482,6 +492,7 @@ struct damos { int target_nid; }; struct list_head filters; + void *last_applied; struct damos_stat stat; struct list_head list; }; diff --git a/mm/damon/core.c b/mm/damon/core.c index 384935ef4e65..dc8f94fe7c3b 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1856,6 +1856,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) s->next_apply_sis = c->passed_sample_intervals + (s->apply_interval_us ? s->apply_interval_us : c->attrs.aggr_interval) / sample_interval; + s->last_applied = NULL; } } diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 2ac19ebc7076..eb10a723b0a7 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -254,6 +254,17 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) return false; } +static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s) +{ + if (!folio) + return true; + if (folio == s->last_applied) { + folio_put(folio); + return true; + } + return false; +} + static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, unsigned long *sz_filter_passed) { @@ -261,6 +272,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, LIST_HEAD(folio_list); bool install_young_filter = true; struct damos_filter *filter; + struct folio *folio; /* check access in page level again by default */ damos_for_each_filter(filter, s) { @@ -279,9 +291,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, addr = r->ar.start; while (addr < r->ar.end) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { addr += PAGE_SIZE; continue; } @@ -307,6 +318,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, damos_destroy_filter(filter); applied = reclaim_pages(&folio_list); cond_resched(); + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -315,12 +327,12 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( unsigned long *sz_filter_passed) { unsigned long addr, applied = 0; + struct folio *folio; addr = r->ar.start; while (addr < r->ar.end) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { addr += PAGE_SIZE; continue; } @@ -339,6 +351,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( addr += folio_size(folio); folio_put(folio); } + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -482,12 +495,12 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, { unsigned long addr, applied; LIST_HEAD(folio_list); + struct folio *folio; addr = r->ar.start; while (addr < r->ar.end) { - struct folio *folio = damon_get_folio(PHYS_PFN(addr)); - - if (!folio) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { addr += PAGE_SIZE; continue; } @@ -506,6 +519,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, } applied = damon_pa_migrate_pages(&folio_list, s->target_nid); cond_resched(); + s->last_applied = folio; return applied * PAGE_SIZE; } @@ -523,15 +537,15 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, { unsigned long addr; LIST_HEAD(folio_list); + struct folio *folio; if (!damon_pa_scheme_has_filter(s)) return 0; addr = r->ar.start; while (addr < r->ar.end) { - struct folio *folio = 
damon_get_folio(PHYS_PFN(addr)); - - if (!folio) { + folio = damon_get_folio(PHYS_PFN(addr)); + if (damon_pa_invalid_damos_folio(folio, s)) { addr += PAGE_SIZE; continue; } @@ -541,6 +555,7 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, addr += folio_size(folio); folio_put(folio); } + s->last_applied = folio; return 0; } From e92b6e7bb6189d688ab8f9b27e3992cd8568ee4b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 7 Feb 2025 17:24:42 +0000 Subject: [PATCH 070/431] mm: use READ/WRITE_ONCE() for vma->vm_flags on migrate, mprotect According to the syzbot report referenced here, it is possible to encounter a race between mprotect() writing to the vma->vm_flags field and migration checking whether the VMA is locked. There is no real problem with timing here per se, only that torn reads/writes may occur. Therefore, as a proximate fix, ensure both operations READ_ONCE() and WRITE_ONCE() to avoid this. This race is possible due to the ability to look up VMAs via the rmap, which migration does in this case, which takes no mmap or VMA lock and therefore does not preclude an operation to modify a VMA. When the final update of VMA flags is performed by mprotect, this will cause the rmap lock to be taken while the VMA is inserted on split/merge. However the means by which we perform splits/merges in the kernel is that we perform the split/merge operation on the VMA, acquiring/releasing locks as needed, and only then, after having done so, modifying fields. We should carefully examine and determine whether we can combine the two operations so as to avoid such races, and whether it might be possible to otherwise annotate these rmap field accesses. Link: https://lkml.kernel.org/r/20250207172442.78836-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reported-by: syzbot+c2e5712cbb14c95d4847@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/67a34e60.050a0220.50516.0040.GAE@google.com/ Cc: Jann Horn Cc: Liam Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/migrate.c | 2 +- mm/mprotect.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 97f0edf0c032..a991d3691bda 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -328,7 +328,7 @@ static bool remove_migration_pte(struct folio *folio, folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } - if (vma->vm_flags & VM_LOCKED) + if (READ_ONCE(vma->vm_flags) & VM_LOCKED) mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), diff --git a/mm/mprotect.c b/mm/mprotect.c index 9cb6ab7c4048..1444878f7aeb 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -599,7 +599,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, unsigned long start, unsigned long end, unsigned long newflags) { struct mm_struct *mm = vma->vm_mm; - unsigned long oldflags = vma->vm_flags; + unsigned long oldflags = READ_ONCE(vma->vm_flags); long nrpages = (end - start) >> PAGE_SHIFT; unsigned int mm_cp_flags = 0; unsigned long charged = 0; @@ -619,7 +619,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, * uncommon case, so doesn't need to be very optimized. */ if (arch_has_pfn_modify_check() && - (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && + (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) && (newflags & VM_ACCESS_FLAGS) == 0) { pgprot_t new_pgprot = vm_get_page_prot(newflags); @@ -668,7 +668,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, * held in write mode. 
*/ vma_start_write(vma); - vm_flags_reset(vma, newflags); + vm_flags_reset_once(vma, newflags); if (vma_wants_manual_pte_write_upgrade(vma)) mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; vma_set_page_prot(vma); From 8d9a2f5d8abd3ba4355f25e60827c1ee082cd215 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 7 Feb 2025 10:04:53 +0000 Subject: [PATCH 071/431] mm/mm_init.c: use round_up() to align movable range Since MAX_ORDER_NR_PAGES is a power of 2, let's use a faster version. Link: https://lkml.kernel.org/r/20250207100453.9989-1-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Anshuman Khandual Reviewed-by: Mike Rapoport (Microsoft) Signed-off-by: Andrew Morton --- mm/mm_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/mm_init.c b/mm/mm_init.c index 2630cc30147e..de18d3ad12e1 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -438,7 +438,7 @@ static void __init find_zone_movable_pfns_for_nodes(void) * was requested by the user */ required_movablecore = - roundup(required_movablecore, MAX_ORDER_NR_PAGES); + round_up(required_movablecore, MAX_ORDER_NR_PAGES); required_movablecore = min(totalpages, required_movablecore); corepages = totalpages - required_movablecore; @@ -549,7 +549,7 @@ static void __init find_zone_movable_pfns_for_nodes(void) unsigned long start_pfn, end_pfn; zone_movable_pfn[nid] = - roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); + round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (zone_movable_pfn[nid] >= end_pfn) From c32696ca5e8e9fff83c951f3aa45cac2e97b0667 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 10 Feb 2025 10:27:34 -0800 Subject: [PATCH 072/431] mm/damon/core: unset damos->walk_completed after confirmed set Patch series "mm/damon/core: fix wrong and/or useless damos_walk() behaviors". damos_walk() can finish working earlier or later than expected, and start earlier than practical. The first two behaviors are clearly wrong (they don't follow the documentation), and all three only make the feature useless. Fix those. This patch (of 3): damos->walk_completed is only set, not unset. This can cause the next damos_walk() to finish earlier than expected. Unset it after all walk_completed is confirmed. Link: https://lkml.kernel.org/r/20250210182737.134994-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250210182737.134994-2-sj@kernel.org Fixes: bf0eaba0ff9c ("mm/damon/core: implement damos_walk()") Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/damon/core.c b/mm/damon/core.c index dc8f94fe7c3b..8e4ae9901b19 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1494,6 +1494,9 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) if (!siter->walk_completed) return; } + damon_for_each_scheme(siter, ctx) + siter->walk_completed = false; + complete(&control->completion); mutex_lock(&ctx->walk_control_lock); ctx->walk_control = NULL; From 40eb655b410d5c842313e556f743888033687865 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 10 Feb 2025 10:27:35 -0800 Subject: [PATCH 073/431] mm/damon/core: do not call damos_walk_control->walk() if walk is completed damos_walk() invokes callback functions of schemes until all schemes finish at least one round of walks. If there are multiple DAMOS schemes having different apply_interval, the callback functions for the scheme with a longer apply_interval will be called for more than a round of the walk.
The behavior is different from the document (see damos_walk() kernel-doc comment), and not useful. Make the behavior the same as the documented one by not invoking the callback once the walk for the given scheme is completed. Link: https://lkml.kernel.org/r/20250210182737.134994-3-sj@kernel.org Fixes: bf0eaba0ff9c ("mm/damon/core: implement damos_walk()") Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/damon/core.c b/mm/damon/core.c index 8e4ae9901b19..e129fb785970 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1458,6 +1458,9 @@ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, { struct damos_walk_control *control; + if (s->walk_completed) + return; + mutex_lock(&ctx->walk_control_lock); control = ctx->walk_control; mutex_unlock(&ctx->walk_control_lock); From 6fa70372c86162608c522eeaa58201d6c11ab773 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 10 Feb 2025 10:27:36 -0800 Subject: [PATCH 074/431] mm/damon/core: do damos walking in entire regions granularity damos_walk_control can be installed while DAMOS is walking the regions. This means the walk callback function invocations can be started from a region at the middle of the regions list. This makes it hard to use reliably. In particular, the DAMOS tried regions update for collecting monitoring results gets problematic results. Increase the walk_control_lock critical section to do walking in entire regions granularity. Link: https://lkml.kernel.org/r/20250210182737.134994-4-sj@kernel.org Fixes: bf0eaba0ff9c ("mm/damon/core: implement damos_walk()") Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index e129fb785970..f663c8e99dfa 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1461,11 +1461,10 @@ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, if (s->walk_completed) return; - mutex_lock(&ctx->walk_control_lock); control = ctx->walk_control; - mutex_unlock(&ctx->walk_control_lock); if (!control) return; + control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); } @@ -1485,9 +1484,7 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) struct damos *siter; struct damos_walk_control *control; - mutex_lock(&ctx->walk_control_lock); control = ctx->walk_control; - mutex_unlock(&ctx->walk_control_lock); if (!control) return; @@ -1501,9 +1498,7 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) siter->walk_completed = false; complete(&control->completion); - mutex_lock(&ctx->walk_control_lock); ctx->walk_control = NULL; - mutex_unlock(&ctx->walk_control_lock); } /* @@ -1850,6 +1845,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) if (!has_schemes_to_apply) return; + mutex_lock(&c->walk_control_lock); damon_for_each_target(t, c) { damon_for_each_region_safe(r, next_r, t) damon_do_apply_schemes(c, t, r); @@ -1864,6 +1860,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) c->attrs.aggr_interval) / sample_interval; s->last_applied = NULL; } + mutex_unlock(&c->walk_control_lock); } /* From 6e80c0aaad469e0a923ea0d7018fb1464e992018 Mon Sep 17 00:00:00 2001 From: Bertrand Wlodarczyk Date: Mon, 10 Feb 2025 17:07:49 +0100 Subject: [PATCH 075/431] vmscan, cleanup: add for_each_managed_zone_pgdat macro The macro is introduced to eliminate redundancy in the repeated iteration over
managed zones in pgdat data structure, reducing the potential for errors. This change doesn't introduce any functional modifications. Due to concentration of the pattern in vmscan.c the macro is placed locally in that file. Link: https://lkml.kernel.org/r/20250210160818.686-1-bertrand.wlodarczyk@intel.com Signed-off-by: Bertrand Wlodarczyk Reviewed-by: Tim Chen Cc: Andy Whitcroft Cc: Dave Hansen Cc: Dwaipayan Ray Cc: Joe Perches Cc: Lukas Bulwahn Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/vmscan.c | 83 +++++++++++++++++++++-------------------------------- 1 file changed, 32 insertions(+), 51 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index bc1826020159..fcca38bc640f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -271,6 +271,25 @@ static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) } #endif +/* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to + * and including the specified highidx + * @zone: The current zone in the iterator + * @pgdat: The pgdat which node_zones are being iterated + * @idx: The index variable + * @highidx: The index of the highest zone to return + * + * This macro iterates through all managed zones up to and including the specified highidx. + * The zone iterator enters an invalid state after macro call and must be reinitialized + * before it can be used again. + */ +#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \ + for ((idx) = 0, (zone) = (pgdat)->node_zones; \ + (idx) <= (highidx); \ + (idx)++, (zone)++) \ + if (!managed_zone(zone)) \ + continue; \ + else + static void set_task_reclaim_state(struct task_struct *task, struct reclaim_state *rs) { @@ -396,13 +415,9 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, { unsigned long size = 0; int zid; + struct zone *zone; - for (zid = 0; zid <= zone_idx; zid++) { - struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) { if (!mem_cgroup_disabled()) size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); else @@ -495,7 +510,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat) { int reclaimable = 0, write_pending = 0; int i; - + struct zone *zone; /* * If kswapd is disabled, reschedule if necessary but do not * throttle as the system is likely near OOM. @@ -508,12 +523,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat) * throttle as throttling will occur when the folios cycle * towards the end of the LRU if still under writeback. 
*/ - for (i = 0; i < MAX_NR_ZONES; i++) { - struct zone *zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) { reclaimable += zone_reclaimable_pages(zone); write_pending += zone_page_state_snapshot(zone, NR_ZONE_WRITE_PENDING); @@ -2372,17 +2382,13 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) unsigned long total_high_wmark = 0; unsigned long free, anon; int z; + struct zone *zone; free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); file = node_page_state(pgdat, NR_ACTIVE_FILE) + node_page_state(pgdat, NR_INACTIVE_FILE); - for (z = 0; z < MAX_NR_ZONES; z++) { - struct zone *zone = &pgdat->node_zones[z]; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) { total_high_wmark += high_wmark_pages(zone); } @@ -5851,6 +5857,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, unsigned long pages_for_compaction; unsigned long inactive_lru_pages; int z; + struct zone *zone; /* If not in reclaim/compaction mode, stop */ if (!in_reclaim_compaction(sc)) @@ -5870,11 +5877,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, return false; /* If compaction would go ahead or the allocation would succeed, stop */ - for (z = 0; z <= sc->reclaim_idx; z++) { - struct zone *zone = &pgdat->node_zones[z]; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) @@ -6401,11 +6404,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) return true; - for (i = 0; i <= ZONE_NORMAL; i++) { - zone = &pgdat->node_zones[i]; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) { if (!zone_reclaimable_pages(zone)) continue; @@ -6710,12 +6709,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) * Check watermarks bottom-up as lower zones are more likely to * meet watermarks. */ - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) mark = promo_wmark_pages(zone); else @@ -6800,11 +6794,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, /* Reclaim a number of pages proportional to the number of zones */ sc->nr_to_reclaim = 0; - for (z = 0; z <= sc->reclaim_idx; z++) { - zone = pgdat->node_zones + z; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); } @@ -6835,12 +6825,7 @@ update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) int i; struct zone *zone; - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { if (active) set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); else @@ -6901,11 +6886,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) * stall or direct reclaim until kswapd is finished. 
*/ nr_boost_reclaim = 0; - for (i = 0; i <= highest_zoneidx; i++) { - zone = pgdat->node_zones + i; - if (!managed_zone(zone)) - continue; - + for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { nr_boost_reclaim += zone->watermark_boost; zone_boosts[i] = zone->watermark_boost; } From 67254c7d70b63797a54173fbe05cd6552aca11d4 Mon Sep 17 00:00:00 2001 From: I Hsin Cheng Date: Mon, 10 Feb 2025 02:10:23 +0800 Subject: [PATCH 076/431] maple_tree: correct comment for mas_start() There's no mas->status value of "mas_start"; what the function is checking is whether mas->status equals "ma_start". Correct the comment for the function. Link: https://lkml.kernel.org/r/20250209181023.228856-1-richard120310@gmail.com Signed-off-by: I Hsin Cheng Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- lib/maple_tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index f7153ade1be5..42c65974a56c 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1353,7 +1353,7 @@ static void mas_node_count(struct ma_state *mas, int count) * mas_start() - Sets up maple state for operations. * @mas: The maple state. * - * If mas->status == mas_start, then set the min, max and depth to + * If mas->status == ma_start, then set the min, max and depth to * defaults. * * Return: From c2661f5fe888c14eb1f09ed23e74213366fa70f3 Mon Sep 17 00:00:00 2001 From: David Laight Date: Sun, 9 Feb 2025 17:47:11 +0000 Subject: [PATCH 077/431] mm: remove the access_ok() call from gup_fast_fallback() Historically the code relied on access_ok() to validate the address range. Commit 26f4c328079d7 added an explicit wrap check before access_ok(). Commit c28b1fc70390d then changed the wrap test to use check_add_overflow(). Commit 6014bc27561f2 relaxed the checks in x86-64's access_ok() and added an explicit check for TASK_SIZE here to make up for it. That left a pointless access_ok() call with its associated 'lfence' that can never actually fail. So just delete the test. Link: https://lkml.kernel.org/r/20250209174711.60889-1-david.laight.linux@gmail.com Signed-off-by: David Laight Reviewed-by: Jason Gunthorpe Acked-by: David Hildenbrand Cc: Thomas Gleixner Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jan Kara Cc: John Hubbard Cc: Linus Torvalds Cc: Peter Xu Cc: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton --- mm/gup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 61e751baf862..e42e4fdaf765 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2760,7 +2760,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * * *) ptes can be read atomically by the architecture. * - * *) access_ok is sufficient to validate userspace address ranges. + * *) valid user addresses are below TASK_SIZE_MAX * * The last two assumptions can be relaxed by the addition of helper functions. * @@ -3414,8 +3414,6 @@ static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; - if (unlikely(!access_ok((void __user *)start, len))) - return -EFAULT; nr_pinned = gup_fast(start, end, gup_flags, pages); if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) From 3fae696393c7f07aac5db249fc2c537e8744c831 Mon Sep 17 00:00:00 2001 From: Eric Salem Date: Sat, 8 Feb 2025 20:36:36 -0600 Subject: [PATCH 078/431] selftests: mm: fix typo Fix misspelling.
Link: https://lkml.kernel.org/r/77e0e915-36c3-4c95-84b8-0b73aaa17951@gmail.com Signed-off-by: Eric Salem Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 7ad6ba660c7d..31e0c8a3110d 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -323,7 +323,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) ret = userfaultfd_open(&features); if (ret) { if (errmsg) - *errmsg = "possible lack of priviledge"; + *errmsg = "possible lack of privilege"; return ret; } From cedae19487a368d899301c16ab576b0aedb9396d Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Sat, 8 Feb 2025 15:52:54 +0000 Subject: [PATCH 079/431] mm: refactor rmap_walk_file() to separate out traversal logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "expose mapping wrprotect, fix fb_defio use", v3. Right now the only means by which we can write-protect a range using the reverse mapping is via folio_mkclean(). However this is not always the appropriate means of doing so, specifically in the case of the framebuffer deferred I/O logic (fb_defio enabled by CONFIG_FB_DEFERRED_IO). There, kernel pages are mapped read-only and write-protect faults used to batch up I/O operations. Each time the deferred work is done, folio_mkclean() is used to mark the framebuffer page as having had I/O performed on it. However doing so requires the kernel page (perhaps allocated via vmalloc()) to have its page->mapping, index fields set so the rmap can find everything that maps it in order to write-protect. This is problematic as firstly, these fields should not be set for kernel-allocated memory, and secondly these are not folios (it's not user memory) and page->index, mapping fields are now deprecated and soon to be removed. The removal of these fields is imminent, rendering this series more urgent than it might first appear. The implementers cannot be blamed for having used this however, as there is simply no other way of performing this operation correctly. This series fixes this - we provide the mapping_wrprotect_range() function to allow the reverse mapping to be used to look up mappings from the page cache object (i.e. its address_space pointer) at a specific offset. The fb_defio logic already stores this offset, and can simply be expanded to keep track of the page cache object, so the change then becomes straight-forward. This series should have no functional change. This patch (of 3): In order to permit the traversal of the reverse mapping at a specified mapping and offset rather than those specified by an input folio, we need to separate out the portion of the rmap file logic which deals with this traversal from those parts of the logic which interact with the folio. This patch achieves this by adding a new static __rmap_walk_file() function which rmap_walk_file() invokes. This function permits the ability to pass NULL folio, on the assumption that the caller has provided for this correctly in the callbacks specified in the rmap_walk_control object. Though it provides for this, and adds debug asserts to ensure that, should a folio be specified, these are equal to the mapping and offset specified in the folio, there should be no functional change as a result of this patch. 
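To make the NULL-folio contract concrete, here is a minimal sketch (not part of the patch) of the kind of walk control a folio-less caller would have to supply. The callback name and body are hypothetical; only the rmap_one signature and the folio == NULL convention come from this change:

    /*
     * Hypothetical callback for a folio-less walk: it must not touch the
     * folio argument, because __rmap_walk_file() passes folio == NULL.
     */
    static bool my_walk_one(struct folio *folio, struct vm_area_struct *vma,
                            unsigned long address, void *arg)
    {
            /* Operate purely on @vma, @address and @arg here. */
            return true;    /* keep walking the remaining VMAs */
    }

    static struct rmap_walk_control my_rwc = {
            .rmap_one = my_walk_one,
            /* .done, if set, must likewise tolerate a NULL folio. */
    };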
The reason for adding this is to enable for future changes to permit users to be able to traverse mappings of userland-mapped kernel memory, write-protecting those mappings to enable page_mkwrite() or pfn_mkwrite() fault handlers to be retriggered on subsequent dirty. Link: https://lkml.kernel.org/r/cover.1739029358.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/0d1acec0cba1e5a12f9b53efcabc397541c90517.1739029358.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Helge Deller Cc: Jaya Kumar Cc: Kajtar Zsolt Cc: Maíra Canal Cc: Matthew Wilcox [English fixes] Cc: Simona Vetter Cc: Thomas Zimemrmann Signed-off-by: Andrew Morton --- mm/rmap.c | 79 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 0f760b93fc0a..3da1ca49881c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2614,35 +2614,37 @@ static void rmap_walk_anon(struct folio *folio, anon_vma_unlock_read(anon_vma); } -/* - * rmap_walk_file - do something to file page using the object-based rmap method - * @folio: the folio to be handled - * @rwc: control variable according to each walk type - * @locked: caller holds relevant rmap lock +/** + * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping + * of a page mapped within a specified page cache object at a specified offset. * - * Find all the mappings of a folio using the mapping pointer and the vma chains - * contained in the address_space struct it points to. + * @folio: Either the folio whose mappings to traverse, or if NULL, + * the callbacks specified in @rwc will be configured such + * as to be able to look up mappings correctly. + * @mapping: The page cache object whose mapping VMAs we intend to + * traverse. If @folio is non-NULL, this should be equal to + * folio_mapping(folio). + * @pgoff_start: The offset within @mapping of the page which we are + * looking up. If @folio is non-NULL, this should be equal + * to folio_pgoff(folio). + * @nr_pages: The number of pages mapped by the mapping. If @folio is + * non-NULL, this should be equal to folio_nr_pages(folio). + * @rwc: The reverse mapping walk control object describing how + * the traversal should proceed. + * @locked: Is the @mapping already locked? If not, we acquire the + * lock. */ -static void rmap_walk_file(struct folio *folio, - struct rmap_walk_control *rwc, bool locked) +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked) { - struct address_space *mapping = folio_mapping(folio); - pgoff_t pgoff_start, pgoff_end; + pgoff_t pgoff_end = pgoff_start + nr_pages - 1; struct vm_area_struct *vma; - /* - * The page lock not only makes sure that page->mapping cannot - * suddenly be NULLified by truncation, it makes sure that the - * structure at mapping cannot be freed and reused yet, - * so we can safely take mapping->i_mmap_rwsem. 
- */ - VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); + VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); + VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); - if (!mapping) - return; - - pgoff_start = folio_pgoff(folio); - pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; @@ -2657,8 +2659,7 @@ static void rmap_walk_file(struct folio *folio, lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { - unsigned long address = vma_address(vma, pgoff_start, - folio_nr_pages(folio)); + unsigned long address = vma_address(vma, pgoff_start, nr_pages); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); @@ -2671,12 +2672,38 @@ static void rmap_walk_file(struct folio *folio, if (rwc->done && rwc->done(folio)) goto done; } - done: if (!locked) i_mmap_unlock_read(mapping); } +/* + * rmap_walk_file - do something to file page using the object-based rmap method + * @folio: the folio to be handled + * @rwc: control variable according to each walk type + * @locked: caller holds relevant rmap lock + * + * Find all the mappings of a folio using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + */ +static void rmap_walk_file(struct folio *folio, + struct rmap_walk_control *rwc, bool locked) +{ + /* + * The folio lock not only makes sure that folio->mapping cannot + * suddenly be NULLified by truncation, it makes sure that the structure + * at mapping cannot be freed and reused yet, so we can safely take + * mapping->i_mmap_rwsem. + */ + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + + if (!folio->mapping) + return; + + __rmap_walk_file(folio, folio->mapping, folio->index, + folio_nr_pages(folio), rwc, locked); +} + void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) From a4811f53bb89b3524629b1be41add9349fe0a750 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Sat, 8 Feb 2025 15:52:55 +0000 Subject: [PATCH 080/431] mm: provide mapping_wrprotect_range() function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the fb_defio video driver, page dirty state is used to determine when frame buffer pages have been changed, allowing for batched, deferred I/O to be performed for efficiency. This implementation had only one means of doing so effectively - the use of the folio_mkclean() function. However, this use of the function is inappropriate, as the fb_defio implementation allocates kernel memory to back the framebuffer, and then is forced to specified page->index, mapping fields in order to permit the folio_mkclean() rmap traversal to proceed correctly. It is not correct to specify these fields on kernel-allocated memory, and moreover since these are not folios, page->index, mapping are deprecated fields, soon to be removed. We therefore need to provide a means by which we can correctly traverse the reverse mapping and write-protect mappings for a page backing an address_space page cache object at a given offset. This patch provides this - mapping_wrprotect_range() - which allows for this operation to be performed for a specified address_space, offset, PFN and size, without requiring a folio nor, of course, an inappropriate use of page->index, mapping. 
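For illustration, a minimal sketch of the intended calling convention (not part of this patch): the driver structure and field names below are made up, and only the mapping_wrprotect_range() prototype comes from the change itself.

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* Hypothetical deferred-I/O style driver state. */
    struct my_defio_buf {
            struct address_space *mapping;  /* page cache object userspace mmap()s */
            struct page *page;              /* kernel-allocated page backing the buffer */
            unsigned long offset;           /* byte offset of @page within @mapping */
    };

    static void my_defio_wrprotect(struct my_defio_buf *buf)
    {
            pgoff_t pgoff = buf->offset >> PAGE_SHIFT;
            unsigned long pfn = page_to_pfn(buf->page);

            /*
             * Write-protect every shared user mapping of this one page so
             * that the next write re-triggers the mkwrite fault handler.
             */
            mapping_wrprotect_range(buf->mapping, pgoff, pfn, 1);
    }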
With this provided, we can subsequently adjust the fb_defio implementation to make use of this function and avoid incorrect invocation of folio_mkclean() and more importantly, incorrect manipulation of page->index and mapping fields. Link: https://lkml.kernel.org/r/e5bf969d64e7f2f2ae944d42341fc8994b736a81.1739029358.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Helge Deller Cc: Jaya Kumar Cc: Kajtar Zsolt Cc: Maíra Canal Cc: Matthew Wilcox Cc: Simona Vetter Cc: Thomas Zimemrmann Signed-off-by: Andrew Morton --- include/linux/rmap.h | 3 ++ mm/rmap.c | 74 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 86425d42c1a9..69e9a431a40e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -738,6 +738,9 @@ unsigned long page_address_in_vma(const struct folio *folio, */ int folio_mkclean(struct folio *); +int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, + unsigned long pfn, unsigned long nr_pages); + int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma); diff --git a/mm/rmap.c b/mm/rmap.c index 3da1ca49881c..24bacce9971f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1135,6 +1135,80 @@ int folio_mkclean(struct folio *folio) } EXPORT_SYMBOL_GPL(folio_mkclean); +struct wrprotect_file_state { + int cleaned; + pgoff_t pgoff; + unsigned long pfn; + unsigned long nr_pages; +}; + +static bool mapping_wrprotect_range_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long address, void *arg) +{ + struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg; + struct page_vma_mapped_walk pvmw = { + .pfn = state->pfn, + .nr_pages = state->nr_pages, + .pgoff = state->pgoff, + .vma = vma, + .address = address, + .flags = PVMW_SYNC, + }; + + state->cleaned += page_vma_mkclean_one(&pvmw); + + return true; +} + +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked); + +/** + * mapping_wrprotect_range() - Write-protect all mappings in a specified range. + * + * @mapping: The mapping whose reverse mapping should be traversed. + * @pgoff: The page offset at which @pfn is mapped within @mapping. + * @pfn: The PFN of the page mapped in @mapping at @pgoff. + * @nr_pages: The number of physically contiguous base pages spanned. + * + * Traverses the reverse mapping, finding all VMAs which contain a shared + * mapping of the pages in the specified range in @mapping, and write-protects + * them (that is, updates the page tables to mark the mappings read-only such + * that a write protection fault arises when the mappings are written to). + * + * The @pfn value need not refer to a folio, but rather can reference a kernel + * allocation which is mapped into userland. We therefore do not require that + * the page maps to a folio with a valid mapping or index field, rather the + * caller specifies these in @mapping and @pgoff. + * + * Return: the number of write-protected PTEs, or an error. 
+ */ +int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, + unsigned long pfn, unsigned long nr_pages) +{ + struct wrprotect_file_state state = { + .cleaned = 0, + .pgoff = pgoff, + .pfn = pfn, + .nr_pages = nr_pages, + }; + struct rmap_walk_control rwc = { + .arg = (void *)&state, + .rmap_one = mapping_wrprotect_range_one, + .invalid_vma = invalid_mkclean_vma, + }; + + if (!mapping) + return 0; + + __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, + /* locked = */false); + + return state.cleaned; +} +EXPORT_SYMBOL_GPL(mapping_wrprotect_range); + /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) From 6cdef2ddce2b7de8faca69061a6780080cfecc10 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Sat, 8 Feb 2025 15:52:56 +0000 Subject: [PATCH 081/431] fb_defio: do not use deprecated page->mapping, index fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the introduction of mapping_wrprotect_range() there is no need to use folio_mkclean() in order to write-protect mappings of frame buffer pages, and therefore no need to inappropriately set kernel-allocated page->index, mapping fields to permit this operation. Instead, store the pointer to the page cache object for the mapped driver in the fb_deferred_io object, and use the already stored page offset from the pageref object to look up mappings in order to write-protect them. This is justified, as for the page objects to store a mapping pointer at the point of assignment of pages, they must all reference the same underlying address_space object. Since the life time of the pagerefs is also the lifetime of the fb_deferred_io object, storing the pointer here makes sense. This eliminates the need for all of the logic around setting and maintaining page->index,mapping which we remove. This eliminates the use of folio_mkclean() entirely but otherwise should have no functional change. 
[lorenzo.stoakes@oracle.com: fixup unused variable warnings] Link: https://lkml.kernel.org/r/d4018405-2762-4385-a816-e54cc23839ac@lucifer.local Link: https://lkml.kernel.org/r/81171ab16c14e3df28f6de9d14982cee528d8519.1739029358.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Tested-by: Kajtar Zsolt Acked-by: Thomas Zimmermann Cc: David Hildenbrand Cc: Helge Deller Cc: Jaya Kumar Cc: Maíra Canal Cc: Matthew Wilcox Cc: Simona Vetter Signed-off-by: Andrew Morton --- drivers/video/fbdev/core/fb_defio.c | 43 ++++++++++------------------- include/linux/fb.h | 1 + 2 files changed, 16 insertions(+), 28 deletions(-) diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index 65363df8e81b..4fc93f253e06 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -69,14 +69,6 @@ static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_in return pageref; } -static void fb_deferred_io_pageref_clear(struct fb_deferred_io_pageref *pageref) -{ - struct page *page = pageref->page; - - if (page) - page->mapping = NULL; -} - static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info, unsigned long offset, struct page *page) @@ -140,13 +132,10 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) if (!page) return VM_FAULT_SIGBUS; - if (vmf->vma->vm_file) - page->mapping = vmf->vma->vm_file->f_mapping; - else - printk(KERN_ERR "no mapping available\n"); + if (!vmf->vma->vm_file) + fb_err(info, "no mapping available\n"); - BUG_ON(!page->mapping); - page->index = vmf->pgoff; /* for folio_mkclean() */ + BUG_ON(!info->fbdefio->mapping); vmf->page = page; return 0; @@ -194,9 +183,9 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long /* * We want the page to remain locked from ->page_mkwrite until - * the PTE is marked dirty to avoid folio_mkclean() being called - * before the PTE is updated, which would leave the page ignored - * by defio. + * the PTE is marked dirty to avoid mapping_wrprotect_range() + * being called before the PTE is updated, which would leave + * the page ignored by defio. * Do this by locking the page here and informing the caller * about it with VM_FAULT_LOCKED. */ @@ -274,15 +263,17 @@ static void fb_deferred_io_work(struct work_struct *work) struct fb_deferred_io_pageref *pageref, *next; struct fb_deferred_io *fbdefio = info->fbdefio; - /* here we mkclean the pages, then do all deferred IO */ + /* here we wrprotect the page's mappings, then do all deferred IO. 
*/ mutex_lock(&fbdefio->lock); +#ifdef CONFIG_MMU list_for_each_entry(pageref, &fbdefio->pagereflist, list) { - struct folio *folio = page_folio(pageref->page); + struct page *page = pageref->page; + pgoff_t pgoff = pageref->offset >> PAGE_SHIFT; - folio_lock(folio); - folio_mkclean(folio); - folio_unlock(folio); + mapping_wrprotect_range(fbdefio->mapping, pgoff, + page_to_pfn(page), 1); } +#endif /* driver's callback with pagereflist */ fbdefio->deferred_io(info, &fbdefio->pagereflist); @@ -337,6 +328,7 @@ void fb_deferred_io_open(struct fb_info *info, { struct fb_deferred_io *fbdefio = info->fbdefio; + fbdefio->mapping = file->f_mapping; file->f_mapping->a_ops = &fb_deferred_io_aops; fbdefio->open_count++; } @@ -344,13 +336,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open); static void fb_deferred_io_lastclose(struct fb_info *info) { - unsigned long i; - flush_delayed_work(&info->deferred_work); - - /* clear out the mapping that we setup */ - for (i = 0; i < info->npagerefs; ++i) - fb_deferred_io_pageref_clear(&info->pagerefs[i]); } void fb_deferred_io_release(struct fb_info *info) @@ -370,5 +356,6 @@ void fb_deferred_io_cleanup(struct fb_info *info) kvfree(info->pagerefs); mutex_destroy(&fbdefio->lock); + fbdefio->mapping = NULL; } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); diff --git a/include/linux/fb.h b/include/linux/fb.h index 5ba187e08cf7..cd653862ab99 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -225,6 +225,7 @@ struct fb_deferred_io { int open_count; /* number of opened files; protected by fb_info lock */ struct mutex lock; /* mutex that protects the pageref list */ struct list_head pagereflist; /* list of pagerefs for touched pages */ + struct address_space *mapping; /* page cache object for fb device */ /* callback */ struct page *(*get_page)(struct fb_info *info, unsigned long offset); void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); From 6340584e489f1f8ddb65e44a7ad2ab9f3503c4d3 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 11 Feb 2025 12:59:11 -0800 Subject: [PATCH 082/431] mm/vmstat: revert "fix a W=1 clang compiler warning" Commit 75b5ab134bb5 enabled -Wenum-enum-conversion in W=1 builds. Commit 30c2de0a267c ("mm/vmstat: fix a W=1 clang compiler warning") fixed a -Wenum-enum-conversion warning. Commit 8f6629c004b1 removed the -Wenum-enum-conversion option again from W=1 builds. Since the W=1 compiler warning fix is no longer necessary, revert it. 
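For context, a stand-alone sketch of the pattern that clang's -Wenum-enum-conversion flags: arithmetic mixing two distinct enumerated types. The enum names below are simplified stand-ins, not the kernel's node_stat_item/lru_list definitions; they only illustrate why the now-reverted cast had been added.

    /* Two unrelated enumerations, standing in for node_stat_item vs. lru_list. */
    enum base_item   { BASE_FIRST };
    enum list_offset { OFF_A, OFF_B };

    static int combined_index(enum list_offset off)
    {
            /*
             * Mixing the two enum types in arithmetic triggers the warning;
             * the reverted fix silenced it by casting the second operand,
             * i.e. BASE_FIRST + (enum base_item)off.
             */
            return BASE_FIRST + off;
    }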
Link: https://lkml.kernel.org/r/20250211205911.1707684-1-bvanassche@acm.org Signed-off-by: Bart Van Assche Acked-by: Vlastimil Babka Cc: Matthew Wilcox Cc: Ivan Shapovalov Cc: David Laight Cc: Nathan Chancellor Signed-off-by: Andrew Morton --- include/linux/vmstat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 9f3a04345b86..d2761bf8ff32 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) From 026e8b55aa05b728e5f5f7858cd91385bf0642e4 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Feb 2025 13:00:23 +0000 Subject: [PATCH 083/431] mm/mmu_gather: update comment on RCU freeing Some recent discussion on LKML [0] brought up some interesting and useful additional context on RCU-freeing for pagetables. Note down some extra info in here, in particular a) be concrete about the reason why an arch might not have an IPI and b) add the interesting paravirt details. [0] https://lore.kernel.org/linux-kernel/20250206044346.3810242-2-riel@surriel.com/ Link: https://lkml.kernel.org/r/20250211-mmugather-comment-v1-1-1ac1e0c765d2@google.com Signed-off-by: Brendan Jackman Cc: "Aneesh Kumar K.V" Cc: Brendan Jackman Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Rik van Riel Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/mmu_gather.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 7aa6f18c500b..db7ba4a725d6 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -246,8 +246,16 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch) * IRQs delays the completion of the TLB flush we can never observe an already * freed page. * - * Architectures that do not have this (PPC) need to delay the freeing by some - * other means, this is that means. + * Not all systems IPI every CPU for this purpose: + * + * - Some architectures have HW support for cross-CPU synchronisation of TLB + * flushes, so there's no IPI at all. + * + * - Paravirt guests can do this TLB flushing in the hypervisor, or coordinate + * with the hypervisor to defer flushing on preempted vCPUs. + * + * Such systems need to delay the freeing by some other means, this is that + * means. * * What we do is batch the freed directory pages (tables) and RCU free them. * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling From 0431c42622612a96cce97cdd4cfbe7d487606cf3 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Tue, 11 Feb 2025 12:43:40 +0000 Subject: [PATCH 084/431] mm/damon: introduce DAMOS filter type hugepage_size Patch series "mm/damon: add support for hugepage_size DAMOS filter", v5. hugepage_size DAMOS filter can be used to gather statistics to check if memory regions of specific access temperatures are backed by hugepages of a size in a specific range. This filter can help to observe and prove the effectiveness of different schemes for shrinking/collapsing hugepages. This patch (of 4): This is to gather statistics to check if memory regions of specific access temperatures are backed by pages of a size in a specific range.
This filter can help to observe and prove the effectiveness of different schemes for shrinking/collapsing hugepages. [sj@kernel.org: add kernel-doc comment for damos_filter->sz_range] Link: https://lkml.kernel.org/r/20250218223058.52459-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250211124437.278873-1-usamaarif642@gmail.com Link: https://lkml.kernel.org/r/20250211124437.278873-2-usamaarif642@gmail.com Signed-off-by: Usama Arif Reviewed-by: SeongJae Park Cc: David Hildenbrand Cc: Johannes Weiner Cc: Usama Arif Signed-off-by: Andrew Morton --- include/linux/damon.h | 14 ++++++++++++++ mm/damon/core.c | 3 +++ mm/damon/paddr.c | 6 ++++++ mm/damon/sysfs-schemes.c | 1 + 4 files changed, 24 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index b4d37d9b9221..5e7ae7bca5dc 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -35,6 +35,16 @@ struct damon_addr_range { unsigned long end; }; +/** + * struct damon_size_range - Represents size for filter to operate on [@min, @max]. + * @min: Min size (inclusive). + * @max: Max size (inclusive). + */ +struct damon_size_range { + unsigned long min; + unsigned long max; +}; + /** * struct damon_region - Represents a monitoring target region. * @ar: The address range of the region. @@ -326,6 +336,7 @@ struct damos_stat { * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages. + * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage. * @DAMOS_FILTER_TYPE_ADDR: Address range. * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. * @NR_DAMOS_FILTER_TYPES: Number of filter types. @@ -345,6 +356,7 @@ enum damos_filter_type { DAMOS_FILTER_TYPE_ANON, DAMOS_FILTER_TYPE_MEMCG, DAMOS_FILTER_TYPE_YOUNG, + DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, DAMOS_FILTER_TYPE_ADDR, DAMOS_FILTER_TYPE_TARGET, NR_DAMOS_FILTER_TYPES, @@ -360,6 +372,7 @@ enum damos_filter_type { * @target_idx: Index of the &struct damon_target of * &damon_ctx->adaptive_targets if @type is * DAMOS_FILTER_TYPE_TARGET. + * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE. * @list: List head for siblings.
* * Before applying the &damos->action to a memory region, DAMOS checks if each @@ -376,6 +389,7 @@ struct damos_filter { unsigned short memcg_id; struct damon_addr_range addr_range; int target_idx; + struct damon_size_range sz_range; }; struct list_head list; }; diff --git a/mm/damon/core.c b/mm/damon/core.c index f663c8e99dfa..b1ce072b56f2 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -777,6 +777,9 @@ static void damos_commit_filter_arg( case DAMOS_FILTER_TYPE_TARGET: dst->target_idx = src->target_idx; break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + dst->sz_range = src->sz_range; + break; default: break; } diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index eb10a723b0a7..1a5974640b93 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -211,6 +211,7 @@ static bool damos_pa_filter_match(struct damos_filter *filter, { bool matched = false; struct mem_cgroup *memcg; + size_t folio_sz; switch (filter->type) { case DAMOS_FILTER_TYPE_ANON: @@ -230,6 +231,11 @@ static bool damos_pa_filter_match(struct damos_filter *filter, if (matched) damon_folio_mkold(folio); break; + case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: + folio_sz = folio_size(folio); + matched = filter->sz_range.min <= folio_sz && + folio_sz <= filter->sz_range.max; + break; default: break; } diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 98f93ae9f59e..9020bc9befac 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -329,6 +329,7 @@ static const char * const damon_sysfs_scheme_filter_type_strs[] = { "anon", "memcg", "young", + "hugepage_size", "addr", "target", }; From ea1f204ba29a65ec70464fdd4de727e76c6e4d9c Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Tue, 11 Feb 2025 12:43:41 +0000 Subject: [PATCH 085/431] mm/damon/sysfs-schemes: add files for setting damos_filter->sz_range Add min and max files for damon filters to let the userspace decide the min/max folio size to operate on. This will be needed to decide what folio sizes to give pa_stat for. Link: https://lkml.kernel.org/r/20250211124437.278873-3-usamaarif642@gmail.com Signed-off-by: Usama Arif Reviewed-by: SeongJae Park Cc: David Hildenbrand Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 54 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 9020bc9befac..881d00bb3a34 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -316,6 +316,7 @@ struct damon_sysfs_scheme_filter { bool allow; char *memcg_path; struct damon_addr_range addr_range; + struct damon_size_range sz_range; int target_idx; }; @@ -474,6 +475,44 @@ static ssize_t addr_end_store(struct kobject *kobj, return err ? err : count; } +static ssize_t min_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.min); +} + +static ssize_t min_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.min); + + return err ? 
err : count; +} + +static ssize_t max_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%lu\n", filter->sz_range.max); +} + +static ssize_t max_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + int err = kstrtoul(buf, 0, &filter->sz_range.max); + + return err ? err : count; +} + static ssize_t damon_target_idx_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -520,6 +559,12 @@ static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr = static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr = __ATTR_RW_MODE(addr_end, 0600); +static struct kobj_attribute damon_sysfs_scheme_filter_min_attr = + __ATTR_RW_MODE(min, 0600); + +static struct kobj_attribute damon_sysfs_scheme_filter_max_attr = + __ATTR_RW_MODE(max, 0600); + static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr = __ATTR_RW_MODE(damon_target_idx, 0600); @@ -530,6 +575,8 @@ static struct attribute *damon_sysfs_scheme_filter_attrs[] = { &damon_sysfs_scheme_filter_memcg_path_attr.attr, &damon_sysfs_scheme_filter_addr_start_attr.attr, &damon_sysfs_scheme_filter_addr_end_attr.attr, + &damon_sysfs_scheme_filter_min_attr.attr, + &damon_sysfs_scheme_filter_max_attr.attr, &damon_sysfs_scheme_filter_damon_target_idx_attr.attr, NULL, }; @@ -1954,6 +2001,13 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme, filter->addr_range = sysfs_filter->addr_range; } else if (filter->type == DAMOS_FILTER_TYPE_TARGET) { filter->target_idx = sysfs_filter->target_idx; + } else if (filter->type == DAMOS_FILTER_TYPE_HUGEPAGE_SIZE) { + if (sysfs_filter->sz_range.min > + sysfs_filter->sz_range.max) { + damos_destroy_filter(filter); + return -EINVAL; + } + filter->sz_range = sysfs_filter->sz_range; } damos_add_filter(scheme, filter); From 807db03c59097db7f94b642cf4394946989d6417 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Tue, 11 Feb 2025 12:43:42 +0000 Subject: [PATCH 086/431] Docs/ABI/damon: document DAMOS sysfs files to set the min/max folio_size This will be used to decide the min and max folio size to operate on for pa_stat. Link: https://lkml.kernel.org/r/20250211124437.278873-4-usamaarif642@gmail.com Signed-off-by: Usama Arif Reviewed-by: SeongJae Park Cc: David Hildenbrand Cc: Johannes Weiner Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-damon | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index b057eddefbfc..ccd13ca668c8 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -345,6 +345,20 @@ Description: If 'addr' is written to the 'type' file, writing to or reading from this file sets or gets the end address of the address range for the filter. +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters//min +Date: Feb 2025 +Contact: SeongJae Park +Description: If 'hugepage_size' is written to the 'type' file, writing to + or reading from this file sets or gets the minimum size of the + hugepage for the filter. 
+ +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters//max +Date: Feb 2025 +Contact: SeongJae Park +Description: If 'hugepage_size' is written to the 'type' file, writing to + or reading from this file sets or gets the maximum size of the + hugepage for the filter. + What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters//target_idx Date: Dec 2022 Contact: SeongJae Park From 4ddb20926842ff1e892bb8dce64ab93058102601 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Tue, 11 Feb 2025 12:43:43 +0000 Subject: [PATCH 087/431] Docs/admin-guide/mm/damon/usage: document hugepage_size filter type This includes both the 'hugepage_size' filter type and the min/max files used to decide range of sizes to filter on. Link: https://lkml.kernel.org/r/20250211124437.278873-5-usamaarif642@gmail.com Signed-off-by: Usama Arif Reviewed-by: SeongJae Park Cc: David Hildenbrand Cc: Johannes Weiner Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 47a44bd348ab..51af66c208c5 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -83,7 +83,7 @@ comma (","). │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value │ │ │ │ │ │ │ :ref:`watermarks `/metric,interval_us,high,mid,low │ │ │ │ │ │ │ :ref:`filters `/nr_filters - │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx + │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed @@ -406,13 +406,14 @@ number (``N``) to the file creates the number of child directories named ``0`` to ``N-1``. Each directory represents each filter. The filters are evaluated in the numeric order. -Each filter directory contains seven files, namely ``type``, ``matching``, -``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. -To ``type`` file, you can write one of five special keywords: ``anon`` for -anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young -pages, ``addr`` for specific address range (an open-ended interval), or -``target`` for specific DAMON monitoring target filtering. Meaning of the -types are same to the description on the :ref:`design doc +Each filter directory contains nine files, namely ``type``, ``matching``, +``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, ``min``, ``max`` +and ``target_idx``. To ``type`` file, you can write one of six special +keywords: ``anon`` for anonymous pages, ``memcg`` for specific memory cgroup, +``young`` for young pages, ``addr`` for specific address range (an open-ended +interval), ``hugepage_size`` for large folios of a specific size range [``min``, +``max``] or ``target`` for specific DAMON monitoring target filtering. Meaning +of the types are same to the description on the :ref:`design doc `. 
In case of the memory cgroup filtering, you can specify the memory cgroup of From 4bc2e699e3d8f56f2aaafd14109ff77311f95336 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 11 Feb 2025 08:29:00 +0000 Subject: [PATCH 088/431] mm/mm_init.c: only align start of ZONE_MOVABLE on nodes with memory At the beginning of find_zone_movable_pfns_for_nodes(), it has properly set node_states[N_MEMORY] in early_calculate_totalpages(). Instead of iterating over all possible nodes, we can just do the alignment on nodes with memory. Link: https://lkml.kernel.org/r/20250211082900.10877-1-richard.weiyang@gmail.com Signed-off-by: Wei Yang Acked-by: David Hildenbrand Reviewed-by: Dev Jain Reviewed-by: Mike Rapoport (Microsoft) Reviewed-by: Anshuman Khandual Signed-off-by: Andrew Morton --- mm/mm_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mm_init.c b/mm/mm_init.c index de18d3ad12e1..6078b3651f2e 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -545,7 +545,7 @@ static void __init find_zone_movable_pfns_for_nodes(void) out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ - for (nid = 0; nid < MAX_NUMNODES; nid++) { + for_each_node_state(nid, N_MEMORY) { unsigned long start_pfn, end_pfn; zone_movable_pfn[nid] = From 6fbea85271c6764d512a5c5b6c1b44b762693a18 Mon Sep 17 00:00:00 2001 From: I Hsin Cheng Date: Tue, 11 Feb 2025 15:18:50 +0800 Subject: [PATCH 089/431] maple_tree: use ma_dead_node() in mte_dead_node() Utilize ma_dead_node() in mte_dead_node(). It can prevent decoding the maple enode for a second time. Use the "node" to find parent for comparison. Link: https://lkml.kernel.org/r/20250211071850.330632-1-richard120310@gmail.com Signed-off-by: I Hsin Cheng Reviewed-by: Liam R. Howlett Cc: Ching-Chun (Jim) Huang Cc: Shuah khan Signed-off-by: Andrew Morton --- lib/maple_tree.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 42c65974a56c..60356ccb11ce 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -584,13 +584,10 @@ static __always_inline bool ma_dead_node(const struct maple_node *node) */ static __always_inline bool mte_dead_node(const struct maple_enode *enode) { - struct maple_node *parent, *node; + struct maple_node *node; node = mte_to_node(enode); - /* Do not reorder reads from the node prior to the parent check */ - smp_rmb(); - parent = mte_parent(enode); - return (parent == node); + return ma_dead_node(node); } /* From 1d23b9403aedea2d8eb4175e8eeb05fcd2967219 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 11 Feb 2025 18:13:39 +0800 Subject: [PATCH 090/431] mm/mmu_gather: remove unused __tlb_remove_page() Nobody is using __tlb_remove_page() now, clean it up. And also remove the code comment above tlb_remove_page() because it's not meaningful any more. 
Link: https://lkml.kernel.org/r/Z6si0/A/zzEF/bFJ@MiWiFi-R3L-srv Signed-off-by: Baoquan He Reviewed-by: Qi Zheng Cc: "Aneesh Kumar K.V" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Will Deacon Signed-off-by: Andrew Morton --- include/asm-generic/tlb.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index e402aef79c93..1fac1985127d 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -489,16 +489,6 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, tlb_flush_mmu(tlb); } -static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, - struct page *page, bool delay_rmap) -{ - return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE); -} - -/* tlb_remove_page - * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when - * required. - */ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { return tlb_remove_page_size(tlb, page, PAGE_SIZE); From 7bd1fa0d5624e78628ad7ce70d7e69082c34c634 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 11 Feb 2025 11:43:48 +0800 Subject: [PATCH 091/431] mm/mmu_gather: clean up the stale code comment In commit d7f861b9c43a ("mm/mmu_gather: add __tlb_remove_folio_pages()"), helper function __tlb_remove_folio_pages_size() was added. And based on the helper, wrapper functions __tlb_remove_folio_pages() and __tlb_remove_page_size() are created and used by upper level functions. So let's update the code comment to reflect the current code about tlb_remove_page()/tlb_remove_page_size(), etc. Link: https://lkml.kernel.org/r/20250211034348.39531-2-bhe@redhat.com Signed-off-by: Baoquan He Cc: "Aneesh Kumar K.V" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Qi Zheng Cc: Will Deacon Signed-off-by: Andrew Morton --- include/asm-generic/tlb.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 1fac1985127d..d1adfba8387e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -67,22 +67,21 @@ * * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE. * - * - tlb_remove_page() / __tlb_remove_page() - * - tlb_remove_page_size() / __tlb_remove_page_size() - * - __tlb_remove_folio_pages() + * - tlb_remove_page() / tlb_remove_page_size() + * - __tlb_remove_folio_pages() / __tlb_remove_page_size() + * - __tlb_remove_folio_pages_size() * - * __tlb_remove_page_size() is the basic primitive that queues a page for - * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a - * boolean indicating if the queue is (now) full and a call to - * tlb_flush_mmu() is required. + * __tlb_remove_folio_pages_size() is the basic primitive that queues pages + * for freeing. It will return a boolean indicating if the queue is (now) + * full and a call to tlb_flush_mmu() is required. * * tlb_remove_page() and tlb_remove_page_size() imply the call to * tlb_flush_mmu() when required and has no return value. * - * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however, - * instead of removing a single page, remove the given number of consecutive - * pages that are all part of the same (large) folio: just like calling - * __tlb_remove_page() on each page individually. 
+ * __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(), + * however, instead of removing a single page, assume PAGE_SIZE and remove + * the given number of consecutive pages that are all part of the + * same (large) folio. * * - tlb_change_page_size() * From 85968b6a2042cd19c6afd1fb49b64bd32f3a4d07 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 12 Feb 2025 17:44:26 +0000 Subject: [PATCH 092/431] selftests/mm: allow tests to run with no huge pages support Currently the mm selftests refuse to run if huge pages are not available in the current system but this is an optional feature and not all the tests actually require them. Change the test during startup to be non-fatal and skip or omit tests which actually rely on having huge pages, allowing the other tests to be run. The gup_test does support using madvise() to configure huge pages but it ignores the error code so we just let it run. Link: https://lkml.kernel.org/r/20250212-kselftest-mm-no-hugepages-v1-2-44702f538522@kernel.org Signed-off-by: Mark Brown Reviewed-by: Nico Pache Cc: Mariano Pache Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 66 ++++++++++++++--------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 7cc71d942f83..93a606198751 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -187,9 +187,10 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then printf "Not enough huge pages available (%d < %d)\n" \ "$freepgs" "$needpgs" fi + HAVE_HUGEPAGES=1 else echo "no hugetlbfs support in kernel?" - exit 1 + HAVE_HUGEPAGES=0 fi # filter 64bit architectures @@ -218,13 +219,20 @@ pretty_name() { # Usage: run_test [test binary] [arbitrary test arguments...] run_test() { if test_selected ${CATEGORY}; then + local skip=0 + # On memory constrainted systems some tests can fail to allocate hugepages. # perform some cleanup before the test for a higher success rate. 
if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then - echo 3 > /proc/sys/vm/drop_caches - sleep 2 - echo 1 > /proc/sys/vm/compact_memory - sleep 2 + if [ "${HAVE_HUGEPAGES}" = "1" ]; then + echo 3 > /proc/sys/vm/drop_caches + sleep 2 + echo 1 > /proc/sys/vm/compact_memory + sleep 2 + else + echo "hugepages not supported" | tap_prefix + skip=1 + fi fi local test=$(pretty_name "$*") @@ -232,8 +240,12 @@ run_test() { local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -) printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix - ("$@" 2>&1) | tap_prefix - local ret=${PIPESTATUS[0]} + if [ "${skip}" != "1" ]; then + ("$@" 2>&1) | tap_prefix + local ret=${PIPESTATUS[0]} + else + local ret=$ksft_skip + fi count_total=$(( count_total + 1 )) if [ $ret -eq 0 ]; then count_pass=$(( count_pass + 1 )) @@ -271,13 +283,15 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap CATEGORY="hugetlb" run_test ./hugetlb-madvise CATEGORY="hugetlb" run_test ./hugetlb_dio -nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages) -# For this test, we need one and just one huge page -echo 1 > /proc/sys/vm/nr_hugepages -CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv -CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map -# Restore the previous number of huge pages, since further tests rely on it -echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages +if [ "${HAVE_HUGEPAGES}" = "1" ]; then + nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages) + # For this test, we need one and just one huge page + echo 1 > /proc/sys/vm/nr_hugepages + CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv + CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map + # Restore the previous number of huge pages, since further tests rely on it + echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages +fi if test_selected "hugetlb"; then echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix @@ -393,7 +407,9 @@ CATEGORY="memfd_secret" run_test ./memfd_secret fi # KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100 -CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +if [ "${HAVE_HUGEPAGES}" = "1" ]; then + CATEGORY="ksm" run_test ./ksm_tests -H -s 100 +fi # KSM KSM_MERGE_TIME test with size of 100 CATEGORY="ksm" run_test ./ksm_tests -P -s 100 # KSM MADV_MERGEABLE test with 10 identical pages @@ -442,15 +458,17 @@ CATEGORY="thp" run_test ./transhuge-stress -d 20 # Try to create XFS if not provided if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then - if test_selected "thp"; then - if grep xfs /proc/filesystems &>/dev/null; then - XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX) - SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX) - truncate -s 314572800 ${XFS_IMG} - mkfs.xfs -q ${XFS_IMG} - mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH} - MOUNTED_XFS=1 - fi + if [ "${HAVE_HUGEPAGES}" = "1" ]; then + if test_selected "thp"; then + if grep xfs /proc/filesystems &>/dev/null; then + XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX) + SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX) + truncate -s 314572800 ${XFS_IMG} + mkfs.xfs -q ${XFS_IMG} + mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH} + MOUNTED_XFS=1 + fi + fi fi fi From bf40aa214195864eeb63165cfb02e959c464d409 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Wed, 12 Feb 2025 01:38:18 +0000 Subject: [PATCH 093/431] mm/mm_init.c: use round_up() to calculate usermap size Since pageblock_nr_pages and BITS_PER_LONG are power of 2, we could use round_up() to calculate it. Also we have renamed blockflags to pageblock_flags, adjust the comment accordingly. 
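To make the reasoning concrete, here is a minimal userspace sketch (not kernel code): the two macros are simplified local re-statements of the kernel's roundup()/round_up() helpers, and the values (zonesize, pageblock size of 512, NR_PAGEBLOCK_BITS of 4, the shift of 9) are illustrative only. It shows that the bitmask-based round_up() gives the same result as the generic roundup() whenever the alignment is a power of two, which is why the substitution in usemap_size() is safe:

  #include <assert.h>
  #include <stdio.h>

  /* Simplified re-statements of the kernel's roundup()/round_up() macros. */
  #define ROUNDUP(x, y)  ((((x) + (y) - 1) / (y)) * (y))   /* generic form */
  #define ROUND_UP(x, y) ((((x) - 1) | ((y) - 1)) + 1)     /* power-of-two only */

  int main(void)
  {
          unsigned long zonesize = 130941UL;          /* arbitrary example */
          unsigned long pageblock_nr_pages = 512UL;   /* power of two */
          unsigned long bits_per_long = 64UL;         /* power of two */
          unsigned long usemapsize;

          /* Identical results whenever the alignment is a power of two. */
          assert(ROUNDUP(zonesize, pageblock_nr_pages) ==
                 ROUND_UP(zonesize, pageblock_nr_pages));

          /* Loosely mirror the shape of usemap_size(): round, shift, scale, round. */
          usemapsize = ROUND_UP(zonesize, pageblock_nr_pages);
          usemapsize >>= 9;                           /* log2(512), illustrative */
          usemapsize *= 4;                            /* NR_PAGEBLOCK_BITS */
          usemapsize = ROUND_UP(usemapsize, bits_per_long);
          printf("%lu bytes\n", usemapsize / 8);
          return 0;
  }

The bitmask form avoids the division, which is the practical reason to prefer round_up() when the alignment is known to be a power of two.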
Link: https://lkml.kernel.org/r/20250212013818.873-1-richard.weiyang@gmail.com Signed-off-by: Wei Yang Suggested-by: Shivank Garg Reviewed-by: Shivank Garg Acked-by: David Hildenbrand Reviewed-by: Mike Rapoport (Microsoft) Reviewed-by: Anshuman Khandual Signed-off-by: Andrew Morton --- mm/mm_init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/mm_init.c b/mm/mm_init.c index 6078b3651f2e..c767946e8f5f 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1431,7 +1431,7 @@ void __meminit init_currently_empty_zone(struct zone *zone, #ifndef CONFIG_SPARSEMEM /* - * Calculate the size of the zone->blockflags rounded to an unsigned long + * Calculate the size of the zone->pageblock_flags rounded to an unsigned long * Start by making sure zonesize is a multiple of pageblock_order by rounding * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally * round what is now in bits to nearest long in bits, then return it in @@ -1442,10 +1442,10 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l unsigned long usemapsize; zonesize += zone_start_pfn & (pageblock_nr_pages-1); - usemapsize = roundup(zonesize, pageblock_nr_pages); + usemapsize = round_up(zonesize, pageblock_nr_pages); usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; - usemapsize = roundup(usemapsize, BITS_PER_LONG); + usemapsize = round_up(usemapsize, BITS_PER_LONG); return usemapsize / BITS_PER_BYTE; } From f807123d578df4218e2580a1e1bb3436f4567c4a Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Feb 2025 18:17:00 +0000 Subject: [PATCH 094/431] mm: allow guard regions in file-backed and read-only mappings Patch series "mm: permit guard regions for file-backed/shmem mappings". The guard regions feature was initially implemented to support anonymous mappings only, excluding shmem. This was done so as to introduce the feature carefully and incrementally and to be conservative when considering the various caveats and corner cases that are applicable to file-backed mappings but not to anonymous ones. Now this feature has landed in 6.13, it is time to revisit this and to extend this functionality to file-backed and shmem mappings. In order to make this maximally useful, and since one may map file-backed mappings read-only (for instance ELF images), we also remove the restriction on read-only mappings and permit the establishment of guard regions in any non-hugetlb, non-mlock()'d mapping. It is permissible to permit the establishment of guard regions in read-only mappings because the guard regions only reduce access to the mapping, and when removed simply reinstate the existing attributes of the underlying VMA, meaning no access violations can occur. While the change in kernel code introduced in this series is small, the majority of the effort here is spent in extending the testing to assert that the feature works correctly across numerous file-backed mapping scenarios. Every single guard region self-test performed against anonymous memory (which is relevant and not anon-only) has now been updated to also be performed against shmem and a mapping of a file in the working directory. This confirms that all cases also function correctly for file-backed guard regions. In addition a number of other tests are added for specific file-backed mapping scenarios. 
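For orientation, the kind of usage the extended tests exercise looks roughly like the following minimal sketch. It is not part of the series; it assumes a kernel with these patches applied, the MADV_GUARD_INSTALL/MADV_GUARD_REMOVE fallback values are taken on the assumption that they match the asm-generic uapi header, the temporary file path is arbitrary, and error handling is abbreviated:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #ifndef MADV_GUARD_INSTALL
  #define MADV_GUARD_INSTALL 102   /* assumed asm-generic values */
  #define MADV_GUARD_REMOVE  103
  #endif

  int main(void)
  {
          long page = sysconf(_SC_PAGESIZE);
          char path[] = "/tmp/guard-demo-XXXXXX";
          int fd = mkstemp(path);
          char *ptr;

          if (fd < 0 || ftruncate(fd, 10 * page))
                  return 1;

          ptr = mmap(NULL, 10 * page, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
          if (ptr == MAP_FAILED)
                  return 1;

          memset(ptr, 'x', 10 * page);

          /* Guard the middle page; faults there now deliver SIGSEGV. */
          if (madvise(ptr + 5 * page, page, MADV_GUARD_INSTALL))
                  perror("MADV_GUARD_INSTALL");   /* expected to fail without this series */

          /* Neighbouring pages remain readable/writable as before. */
          ptr[4 * page] = 'y';
          ptr[6 * page] = 'y';

          /* Removing the guard restores access; the file data faults back in. */
          madvise(ptr + 5 * page, page, MADV_GUARD_REMOVE);
          printf("%c\n", ptr[5 * page]);           /* prints 'x' */

          munmap(ptr, 10 * page);
          close(fd);
          unlink(path);
          return 0;
  }

On a kernel without the series, the madvise() call on the MAP_SHARED file mapping is expected to be rejected, which is exactly the restriction being lifted here.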
There are a number of other concerns one might have with regard to guard
regions, addressed below:

Readahead
~~~~~~~~~

Readahead is a process through which the page cache is populated on the
assumption that sequential reads will occur, thus amortising I/O and,
through a clever use of the PG_readahead folio flag established during
major fault and checked upon minor fault, provides for asynchronous I/O to
occur as data is processed, reducing I/O stalls as data is faulted in.

Guard regions do not alter this mechanism, which operates at the folio and
fault level, but they do of course prevent the faulting of folios that
would otherwise be mapped.

In the instance of a major fault prior to a guard region, synchronous
readahead will occur, including populating folios in the page cache which
the guard regions will, in the case of the mapping in question, prevent
access to.

In addition, if PG_readahead is placed in a folio that is now
inaccessible, this will prevent asynchronous readahead from occurring as
it otherwise would. However, there are mechanisms for heuristically
resetting this within readahead regardless, which will 'recover' correct
readahead behaviour.

Readahead presumes sequential data access; the presence of a guard region
clearly indicates that, at least within the guard region, no such
sequential access will occur, as it cannot occur there. So this should
have very little impact on any real workload.

The far more important point is whether readahead causes incorrect or
inappropriate mapping of ranges disallowed by the presence of guard
regions - this is not the case, as readahead does not 'pre-fault' memory
in this fashion.

At any rate, any mechanism which would attempt to do so would hit the
usual page fault paths, which correctly handle PTE markers as with
anonymous mappings.

Fault-Around
~~~~~~~~~~~~

The fault-around logic, in a similar vein to readahead, attempts to
improve efficiency with regard to file-backed memory mappings, however it
differs in that it does not try to fetch folios into the page cache that
are about to be accessed, but rather pre-maps a range of folios around the
faulting address.

Guard regions making use of PTE markers makes this relatively trivial, as
this case is already handled - see filemap_map_folio_range() and
filemap_map_order0_folio() - in both instances, the solution is to simply
keep the established page table mappings and let the fault handler take
care of PTE markers, as per the comment:

	/*
	 * NOTE: If there're PTE markers, we'll leave them to be
	 * handled in the specific fault path, and it'll prohibit
	 * the fault-around logic.
	 */

This works, as establishing guard regions results in page table mappings
with PTE markers, and clearing them removes them.

Truncation
~~~~~~~~~~

File truncation will not eliminate existing guard regions, as the
truncation operation will ultimately zap the range via
unmap_mapping_range(), which specifically excludes PTE markers.

Zapping
~~~~~~~

Zapping is, as with anonymous mappings, handled by zap_nonpresent_ptes(),
which specifically deals with guard entries, leaving them intact except in
instances such as process teardown or munmap() where they need to be
removed.

Reclaim
~~~~~~~

When reclaim is performed on file-backed folios, it ultimately invokes
try_to_unmap_one() via the rmap.

If the folio is non-large, then map_pte() will ultimately abort the
operation for the guard region mapping.
If large, then check_pte() will determine that this is a non-device
private entry/device-exclusive entry 'swap' PTE and thus abort the
operation in that instance.

Therefore, nothing odd happens when reclaim is attempted upon a
file-backed guard region.

Hole Punching
~~~~~~~~~~~~~

This updates the page cache and ultimately invokes unmap_mapping_range(),
which explicitly leaves PTE markers in place.

Because the establishment of guard regions zapped any existing mappings to
file-backed folios, once the guard regions are removed the hole-punched
region will be faulted in as usual and everything will behave as expected.

One thing to note with this series is that it now implies that file-backed
VMAs which install guard regions will have an anon_vma installed if not
already present (i.e. if not post-CoW MAP_PRIVATE).

I have audited the kernel source for instances of vma->anon_vma checks and
found nowhere where this would be problematic for pure file-backed
mappings. I also discussed (off-list) with Matthew, who confirmed he can't
see any issue with this.

In effect, we treat these VMAs as if they are MAP_PRIVATE, only with 0
CoW'd pages. As a result, the rmap never has a reason to reference the
anon_vma from folios at any point and thus no unexpected or weird
behaviour results.

The anon_vma logic tries to avoid unnecessary anon_vma propagation on
fork, so we ought to at least minimise overhead. However, this is still
overhead, and unwelcome overhead.

The whole reason we do this (in madvise_guard_install()) is to ensure that
fork _copies page tables_. Otherwise, in vma_needs_copy(), nothing will
indicate that we should do so.

This was already an unpleasant thing to have to do, but without a new VMA
flag we really have no reasonable means of ensuring this happens.

Going forward, I intend to add a new VMA flag, VM_MAYBE_GUARDED or
something like this. This would have specific behaviour - for the purposes
of merging it would be ignored, however on both split and merge it will be
propagated. It is therefore 'sticky'.

This is to avoid having to traverse page tables to determine which parts
of a VMA contain guard regions and, of course, to maintain the desirable
qualities of guard regions - the lack of VMA propagation (and thus of slab
allocations of VMAs).

Adding this flag and adjusting vma_needs_copy() to reference it would
resolve the issue. However :) we have a VMA flag space issue, so it'd
render this a 64-bit-only feature.

Having discussed with Matthew a plan by which to perhaps extend the
available flags for 32-bit, we may be able to avoid this going forward,
but that may be a longer-term project. In the meantime, we'd have to
resort to the anon_vma hack for 32-bit and use the flag for 64-bit.

The issue with this, however, is that if we then intend to allow the flag
to enable /proc/$pid/maps visibility (something this could allow), it
would also end up being 64-bit only, which would be a pity.

Regardless - I wanted to highlight this behaviour as it is perhaps
somewhat surprising.

This patch (of 4):

There is no reason to disallow guard regions in file-backed mappings -
readahead and fault-around both function correctly in the presence of PTE
markers, and equally, other operations relating to memory-mapped files
function correctly.

Additionally, if guard regions are introduced into read-only mappings,
they only restrict the mapping further, which means there is no violation
of any access rights by permitting this to be so.
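As a concrete illustration of what the relaxed check permits, a hedged sketch follows. It is not part of the patch: /bin/true is just a stand-in for any read-only file, and the MADV_GUARD_INSTALL fallback value is assumed to match the asm-generic uapi header.

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #ifndef MADV_GUARD_INSTALL
  #define MADV_GUARD_INSTALL 102   /* assumed asm-generic value */
  #endif

  int main(void)
  {
          long page = sysconf(_SC_PAGESIZE);
          int fd = open("/bin/true", O_RDONLY);   /* any read-only file */
          char *ptr;

          if (fd < 0)
                  return 1;

          /* A read-only, file-backed, MAP_PRIVATE mapping, as an ELF loader makes. */
          ptr = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, 0);
          if (ptr == MAP_FAILED)
                  return 1;

          /*
           * Previously rejected because the VMA is file-backed and lacks
           * VM_MAYWRITE; with this patch the guard only further restricts
           * access, so it is permitted.
           */
          if (madvise(ptr, page, MADV_GUARD_INSTALL))
                  perror("MADV_GUARD_INSTALL");

          munmap(ptr, page);
          close(fd);
          return 0;
  }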
Removing this restriction allows for read-only mapped files (such as executable files) correctly which would otherwise not be permitted. Link: https://lkml.kernel.org/r/cover.1739469950.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/d885cb259174736c2830a5dfe07f81b214ef3faa.1739469950.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Acked-by: David Hildenbrand Cc: Jann Horn Cc: John Hubbard Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcox Cc: "Paul E . McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/madvise.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 6ecead476a80..e01e93e179a8 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1051,13 +1051,7 @@ static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked) if (!allow_locked) disallowed |= VM_LOCKED; - if (!vma_is_anonymous(vma)) - return false; - - if ((vma->vm_flags & (VM_MAYWRITE | disallowed)) != VM_MAYWRITE) - return false; - - return true; + return !(vma->vm_flags & disallowed); } static bool is_guard_pte_marker(pte_t ptent) From ce1c0824fc2a2e81b384483e4ab66e06f8fc0f3c Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Feb 2025 18:17:01 +0000 Subject: [PATCH 095/431] selftests/mm: rename guard-pages to guard-regions The feature formerly referred to as guard pages is more correctly referred to as 'guard regions', as in fact no pages are ever allocated in the process of installing the regions. To avoid confusion, rename the tests accordingly. [lorenzo.stoakes@oracle.com: fix guard regions invocation] Link: https://lkml.kernel.org/r/13426c71-d069-4407-9340-b227ff8b8736@lucifer.local Link: https://lkml.kernel.org/r/1c3cd04a3f69b5756b94bda701ac88325a9be18b.1739469950.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: David Hildenbrand Cc: Jann Horn Cc: John Hubbard Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcox Cc: "Paul E . 
McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 2 +- tools/testing/selftests/mm/Makefile | 2 +- .../mm/{guard-pages.c => guard-regions.c} | 42 +++++++++---------- tools/testing/selftests/mm/run_vmtests.sh | 2 +- 4 files changed, 24 insertions(+), 24 deletions(-) rename tools/testing/selftests/mm/{guard-pages.c => guard-regions.c} (98%) diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 121000c28c10..c5241b193db8 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -57,4 +57,4 @@ droppable hugetlb_dio pkey_sighandler_tests_32 pkey_sighandler_tests_64 -guard-pages +guard-regions diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 63ce39d024bb..8270895039d1 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -97,7 +97,7 @@ TEST_GEN_FILES += hugetlb_fault_after_madv TEST_GEN_FILES += hugetlb_madv_vs_map TEST_GEN_FILES += hugetlb_dio TEST_GEN_FILES += droppable -TEST_GEN_FILES += guard-pages +TEST_GEN_FILES += guard-regions ifneq ($(ARCH),arm64) TEST_GEN_FILES += soft-dirty diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-regions.c similarity index 98% rename from tools/testing/selftests/mm/guard-pages.c rename to tools/testing/selftests/mm/guard-regions.c index ece37212a8a2..7a41cf9ffbdf 100644 --- a/tools/testing/selftests/mm/guard-pages.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -107,12 +107,12 @@ static bool try_read_write_buf(char *ptr) return try_read_buf(ptr) && try_write_buf(ptr); } -FIXTURE(guard_pages) +FIXTURE(guard_regions) { unsigned long page_size; }; -FIXTURE_SETUP(guard_pages) +FIXTURE_SETUP(guard_regions) { struct sigaction act = { .sa_handler = &handle_fatal, @@ -126,7 +126,7 @@ FIXTURE_SETUP(guard_pages) self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); }; -FIXTURE_TEARDOWN(guard_pages) +FIXTURE_TEARDOWN(guard_regions) { struct sigaction act = { .sa_handler = SIG_DFL, @@ -137,7 +137,7 @@ FIXTURE_TEARDOWN(guard_pages) sigaction(SIGSEGV, &act, NULL); } -TEST_F(guard_pages, basic) +TEST_F(guard_regions, basic) { const unsigned long NUM_PAGES = 10; const unsigned long page_size = self->page_size; @@ -231,7 +231,7 @@ TEST_F(guard_pages, basic) } /* Assert that operations applied across multiple VMAs work as expected. */ -TEST_F(guard_pages, multi_vma) +TEST_F(guard_regions, multi_vma) { const unsigned long page_size = self->page_size; char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3; @@ -367,7 +367,7 @@ TEST_F(guard_pages, multi_vma) * Assert that batched operations performed using process_madvise() work as * expected. */ -TEST_F(guard_pages, process_madvise) +TEST_F(guard_regions, process_madvise) { const unsigned long page_size = self->page_size; pid_t pid = getpid(); @@ -467,7 +467,7 @@ TEST_F(guard_pages, process_madvise) } /* Assert that unmapping ranges does not leave guard markers behind. */ -TEST_F(guard_pages, munmap) +TEST_F(guard_regions, munmap) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new1, *ptr_new2; @@ -505,7 +505,7 @@ TEST_F(guard_pages, munmap) } /* Assert that mprotect() operations have no bearing on guard markers. 
*/ -TEST_F(guard_pages, mprotect) +TEST_F(guard_regions, mprotect) { const unsigned long page_size = self->page_size; char *ptr; @@ -553,7 +553,7 @@ TEST_F(guard_pages, mprotect) } /* Split and merge VMAs and make sure guard pages still behave. */ -TEST_F(guard_pages, split_merge) +TEST_F(guard_regions, split_merge) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; @@ -684,7 +684,7 @@ TEST_F(guard_pages, split_merge) } /* Assert that MADV_DONTNEED does not remove guard markers. */ -TEST_F(guard_pages, dontneed) +TEST_F(guard_regions, dontneed) { const unsigned long page_size = self->page_size; char *ptr; @@ -737,7 +737,7 @@ TEST_F(guard_pages, dontneed) } /* Assert that mlock()'ed pages work correctly with guard markers. */ -TEST_F(guard_pages, mlock) +TEST_F(guard_regions, mlock) { const unsigned long page_size = self->page_size; char *ptr; @@ -810,7 +810,7 @@ TEST_F(guard_pages, mlock) * * - Moving a mapping alone should retain markers as they are. */ -TEST_F(guard_pages, mremap_move) +TEST_F(guard_regions, mremap_move) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; @@ -857,7 +857,7 @@ TEST_F(guard_pages, mremap_move) * will have to remove guard pages manually to fix up (they'd have to do the * same if it were a PROT_NONE mapping). */ -TEST_F(guard_pages, mremap_expand) +TEST_F(guard_regions, mremap_expand) { const unsigned long page_size = self->page_size; char *ptr, *ptr_new; @@ -920,7 +920,7 @@ TEST_F(guard_pages, mremap_expand) * if the user were using a PROT_NONE mapping they'd have to manually fix this * up also so this is OK. */ -TEST_F(guard_pages, mremap_shrink) +TEST_F(guard_regions, mremap_shrink) { const unsigned long page_size = self->page_size; char *ptr; @@ -984,7 +984,7 @@ TEST_F(guard_pages, mremap_shrink) * Assert that forking a process with VMAs that do not have VM_WIPEONFORK set * retain guard pages. */ -TEST_F(guard_pages, fork) +TEST_F(guard_regions, fork) { const unsigned long page_size = self->page_size; char *ptr; @@ -1039,7 +1039,7 @@ TEST_F(guard_pages, fork) * Assert expected behaviour after we fork populated ranges of anonymous memory * and then guard and unguard the range. */ -TEST_F(guard_pages, fork_cow) +TEST_F(guard_regions, fork_cow) { const unsigned long page_size = self->page_size; char *ptr; @@ -1110,7 +1110,7 @@ TEST_F(guard_pages, fork_cow) * Assert that forking a process with VMAs that do have VM_WIPEONFORK set * behave as expected. */ -TEST_F(guard_pages, fork_wipeonfork) +TEST_F(guard_regions, fork_wipeonfork) { const unsigned long page_size = self->page_size; char *ptr; @@ -1160,7 +1160,7 @@ TEST_F(guard_pages, fork_wipeonfork) } /* Ensure that MADV_FREE retains guard entries as expected. */ -TEST_F(guard_pages, lazyfree) +TEST_F(guard_regions, lazyfree) { const unsigned long page_size = self->page_size; char *ptr; @@ -1196,7 +1196,7 @@ TEST_F(guard_pages, lazyfree) } /* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */ -TEST_F(guard_pages, populate) +TEST_F(guard_regions, populate) { const unsigned long page_size = self->page_size; char *ptr; @@ -1222,7 +1222,7 @@ TEST_F(guard_pages, populate) } /* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */ -TEST_F(guard_pages, cold_pageout) +TEST_F(guard_regions, cold_pageout) { const unsigned long page_size = self->page_size; char *ptr; @@ -1268,7 +1268,7 @@ TEST_F(guard_pages, cold_pageout) } /* Ensure that guard pages do not break userfaultd. 
*/ -TEST_F(guard_pages, uffd) +TEST_F(guard_regions, uffd) { const unsigned long page_size = self->page_size; int uffd; diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 93a606198751..4b5e45a10219 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -395,7 +395,7 @@ CATEGORY="mremap" run_test ./mremap_dontunmap CATEGORY="hmm" run_test bash ./test_hmm.sh smoke # MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests -CATEGORY="madv_guard" run_test ./guard-pages +CATEGORY="madv_guard" run_test ./guard-regions # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests CATEGORY="madv_populate" run_test ./madv_populate From 272f37d3e99a223fbb51fc2f6e6135c9856dc104 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Feb 2025 18:17:02 +0000 Subject: [PATCH 096/431] tools/selftests: expand all guard region tests to file-backed Extend the guard region tests to allow for test fixture variants for anon, shmem, and local file files. This allows us to assert that each of the expected behaviours of anonymous memory also applies correctly to file-backed (both shmem and an a file created locally in the current working directory) and thus asserts the same correctness guarantees as all the remaining tests do. The fixture teardown is now performed in the parent process rather than child forked ones, meaning cleanup is always performed, including unlinking any generated temporary files. Additionally the variant fixture data type now contains an enum value indicating the type of backing store and the mmap() invocation is abstracted to allow for the mapping of whichever backing store the variant is testing. We adjust tests as necessary to account for the fact they may now reference files rather than anonymous memory. Link: https://lkml.kernel.org/r/ab42228d2bd9b8aa18e9faebcd5c88732a7e5820.1739469950.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: David Hildenbrand Cc: Jann Horn Cc: John Hubbard Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcox Cc: "Paul E . McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-regions.c | 290 +++++++++++++++------ 1 file changed, 205 insertions(+), 85 deletions(-) diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c index 7a41cf9ffbdf..0469c783f4fa 100644 --- a/tools/testing/selftests/mm/guard-regions.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -37,6 +38,79 @@ static sigjmp_buf signal_jmp_buf; */ #define FORCE_READ(x) (*(volatile typeof(x) *)x) +/* + * How is the test backing the mapping being tested? 
+ */ +enum backing_type { + ANON_BACKED, + SHMEM_BACKED, + LOCAL_FILE_BACKED, +}; + +FIXTURE(guard_regions) +{ + unsigned long page_size; + char path[PATH_MAX]; + int fd; +}; + +FIXTURE_VARIANT(guard_regions) +{ + enum backing_type backing; +}; + +FIXTURE_VARIANT_ADD(guard_regions, anon) +{ + .backing = ANON_BACKED, +}; + +FIXTURE_VARIANT_ADD(guard_regions, shmem) +{ + .backing = SHMEM_BACKED, +}; + +FIXTURE_VARIANT_ADD(guard_regions, file) +{ + .backing = LOCAL_FILE_BACKED, +}; + +static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant) +{ + switch (variant->backing) { + case ANON_BACKED: + case SHMEM_BACKED: + return true; + default: + return false; + } +} + +static void *mmap_(FIXTURE_DATA(guard_regions) * self, + const FIXTURE_VARIANT(guard_regions) * variant, + void *addr, size_t length, int prot, int extra_flags, + off_t offset) +{ + int fd; + int flags = extra_flags; + + switch (variant->backing) { + case ANON_BACKED: + flags |= MAP_PRIVATE | MAP_ANON; + fd = -1; + break; + case SHMEM_BACKED: + case LOCAL_FILE_BACKED: + flags |= MAP_SHARED; + fd = self->fd; + break; + default: + ksft_exit_fail(); + break; + } + + return mmap(addr, length, prot, flags, fd, offset); +} + static int userfaultfd(int flags) { return syscall(SYS_userfaultfd, flags); @@ -107,12 +181,7 @@ static bool try_read_write_buf(char *ptr) return try_read_buf(ptr) && try_write_buf(ptr); } -FIXTURE(guard_regions) -{ - unsigned long page_size; -}; - -FIXTURE_SETUP(guard_regions) +static void setup_sighandler(void) { struct sigaction act = { .sa_handler = &handle_fatal, @@ -122,11 +191,9 @@ FIXTURE_SETUP(guard_regions) sigemptyset(&act.sa_mask); if (sigaction(SIGSEGV, &act, NULL)) ksft_exit_fail_perror("sigaction"); +} - self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); -}; - -FIXTURE_TEARDOWN(guard_regions) +static void teardown_sighandler(void) { struct sigaction act = { .sa_handler = SIG_DFL, @@ -137,6 +204,48 @@ FIXTURE_TEARDOWN(guard_regions) sigaction(SIGSEGV, &act, NULL); } +static int open_file(const char *prefix, char *path) +{ + int fd; + + snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix); + fd = mkstemp(path); + if (fd < 0) + ksft_exit_fail_perror("mkstemp"); + + return fd; +} + +FIXTURE_SETUP(guard_regions) +{ + self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); + setup_sighandler(); + + if (variant->backing == ANON_BACKED) + return; + + self->fd = open_file( + variant->backing == SHMEM_BACKED ? "/tmp/" : "", + self->path); + + /* We truncate file to at least 100 pages, tests can modify as needed. */ + ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0); +}; + +FIXTURE_TEARDOWN_PARENT(guard_regions) +{ + teardown_sighandler(); + + if (variant->backing == ANON_BACKED) + return; + + if (self->fd >= 0) + close(self->fd); + + if (self->path[0] != '\0') + unlink(self->path); +} + TEST_F(guard_regions, basic) { const unsigned long NUM_PAGES = 10; @@ -144,8 +253,8 @@ TEST_F(guard_regions, basic) char *ptr; int i; - ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON, -1, 0); + ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Trivially assert we can touch the first page. */ @@ -238,25 +347,23 @@ TEST_F(guard_regions, multi_vma) int i; /* Reserve a 100 page region over which we can install VMAs. 
*/ - ptr_region = mmap(NULL, 100 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_region = mmap_(self, variant, NULL, 100 * page_size, + PROT_NONE, 0, 0); ASSERT_NE(ptr_region, MAP_FAILED); /* Place a VMA of 10 pages size at the start of the region. */ - ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr1 = mmap_(self, variant, ptr_region, 10 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr1, MAP_FAILED); /* Place a VMA of 5 pages size 50 pages into the region. */ - ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr2, MAP_FAILED); /* Place a VMA of 20 pages size at the end of the region. */ - ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr3, MAP_FAILED); /* Unmap gaps. */ @@ -326,13 +433,11 @@ TEST_F(guard_regions, multi_vma) } /* Now map incompatible VMAs in the gaps. */ - ptr = mmap(&ptr_region[10 * page_size], 40 * page_size, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size, + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0); ASSERT_NE(ptr, MAP_FAILED); - ptr = mmap(&ptr_region[55 * page_size], 25 * page_size, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size, + PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0); ASSERT_NE(ptr, MAP_FAILED); /* @@ -379,8 +484,8 @@ TEST_F(guard_regions, process_madvise) ASSERT_NE(pidfd, -1); /* Reserve region to map over. */ - ptr_region = mmap(NULL, 100 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_region = mmap_(self, variant, NULL, 100 * page_size, + PROT_NONE, 0, 0); ASSERT_NE(ptr_region, MAP_FAILED); /* @@ -388,9 +493,8 @@ TEST_F(guard_regions, process_madvise) * overwrite existing entries and test this code path against * overwriting existing entries. */ - ptr1 = mmap(&ptr_region[page_size], 10 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0); + ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0); ASSERT_NE(ptr1, MAP_FAILED); /* We want guard markers at start/end of each VMA. */ vec[0].iov_base = ptr1; @@ -399,9 +503,8 @@ TEST_F(guard_regions, process_madvise) vec[1].iov_len = page_size; /* 5 pages offset 50 pages into reserve region. */ - ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr2, MAP_FAILED); vec[2].iov_base = ptr2; vec[2].iov_len = page_size; @@ -409,9 +512,8 @@ TEST_F(guard_regions, process_madvise) vec[3].iov_len = page_size; /* 20 pages offset 79 pages into reserve region. 
*/ - ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size, - PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr3, MAP_FAILED); vec[4].iov_base = ptr3; vec[4].iov_len = page_size; @@ -472,8 +574,8 @@ TEST_F(guard_regions, munmap) const unsigned long page_size = self->page_size; char *ptr, *ptr_new1, *ptr_new2; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard first and last pages. */ @@ -489,11 +591,11 @@ TEST_F(guard_regions, munmap) ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0); /* Map over them.*/ - ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE, + MAP_FIXED, 0); ASSERT_NE(ptr_new1, MAP_FAILED); - ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new2, MAP_FAILED); /* Assert that they are now not guarded. */ @@ -511,8 +613,8 @@ TEST_F(guard_regions, mprotect) char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard the middle of the range. */ @@ -559,8 +661,8 @@ TEST_F(guard_regions, split_merge) char *ptr, *ptr_new; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard the whole range. */ @@ -601,14 +703,14 @@ TEST_F(guard_regions, split_merge) } /* Now map them again - the unmap will have cleared the guards. */ - ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); - ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); - ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE, - MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size, + PROT_READ | PROT_WRITE, MAP_FIXED, 0); ASSERT_NE(ptr_new, MAP_FAILED); /* Now make sure guard pages are established. */ @@ -690,8 +792,8 @@ TEST_F(guard_regions, dontneed) char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Back the whole range. */ @@ -721,8 +823,16 @@ TEST_F(guard_regions, dontneed) ASSERT_FALSE(result); } else { ASSERT_TRUE(result); - /* Make sure we really did get reset to zero page. */ - ASSERT_EQ(*curr, '\0'); + switch (variant->backing) { + case ANON_BACKED: + /* If anon, then we get a zero page. 
*/ + ASSERT_EQ(*curr, '\0'); + break; + default: + /* Otherwise, we get the file data. */ + ASSERT_EQ(*curr, 'y'); + break; + } } /* Now write... */ @@ -743,8 +853,8 @@ TEST_F(guard_regions, mlock) char *ptr; int i; - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Populate. */ @@ -816,8 +926,8 @@ TEST_F(guard_regions, mremap_move) char *ptr, *ptr_new; /* Map 5 pages. */ - ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 5 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Place guard markers at both ends of the 5 page span. */ @@ -831,8 +941,7 @@ TEST_F(guard_regions, mremap_move) /* Map a new region we will move this range into. Doing this ensures * that we have reserved a range to map into. */ - ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE, - -1, 0); + ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0); ASSERT_NE(ptr_new, MAP_FAILED); ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size, @@ -863,8 +972,8 @@ TEST_F(guard_regions, mremap_expand) char *ptr, *ptr_new; /* Map 10 pages... */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* ...But unmap the last 5 so we can ensure we can expand into them. */ ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0); @@ -888,8 +997,7 @@ TEST_F(guard_regions, mremap_expand) ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size])); /* Reserve a region which we can move to and expand into. */ - ptr_new = mmap(NULL, 20 * page_size, PROT_NONE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0); ASSERT_NE(ptr_new, MAP_FAILED); /* Now move and expand into it. */ @@ -927,8 +1035,8 @@ TEST_F(guard_regions, mremap_shrink) int i; /* Map 5 pages. */ - ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 5 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Place guard markers at both ends of the 5 page span. */ @@ -992,8 +1100,8 @@ TEST_F(guard_regions, fork) int i; /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Establish guard pages in the first 5 pages. */ @@ -1046,9 +1154,12 @@ TEST_F(guard_regions, fork_cow) pid_t pid; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "CoW only supported on anon mappings"); + /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Populate range. */ @@ -1117,9 +1228,12 @@ TEST_F(guard_regions, fork_wipeonfork) pid_t pid; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "Wipe on fork only supported on anon mappings"); + /* Map 10 pages. 
*/ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Mark wipe on fork. */ @@ -1166,9 +1280,12 @@ TEST_F(guard_regions, lazyfree) char *ptr; int i; + if (variant->backing != ANON_BACKED) + SKIP(return, "MADV_FREE only supported on anon mappings"); + /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1202,8 +1319,8 @@ TEST_F(guard_regions, populate) char *ptr; /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1229,8 +1346,8 @@ TEST_F(guard_regions, cold_pageout) int i; /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Guard range. */ @@ -1281,6 +1398,9 @@ TEST_F(guard_regions, uffd) struct uffdio_register reg; struct uffdio_range range; + if (!is_anon_backed(variant)) + SKIP(return, "uffd only works on anon backing"); + /* Set up uffd. */ uffd = userfaultfd(0); if (uffd == -1 && errno == EPERM) @@ -1290,8 +1410,8 @@ TEST_F(guard_regions, uffd) ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0); /* Map 10 pages. */ - ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, - MAP_ANON | MAP_PRIVATE, -1, 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); ASSERT_NE(ptr, MAP_FAILED); /* Register the range with uffd. */ From 0b6d4853d1d7d8ab65de9e6958af4d5cdf6a2b75 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 13 Feb 2025 18:17:03 +0000 Subject: [PATCH 097/431] tools/selftests: add file/shmem-backed mapping guard region tests Extend the guard region self tests to explicitly assert that guard regions work correctly for functionality specific to file-backed and shmem mappings. In addition to testing all of the existing guard region functionality that is currently tested against anonymous mappings against file-backed and shmem mappings (except those which are exclusive to anonymous mapping), we now also: * Test that MADV_SEQUENTIAL does not cause unexpected readahead behaviour. * Test that MAP_PRIVATE behaves as expected with guard regions installed in both a shared and private mapping of an fd. * Test that a read-only file can correctly establish guard regions. * Test a probable fault-around case does not interfere with guard regions (or vice-versa). * Test that truncation does not eliminate guard regions. * Test that hole punching functions as expected in the presence of guard regions. * Test that a read-only mapping of a memfd write sealed mapping can have guard regions established within it and function correctly without violation of the seal. * Test that guard regions installed into a mapping of the anonymous zero page function correctly. Link: https://lkml.kernel.org/r/90c16bec5fcaafcd1700dfa3e9988c3e1aa9ac1d.1739469950.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: Jann Horn Cc: John Hubbard Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcox Cc: "Paul E . 
McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-regions.c | 595 +++++++++++++++++++++ 1 file changed, 595 insertions(+) diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c index 0469c783f4fa..ea9b5815e828 100644 --- a/tools/testing/selftests/mm/guard-regions.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -216,6 +216,58 @@ static int open_file(const char *prefix, char *path) return fd; } +/* Establish a varying pattern in a buffer. */ +static void set_pattern(char *ptr, size_t num_pages, size_t page_size) +{ + size_t i; + + for (i = 0; i < num_pages; i++) { + char *ptr2 = &ptr[i * page_size]; + + memset(ptr2, 'a' + (i % 26), page_size); + } +} + +/* + * Check that a buffer contains the pattern set by set_pattern(), starting at a + * page offset of pgoff within the buffer. + */ +static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size, + size_t pgoff) +{ + size_t i; + + for (i = 0; i < num_pages * page_size; i++) { + size_t offset = pgoff * page_size + i; + char actual = ptr[offset]; + char expected = 'a' + ((offset / page_size) % 26); + + if (actual != expected) + return false; + } + + return true; +} + +/* Check that a buffer contains the pattern set by set_pattern(). */ +static bool check_pattern(char *ptr, size_t num_pages, size_t page_size) +{ + return check_pattern_offset(ptr, num_pages, page_size, 0); +} + +/* Determine if a buffer contains only repetitions of a specified char. */ +static bool is_buf_eq(char *buf, size_t size, char chr) +{ + size_t i; + + for (i = 0; i < size; i++) { + if (buf[i] != chr) + return false; + } + + return true; +} + FIXTURE_SETUP(guard_regions) { self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); @@ -1437,4 +1489,547 @@ TEST_F(guard_regions, uffd) ASSERT_EQ(munmap(ptr, 10 * page_size), 0); } +/* + * Mark a region within a file-backed mapping using MADV_SEQUENTIAL so we + * aggressively read-ahead, then install guard regions and assert that it + * behaves correctly. + * + * We page out using MADV_PAGEOUT before checking guard regions so we drop page + * cache folios, meaning we maximise the possibility of some broken readahead. + */ +TEST_F(guard_regions, madvise_sequential) +{ + char *ptr; + int i; + const unsigned long page_size = self->page_size; + + if (variant->backing == ANON_BACKED) + SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Establish a pattern of data in the file. */ + set_pattern(ptr, 10, page_size); + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Mark it as being accessed sequentially. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0); + + /* Mark every other page a guard page. */ + for (i = 0; i < 10; i += 2) { + char *ptr2 = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now page it out. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); + + /* Now make sure pages are as expected. */ + for (i = 0; i < 10; i++) { + char *chrp = &ptr[i * page_size]; + + if (i % 2 == 0) { + bool result = try_read_write_buf(chrp); + + ASSERT_FALSE(result); + } else { + ASSERT_EQ(*chrp, 'a' + i); + } + } + + /* Now remove guard pages. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Now make sure all data is as expected. 
*/ + if (!check_pattern(ptr, 10, page_size)) + ASSERT_TRUE(false); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +/* + * Check that file-backed mappings implement guard regions with MAP_PRIVATE + * correctly. + */ +TEST_F(guard_regions, map_private) +{ + const unsigned long page_size = self->page_size; + char *ptr_shared, *ptr_private; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "MAP_PRIVATE test specific to file-backed"); + + ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr_shared, MAP_FAILED); + + /* Manually mmap(), do not use mmap_() wrapper so we can force MAP_PRIVATE. */ + ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0); + ASSERT_NE(ptr_private, MAP_FAILED); + + /* Set pattern in shared mapping. */ + set_pattern(ptr_shared, 10, page_size); + + /* Install guard regions in every other page in the shared mapping. */ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_shared[i * page_size]; + + ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0); + } + + for (i = 0; i < 10; i++) { + /* Every even shared page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); + /* Private mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size])); + } + + /* Install guard regions in every other page in the private mapping. */ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_private[i * page_size]; + + ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0); + } + + for (i = 0; i < 10; i++) { + /* Every even shared page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0); + /* Every odd private page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0); + } + + /* Remove guard regions from shared mapping. */ + ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0); + + for (i = 0; i < 10; i++) { + /* Shared mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + /* Every even private page should be guarded. */ + ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0); + } + + /* Remove guard regions from private mapping. */ + ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); + + for (i = 0; i < 10; i++) { + /* Shared mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + /* Private mappings should always be readable. */ + ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size])); + } + + /* Ensure patterns are intact. */ + ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size)); + ASSERT_TRUE(check_pattern(ptr_private, 10, page_size)); + + /* Now write out every other page to MAP_PRIVATE. */ + for (i = 0; i < 10; i += 2) { + char *ptr = &ptr_private[i * page_size]; + + memset(ptr, 'a' + i, page_size); + } + + /* + * At this point the mapping is: + * + * 0123456789 + * SPSPSPSPSP + * + * Where S = shared, P = private mappings. + */ + + /* Now mark the beginning of the mapping guarded. */ + ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0); + + /* + * This renders the mapping: + * + * 0123456789 + * xxxxxPSPSP + */ + + for (i = 0; i < 10; i++) { + char *ptr = &ptr_private[i * page_size]; + + /* Ensure guard regions as expected. */ + ASSERT_EQ(try_read_buf(ptr), i >= 5); + /* The shared mapping should always succeed. 
*/ + ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size])); + } + + /* Remove the guard regions altogether. */ + ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* + * + * We now expect the mapping to be: + * + * 0123456789 + * SSSSSPSPSP + * + * As we removed guard regions, the private pages from the first 5 will + * have been zapped, so on fault will reestablish the shared mapping. + */ + + for (i = 0; i < 10; i++) { + char *ptr = &ptr_private[i * page_size]; + + /* + * Assert that shared mappings in the MAP_PRIVATE mapping match + * the shared mapping. + */ + if (i < 5 || i % 2 == 0) { + char *ptr_s = &ptr_shared[i * page_size]; + + ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0); + continue; + } + + /* Everything else is a private mapping. */ + ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i)); + } + + ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0); + ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0); +} + +/* Test that guard regions established over a read-only mapping function correctly. */ +TEST_F(guard_regions, readonly_file) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Read-only test specific to file-backed"); + + /* Map shared so we can populate with pattern, populate it, unmap. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + /* Close the fd so we can re-open read-only. */ + ASSERT_EQ(close(self->fd), 0); + + /* Re-open read-only. */ + self->fd = open(self->path, O_RDONLY); + ASSERT_NE(self->fd, -1); + /* Re-map read-only. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Mark every other page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_pg = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Assert that the guard regions are in place.*/ + for (i = 0; i < 10; i++) { + char *ptr_pg = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0); + } + + /* Remove guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Ensure the data is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, fault_around) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Fault-around test specific to file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Establish a pattern in the backing file. */ + set_pattern(ptr, 10, page_size); + + /* + * Now drop it from the page cache so we get major faults when next we + * map it. + */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0); + + /* Unmap and remap 'to be sure'. */ + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now fault in every odd page. This should trigger fault-around. 
*/ + for (i = 1; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_TRUE(try_read_buf(ptr_p)); + } + + /* Finally, ensure that guard regions are intact as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, truncation) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Truncation test specific to file-backed"); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* + * Establish a pattern in the backing file, just so there is data + * there. + */ + set_pattern(ptr, 10, page_size); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + /* Now truncate to actually used size (initialised to 100). */ + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Here the guard regions will remain intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + /* Now truncate to half the size, then truncate again to the full size. */ + ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0); + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Again, guard pages will remain intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +TEST_F(guard_regions, hole_punch) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing == ANON_BACKED) + SKIP(return, "Truncation test specific to file-backed"); + + /* Establish pattern in mapping. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + + /* Install a guard region in the middle of the mapping. */ + ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size, + MADV_GUARD_INSTALL), 0); + + /* + * The buffer will now be: + * + * 0123456789 + * ***xxxx*** + * + * Where * is data and x is the guard region. + */ + + /* Ensure established. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7); + } + + /* Now hole punch the guarded region. */ + ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size, + MADV_REMOVE), 0); + + /* Ensure guard regions remain. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7); + } + + /* Now remove guard region throughout. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Check that the pattern exists in non-hole punched region. */ + ASSERT_TRUE(check_pattern(ptr, 3, page_size)); + /* Check that hole punched region is zeroed. */ + ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0')); + /* Check that the pattern exists in the remainder of the file. 
*/ + ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7)); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + +/* + * Ensure that a memfd works correctly with guard regions, that we can write + * seal it then open the mapping read-only and still establish guard regions + * within, remove those guard regions and have everything work correctly. + */ +TEST_F(guard_regions, memfd_write_seal) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (variant->backing != SHMEM_BACKED) + SKIP(return, "memfd write seal test specific to shmem"); + + /* OK, we need a memfd, so close existing one. */ + ASSERT_EQ(close(self->fd), 0); + + /* Create and truncate memfd. */ + self->fd = memfd_create("guard_regions_memfd_seals_test", + MFD_ALLOW_SEALING); + ASSERT_NE(self->fd, -1); + ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0); + + /* Map, set pattern, unmap. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + set_pattern(ptr, 10, page_size); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); + + /* Write-seal the memfd. */ + ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0); + + /* Now map the memfd readonly. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Ensure pattern is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + /* Now remove guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Ensure pattern is as expected. */ + ASSERT_TRUE(check_pattern(ptr, 10, page_size)); + + /* Ensure write seal intact. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_FALSE(try_write_buf(ptr_p)); + } + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + + +/* + * Since we are now permitted to establish guard regions in read-only anonymous + * mappings, for the sake of thoroughness, though it probably has no practical + * use, test that guard regions function with a mapping to the anonymous zero + * page. + */ +TEST_F(guard_regions, anon_zeropage) +{ + const unsigned long page_size = self->page_size; + char *ptr; + int i; + + if (!is_anon_backed(variant)) + SKIP(return, "anon zero page test specific to anon/shmem"); + + /* Obtain a read-only i.e. anon zero page mapping. */ + ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Now make every even page guarded. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Now assert things are as expected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0); + } + + /* Now remove all guard regions. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* Now assert things are as expected. 
*/ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_TRUE(try_read_buf(ptr_p)); + } + + /* Ensure zero page...*/ + ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0')); + + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + TEST_HARNESS_MAIN From 18ea595a07bcf931ec6efae5c1f8a0d9a440ed97 Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Thu, 13 Feb 2025 12:44:53 +0100 Subject: [PATCH 098/431] maple_tree: remove a BUG_ON() in mas_alloc_nodes() Remove a BUG_ON() right before a WARN_ON() with the same condition. Calling WARN_ON() and BUG_ON() here is definitely wrong. Since the goal is generally to remove BUG_ON() invocations from the kernel, keep only the WARN_ON(). Link: https://lkml.kernel.org/r/20250213114453.1078318-1-ptesarik@suse.com Fixes: 067311d33e65 ("maple_tree: separate ma_state node from status") Signed-off-by: Petr Tesarik Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- lib/maple_tree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 60356ccb11ce..d0bea23fa4bc 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1242,7 +1242,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) if (mas->mas_flags & MA_STATE_PREALLOC) { if (allocated) return; - BUG_ON(!allocated); WARN_ON(!allocated); } From b23ceebd63d85135c9a5061a535ea85210f94f3f Mon Sep 17 00:00:00 2001 From: Guanjun Date: Thu, 13 Feb 2025 13:56:12 +0800 Subject: [PATCH 099/431] filemap: remove redundant folio_test_large check in filemap_free_folio The folio_test_large() check in filemap_free_folio() is unnecessary because folio_nr_pages(), which is called internally already performs this check. Removing the redundant condition simplifies the code and avoids double validation. This change improves code readability and reduces unnecessary operations in the folio freeing path. Link: https://lkml.kernel.org/r/20250213055612.490993-1-guanjun@linux.alibaba.com Signed-off-by: Guanjun Acked-by: David Hildenbrand Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/filemap.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 6e3d27993b67..152993a86de3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -227,15 +227,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow) void filemap_free_folio(struct address_space *mapping, struct folio *folio) { void (*free_folio)(struct folio *); - int refs = 1; free_folio = mapping->a_ops->free_folio; if (free_folio) free_folio(folio); - if (folio_test_large(folio)) - refs = folio_nr_pages(folio); - folio_put_refs(folio, refs); + folio_put_refs(folio, folio_nr_pages(folio)); } /** From faeb2831b517931f522f971e184b50ac82d29633 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 14 Feb 2025 22:30:12 +1300 Subject: [PATCH 100/431] mm: set folio swapbacked iff folios are dirty in try_to_unmap_one MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: batched unmap lazyfree large folios during reclamation", v4. Commit 735ecdfaf4e8 ("mm/vmscan: avoid split lazyfree THP during shrink_folio_list()") prevents the splitting of MADV_FREE'd THP in madvise.c. However, those folios are still added to the deferred_split list in try_to_unmap_one() because we are unmapping PTEs and removing rmap entries one by one. 
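For illustration only (not part of this series), here is a minimal userspace sketch of how such lazyfree memory typically comes about; the 64 MiB size and the assumption that the region is backed by (m)THP-sized large folios are examples, not requirements:

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t size = 64 * 1024 * 1024;	/* example size only */
	char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 1, size);		/* populate, possibly as large folios */
	madvise(p, size, MADV_FREE);	/* clean anon memory becomes lazyfree */
	/*
	 * The pages stay mapped; reclaim may discard them without swap I/O
	 * unless they are redirtied first.  A real test would keep the
	 * mapping alive and force reclaim, e.g. with MADV_PAGEOUT as the
	 * microbenchmark in patch 3/4 does.
	 */
	return 0;
}

During reclaim these folios reach try_to_unmap_one(), and its current one-PTE-at-a-time behaviour is what the rest of this cover letter is about.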
Firstly, this has rendered the following counter somewhat confusing, /sys/kernel/mm/transparent_hugepage/hugepages-size/stats/split_deferred The split_deferred counter was originally designed to track operations such as partial unmap or madvise of large folios. However, in practice, most split_deferred cases arise from memory reclamation of aligned lazyfree mTHPs as observed by Tangquan. This discrepancy has made the split_deferred counter highly misleading. Secondly, this approach is slow because it requires iterating through each PTE and removing the rmap one by one for a large folio. In fact, all PTEs of a pte-mapped large folio should be unmapped at once, and the entire folio should be removed from the rmap as a whole. Thirdly, it also increases the risk of a race condition where lazyfree folios are incorrectly set back to swapbacked, as a speculative folio_get may occur in the shrinker's callback. deferred_split_scan() might call folio_try_get(folio) since we have added the folio to split_deferred list while removing rmap for the 1st subpage, and while we are scanning the 2nd to nr_pages PTEs of this folio in try_to_unmap_one(), the entire mTHP could be transitioned back to swap-backed because the reference count is incremented, which can make "ref_count == 1 + map_count" within try_to_unmap_one() false. /* * The only page refs must be one from isolation * plus the rmap(s) (dropped by discard:). */ if (ref_count == 1 + map_count && (!folio_test_dirty(folio) || ... (vma->vm_flags & VM_DROPPABLE))) { dec_mm_counter(mm, MM_ANONPAGES); goto discard; } This patchset resolves the issue by marking only genuinely dirty folios as swap-backed, as suggested by David, and transitioning to batched unmapping of entire folios in try_to_unmap_one(). Consequently, the deferred_split count drops to zero, and memory reclamation performance improves significantly — reclaiming 64KiB lazyfree large folios is now 2.5x faster(The specific data is embedded in the changelog of patch 3/4). By the way, while the patchset is primarily aimed at PTE-mapped large folios, Baolin and Lance also found that try_to_unmap_one() handles lazyfree redirtied PMD-mapped large folios inefficiently — it splits the PMD into PTEs and iterates over them. This patchset removes the unnecessary splitting, enabling us to skip redirtied PMD-mapped large folios 3.5X faster during memory reclamation. (The specific data can be found in the changelog of patch 4/4). This patch (of 4): The refcount may be temporarily or long-term increased, but this does not change the fundamental nature of the folio already being lazy- freed. Therefore, we only reset 'swapbacked' when we are certain the folio is dirty and not droppable. Link: https://lkml.kernel.org/r/20250214093015.51024-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20250214093015.51024-2-21cnbao@gmail.com Fixes: 6c8e2a256915 ("mm: fix race between MADV_FREE reclaim and blkdev direct IO read") Signed-off-by: Barry Song Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Reviewed-by: Baolin Wang Reviewed-by: Lance Yang Cc: Mauricio Faria de Oliveira Cc: Chis Li (Google) Cc: "Huang, Ying" Cc: Kairui Song Cc: Lorenzo Stoakes Cc: Ryan Roberts Cc: Tangquan Zheng Cc: Albert Ou Cc: Anshuman Khandual Cc: Borislav Petkov Cc: Catalin Marinas Cc: Dave Hansen Cc: Gavin Shan Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Kefeng Wang Cc: "Kirill A. 
Shutemov" Cc: Mark Rutland Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Shaoqin Huang Cc: Thomas Gleixner Cc: Will Deacon Cc: Yicong Yang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/rmap.c | 49 ++++++++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 27 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 24bacce9971f..5c208e1c8266 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1963,34 +1963,29 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, */ smp_rmb(); - /* - * The only page refs must be one from isolation - * plus the rmap(s) (dropped by discard:). - */ - if (ref_count == 1 + map_count && - (!folio_test_dirty(folio) || - /* - * Unlike MADV_FREE mappings, VM_DROPPABLE - * ones can be dropped even if they've - * been dirtied. - */ - (vma->vm_flags & VM_DROPPABLE))) { - dec_mm_counter(mm, MM_ANONPAGES); - goto discard; - } - - /* - * If the folio was redirtied, it cannot be - * discarded. Remap the page to page table. - */ - set_pte_at(mm, address, pvmw.pte, pteval); - /* - * Unlike MADV_FREE mappings, VM_DROPPABLE ones - * never get swap backed on failure to drop. - */ - if (!(vma->vm_flags & VM_DROPPABLE)) + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + /* + * redirtied either using the page table or a previously + * obtained GUP reference. + */ + set_pte_at(mm, address, pvmw.pte, pteval); folio_set_swapbacked(folio); - goto walk_abort; + goto walk_abort; + } else if (ref_count != 1 + map_count) { + /* + * Additional reference. Could be a GUP reference or any + * speculative reference. GUP users must mark the folio + * dirty if there was a modification. This folio cannot be + * reclaimed right now either way, so act just like nothing + * happened. + * We'll come back here later and detect if the folio was + * dirtied when the additional reference is gone. + */ + set_pte_at(mm, address, pvmw.pte, pteval); + goto walk_abort; + } + dec_mm_counter(mm, MM_ANONPAGES); + goto discard; } if (swap_duplicate(entry) < 0) { From 2f4ab3ac10e1476abb6ed55f0b5f176cf635e776 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 14 Feb 2025 22:30:13 +1300 Subject: [PATCH 101/431] mm: support tlbbatch flush for a range of PTEs This patch lays the groundwork for supporting batch PTE unmapping in try_to_unmap_one(). It introduces range handling for TLB batch flushing, with the range currently set to the size of PAGE_SIZE. The function __flush_tlb_range_nosync() is architecture-specific and is only used within arch/arm64. This function requires the mm structure instead of the vma structure. To allow its reuse by arch_tlbbatch_add_pending(), which operates with mm but not vma, this patch modifies the argument of __flush_tlb_range_nosync() to take mm as its parameter. Link: https://lkml.kernel.org/r/20250214093015.51024-3-21cnbao@gmail.com Signed-off-by: Barry Song Acked-by: Will Deacon Reviewed-by: Kefeng Wang Cc: Catalin Marinas Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Anshuman Khandual Cc: Ryan Roberts Cc: Shaoqin Huang Cc: Gavin Shan Cc: Mark Rutland Cc: David Hildenbrand Cc: Lance Yang Cc: "Kirill A. 
Shutemov" Cc: Yosry Ahmed Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Albert Ou Cc: Yicong Yang Cc: Baolin Wang Cc: Chis Li Cc: "Huang, Ying" Cc: Kairui Song Cc: Lorenzo Stoakes Cc: Mauricio Faria de Oliveira Cc: Tangquan Zheng Signed-off-by: Andrew Morton --- arch/arm64/include/asm/tlbflush.h | 23 +++++++++++------------ arch/arm64/mm/contpte.c | 2 +- arch/riscv/include/asm/tlbflush.h | 3 +-- arch/riscv/mm/tlbflush.c | 3 +-- arch/x86/include/asm/tlbflush.h | 3 +-- mm/rmap.c | 10 +++++----- 6 files changed, 20 insertions(+), 24 deletions(-) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index bc94e036a26b..b7e1920570bd 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -322,13 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) return true; } -static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) -{ - __flush_tlb_page_nosync(mm, uaddr); -} - /* * If mprotect/munmap/etc occurs during TLB batched flushing, we need to * synchronise all the TLBI issued with a DSB to avoid the race mentioned in @@ -448,7 +441,7 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long start, return false; } -static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, +static inline void __flush_tlb_range_nosync(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) @@ -460,12 +453,12 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, pages = (end - start) >> PAGE_SHIFT; if (__flush_tlb_range_limit_excess(start, end, pages, stride)) { - flush_tlb_mm(vma->vm_mm); + flush_tlb_mm(mm); return; } dsb(ishst); - asid = ASID(vma->vm_mm); + asid = ASID(mm); if (last_level) __flush_tlb_range_op(vale1is, start, pages, stride, asid, @@ -474,7 +467,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true, lpa2_is_enabled()); - mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } static inline void __flush_tlb_range(struct vm_area_struct *vma, @@ -482,7 +475,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, unsigned long stride, bool last_level, int tlb_level) { - __flush_tlb_range_nosync(vma, start, end, stride, + __flush_tlb_range_nosync(vma->vm_mm, start, end, stride, last_level, tlb_level); dsb(ish); } @@ -533,6 +526,12 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, unsigned long start, unsigned long end) +{ + __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3); +} #endif #endif diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 55107d27d3f8..bcac4f55f9c1 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -335,7 +335,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, * eliding the trailing DSB applies here. 
*/ addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); - __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE, + __flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE, PAGE_SIZE, true, 3); } diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 72e559934952..ce0dd0fed764 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -60,8 +60,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, bool arch_tlbbatch_should_defer(struct mm_struct *mm); void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr); + struct mm_struct *mm, unsigned long start, unsigned long end); void arch_flush_tlb_batched_pending(struct mm_struct *mm); void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 9b6e86ce3867..74dd9307fbf1 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -186,8 +186,7 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm) } void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) + struct mm_struct *mm, unsigned long start, unsigned long end) { cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); } diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 02fc2aa06e9e..29373da7b00a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -279,8 +279,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) } static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, - struct mm_struct *mm, - unsigned long uaddr) + struct mm_struct *mm, unsigned long start, unsigned long end) { inc_mm_tlb_gen(mm); cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); diff --git a/mm/rmap.c b/mm/rmap.c index 5c208e1c8266..765e541ac9be 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -672,7 +672,7 @@ void try_to_unmap_flush_dirty(void) (TLB_FLUSH_BATCH_PENDING_MASK / 2) static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, - unsigned long uaddr) + unsigned long start, unsigned long end) { struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; int batch; @@ -681,7 +681,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, if (!pte_accessible(mm, pteval)) return; - arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); + arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); tlb_ubc->flush_required = true; /* @@ -757,7 +757,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm) } #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, - unsigned long uaddr) + unsigned long start, unsigned long end) { } @@ -1887,7 +1887,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); - set_tlb_ubc_flush_pending(mm, pteval, address); + set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } @@ -2270,7 +2270,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); - set_tlb_ubc_flush_pending(mm, pteval, address); + set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } From 354dffd29575cdf13154e8fb787322354aa9efc4 Mon Sep 17 
00:00:00 2001 From: Barry Song Date: Fri, 14 Feb 2025 22:30:14 +1300 Subject: [PATCH 102/431] mm: support batched unmap for lazyfree large folios during reclamation Currently, the PTEs and rmap of a large folio are removed one at a time. This is not only slow but also causes the large folio to be unnecessarily added to deferred_split, which can lead to races between the deferred_split shrinker callback and memory reclamation. This patch releases all PTEs and rmap entries in a batch. Currently, it only handles lazyfree large folios. The below microbench tries to reclaim 128MB lazyfree large folios whose sizes are 64KiB: #include #include #include #include #define SIZE 128*1024*1024 // 128 MB unsigned long read_split_deferred() { FILE *file = fopen("/sys/kernel/mm/transparent_hugepage" "/hugepages-64kB/stats/split_deferred", "r"); if (!file) { perror("Error opening file"); return 0; } unsigned long value; if (fscanf(file, "%lu", &value) != 1) { perror("Error reading value"); fclose(file); return 0; } fclose(file); return value; } int main(int argc, char *argv[]) { while(1) { volatile int *p = mmap(0, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); memset((void *)p, 1, SIZE); madvise((void *)p, SIZE, MADV_FREE); clock_t start_time = clock(); unsigned long start_split = read_split_deferred(); madvise((void *)p, SIZE, MADV_PAGEOUT); clock_t end_time = clock(); unsigned long end_split = read_split_deferred(); double elapsed_time = (double)(end_time - start_time) / CLOCKS_PER_SEC; printf("Time taken by reclamation: %f seconds, split_deferred: %ld\n", elapsed_time, end_split - start_split); munmap((void *)p, SIZE); } return 0; } w/o patch: ~ # ./a.out Time taken by reclamation: 0.177418 seconds, split_deferred: 2048 Time taken by reclamation: 0.178348 seconds, split_deferred: 2048 Time taken by reclamation: 0.174525 seconds, split_deferred: 2048 Time taken by reclamation: 0.171620 seconds, split_deferred: 2048 Time taken by reclamation: 0.172241 seconds, split_deferred: 2048 Time taken by reclamation: 0.174003 seconds, split_deferred: 2048 Time taken by reclamation: 0.171058 seconds, split_deferred: 2048 Time taken by reclamation: 0.171993 seconds, split_deferred: 2048 Time taken by reclamation: 0.169829 seconds, split_deferred: 2048 Time taken by reclamation: 0.172895 seconds, split_deferred: 2048 Time taken by reclamation: 0.176063 seconds, split_deferred: 2048 Time taken by reclamation: 0.172568 seconds, split_deferred: 2048 Time taken by reclamation: 0.171185 seconds, split_deferred: 2048 Time taken by reclamation: 0.170632 seconds, split_deferred: 2048 Time taken by reclamation: 0.170208 seconds, split_deferred: 2048 Time taken by reclamation: 0.174192 seconds, split_deferred: 2048 ... 
w/ patch: ~ # ./a.out Time taken by reclamation: 0.074231 seconds, split_deferred: 0 Time taken by reclamation: 0.071026 seconds, split_deferred: 0 Time taken by reclamation: 0.072029 seconds, split_deferred: 0 Time taken by reclamation: 0.071873 seconds, split_deferred: 0 Time taken by reclamation: 0.073573 seconds, split_deferred: 0 Time taken by reclamation: 0.071906 seconds, split_deferred: 0 Time taken by reclamation: 0.073604 seconds, split_deferred: 0 Time taken by reclamation: 0.075903 seconds, split_deferred: 0 Time taken by reclamation: 0.073191 seconds, split_deferred: 0 Time taken by reclamation: 0.071228 seconds, split_deferred: 0 Time taken by reclamation: 0.071391 seconds, split_deferred: 0 Time taken by reclamation: 0.071468 seconds, split_deferred: 0 Time taken by reclamation: 0.071896 seconds, split_deferred: 0 Time taken by reclamation: 0.072508 seconds, split_deferred: 0 Time taken by reclamation: 0.071884 seconds, split_deferred: 0 Time taken by reclamation: 0.072433 seconds, split_deferred: 0 Time taken by reclamation: 0.071939 seconds, split_deferred: 0 ... Link: https://lkml.kernel.org/r/20250214093015.51024-4-21cnbao@gmail.com Signed-off-by: Barry Song Cc: Albert Ou Cc: Anshuman Khandual Cc: Baolin Wang Cc: Borislav Petkov Cc: Catalin Marinas Cc: Chis Li Cc: Dave Hansen Cc: David Hildenbrand Cc: Gavin Shan Cc: "H. Peter Anvin" Cc: "Huang, Ying" Cc: Ingo Molnar Cc: Kairui Song Cc: Kefeng Wang Cc: "Kirill A. Shutemov" Cc: Lance Yang Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Mauricio Faria de Oliveira Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Ryan Roberts Cc: Shaoqin Huang Cc: Tangquan Zheng Cc: Thomas Gleixner Cc: Will Deacon Cc: Yicong Yang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/rmap.c | 72 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 765e541ac9be..7a93a7cd2c64 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1722,6 +1722,25 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page, #endif } +/* We support batch unmapping of PTEs for lazyfree large folios */ +static inline bool can_batch_unmap_folio_ptes(unsigned long addr, + struct folio *folio, pte_t *ptep) +{ + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; + int max_nr = folio_nr_pages(folio); + pte_t pte = ptep_get(ptep); + + if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) + return false; + if (pte_unused(pte)) + return false; + if (pte_pfn(pte) != folio_pfn(folio)) + return false; + + return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL, + NULL, NULL) == max_nr; +} + /* * @arg: enum ttu_flags will be passed to this argument */ @@ -1735,6 +1754,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, struct page *subpage; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long nr_pages = 1, end_addr; unsigned long pfn; unsigned long hsz = 0; @@ -1874,23 +1894,26 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, if (pte_dirty(pteval)) folio_mark_dirty(folio); } else if (likely(pte_present(pteval))) { - flush_cache_page(vma, address, pfn); - /* Nuke the page table entry. */ - if (should_defer_flush(mm, flags)) { - /* - * We clear the PTE but do not flush so potentially - * a remote CPU could still be writing to the folio. 
- * If the entry was previously clean then the - * architecture must guarantee that a clear->dirty - * transition on a cached TLB entry is written through - * and traps if the PTE is unmapped. - */ - pteval = ptep_get_and_clear(mm, address, pvmw.pte); + if (folio_test_large(folio) && !(flags & TTU_HWPOISON) && + can_batch_unmap_folio_ptes(address, folio, pvmw.pte)) + nr_pages = folio_nr_pages(folio); + end_addr = address + nr_pages * PAGE_SIZE; + flush_cache_range(vma, address, end_addr); - set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); - } else { - pteval = ptep_clear_flush(vma, address, pvmw.pte); - } + /* Nuke the page table entry. */ + pteval = get_and_clear_full_ptes(mm, address, pvmw.pte, nr_pages, 0); + /* + * We clear the PTE but do not flush so potentially + * a remote CPU could still be writing to the folio. + * If the entry was previously clean then the + * architecture must guarantee that a clear->dirty + * transition on a cached TLB entry is written through + * and traps if the PTE is unmapped. + */ + if (should_defer_flush(mm, flags)) + set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); + else + flush_tlb_range(vma, address, end_addr); if (pte_dirty(pteval)) folio_mark_dirty(folio); } else { @@ -1968,7 +1991,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, * redirtied either using the page table or a previously * obtained GUP reference. */ - set_pte_at(mm, address, pvmw.pte, pteval); + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); folio_set_swapbacked(folio); goto walk_abort; } else if (ref_count != 1 + map_count) { @@ -1981,10 +2004,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, * We'll come back here later and detect if the folio was * dirtied when the additional reference is gone. */ - set_pte_at(mm, address, pvmw.pte, pteval); + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); goto walk_abort; } - dec_mm_counter(mm, MM_ANONPAGES); + add_mm_counter(mm, MM_ANONPAGES, -nr_pages); goto discard; } @@ -2049,13 +2072,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter_file(folio)); } discard: - if (unlikely(folio_test_hugetlb(folio))) + if (unlikely(folio_test_hugetlb(folio))) { hugetlb_remove_rmap(folio); - else - folio_remove_rmap_pte(folio, subpage, vma); + } else { + folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); + folio_ref_sub(folio, nr_pages - 1); + } if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); + /* We have already batched the entire folio */ + if (nr_pages > 1) + goto walk_done; continue; walk_abort: ret = false; From 2f9b43d617e2685728568ca609c5c77e45d6f1e8 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 14 Feb 2025 22:30:15 +1300 Subject: [PATCH 103/431] mm: avoid splitting pmd for lazyfree pmd-mapped THP in try_to_unmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The try_to_unmap_one() function currently handles PMD-mapped THPs inefficiently. It first splits the PMD into PTEs, copies the dirty state from the PMD to the PTEs, iterates over the PTEs to locate the dirty state, and then marks the THP as swap-backed. This process involves unnecessary PMD splitting and redundant iteration. Instead, this functionality can be efficiently managed in __discard_anon_folio_pmd_locked(), avoiding the extra steps and improving performance. 
The following microbenchmark redirties folios after invoking MADV_FREE, then measures the time taken to perform memory reclamation (actually set those folios swapbacked again) on the redirtied folios. #include #include #include #include #define SIZE 128*1024*1024 // 128 MB int main(int argc, char *argv[]) { while(1) { volatile int *p = mmap(0, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); memset((void *)p, 1, SIZE); madvise((void *)p, SIZE, MADV_FREE); /* redirty after MADV_FREE */ memset((void *)p, 1, SIZE); clock_t start_time = clock(); madvise((void *)p, SIZE, MADV_PAGEOUT); clock_t end_time = clock(); double elapsed_time = (double)(end_time - start_time) / CLOCKS_PER_SEC; printf("Time taken by reclamation: %f seconds\n", elapsed_time); munmap((void *)p, SIZE); } return 0; } Testing results are as below, w/o patch: ~ # ./a.out Time taken by reclamation: 0.007300 seconds Time taken by reclamation: 0.007226 seconds Time taken by reclamation: 0.007295 seconds Time taken by reclamation: 0.007731 seconds Time taken by reclamation: 0.007134 seconds Time taken by reclamation: 0.007285 seconds Time taken by reclamation: 0.007720 seconds Time taken by reclamation: 0.007128 seconds Time taken by reclamation: 0.007710 seconds Time taken by reclamation: 0.007712 seconds Time taken by reclamation: 0.007236 seconds Time taken by reclamation: 0.007690 seconds Time taken by reclamation: 0.007174 seconds Time taken by reclamation: 0.007670 seconds Time taken by reclamation: 0.007169 seconds Time taken by reclamation: 0.007305 seconds Time taken by reclamation: 0.007432 seconds Time taken by reclamation: 0.007158 seconds Time taken by reclamation: 0.007133 seconds … w/ patch ~ # ./a.out Time taken by reclamation: 0.002124 seconds Time taken by reclamation: 0.002116 seconds Time taken by reclamation: 0.002150 seconds Time taken by reclamation: 0.002261 seconds Time taken by reclamation: 0.002137 seconds Time taken by reclamation: 0.002173 seconds Time taken by reclamation: 0.002063 seconds Time taken by reclamation: 0.002088 seconds Time taken by reclamation: 0.002169 seconds Time taken by reclamation: 0.002124 seconds Time taken by reclamation: 0.002111 seconds Time taken by reclamation: 0.002224 seconds Time taken by reclamation: 0.002297 seconds Time taken by reclamation: 0.002260 seconds Time taken by reclamation: 0.002246 seconds Time taken by reclamation: 0.002272 seconds Time taken by reclamation: 0.002277 seconds Time taken by reclamation: 0.002462 seconds … This patch significantly speeds up try_to_unmap_one() by allowing it to skip redirtied THPs without splitting the PMD. Link: https://lkml.kernel.org/r/20250214093015.51024-5-21cnbao@gmail.com Signed-off-by: Barry Song Suggested-by: Baolin Wang Suggested-by: Lance Yang Reviewed-by: Baolin Wang Reviewed-by: Lance Yang Cc: Albert Ou Cc: Anshuman Khandual Cc: Borislav Petkov Cc: Catalin Marinas Cc: Chis Li Cc: Dave Hansen Cc: David Hildenbrand Cc: Gavin Shan Cc: "H. Peter Anvin" Cc: "Huang, Ying" Cc: Ingo Molnar Cc: Kairui Song Cc: Kefeng Wang Cc: "Kirill A. 
Shutemov" Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Mauricio Faria de Oliveira Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Ryan Roberts Cc: Shaoqin Huang Cc: Tangquan Zheng Cc: Thomas Gleixner Cc: Will Deacon Cc: Yicong Yang Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/huge_memory.c | 24 +++++++++++++++++------- mm/rmap.c | 13 ++++++++++--- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e7ac4f0dc21d..acb12653484e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3063,8 +3063,12 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, int ref_count, map_count; pmd_t orig_pmd = *pmdp; - if (folio_test_dirty(folio) || pmd_dirty(orig_pmd)) + if (pmd_dirty(orig_pmd)) + folio_set_dirty(folio); + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + folio_set_swapbacked(folio); return false; + } orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp); @@ -3091,8 +3095,15 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma, * * The only folio refs must be one from isolation plus the rmap(s). */ - if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) || - ref_count != map_count + 1) { + if (pmd_dirty(orig_pmd)) + folio_set_dirty(folio); + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + folio_set_swapbacked(folio); + set_pmd_at(mm, addr, pmdp, orig_pmd); + return false; + } + + if (ref_count != map_count + 1) { set_pmd_at(mm, addr, pmdp, orig_pmd); return false; } @@ -3112,12 +3123,11 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, { VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio); VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE)); - if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) - return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); - - return false; + return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio); } static void remap_page(struct folio *folio, unsigned long nr, int flags) diff --git a/mm/rmap.c b/mm/rmap.c index 7a93a7cd2c64..333ecac049b2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1804,9 +1804,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, } if (!pvmw.pte) { - if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, - folio)) - goto walk_done; + if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { + if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) + goto walk_done; + /* + * unmap_huge_pmd_locked has either already marked + * the folio as swap-backed or decided to retain it + * due to GUP or speculative references. + */ + goto walk_abort; + } if (flags & TTU_SPLIT_HUGE_PMD) { /* From b2ae5fccb8c0ec2167e6e6c5c5a5eb8d656f70ef Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:38 -0800 Subject: [PATCH 104/431] mm: introduce vma_start_read_locked{_nested} helpers Patch series "reimplement per-vma lock as a refcount", v10. Back when per-vma locks were introduces, vm_lock was moved out of vm_area_struct in [1] because of the performance regression caused by false cacheline sharing. Recent investigation [2] revealed that the regressions is limited to a rather old Broadwell microarchitecture and even there it can be mitigated by disabling adjacent cacheline prefetching, see [3]. 
Splitting single logical structure into multiple ones leads to more complicated management, extra pointer dereferences and overall less maintainable code. When that split-away part is a lock, it complicates things even further. With no performance benefits, there are no reasons for this split. Merging the vm_lock back into vm_area_struct also allows vm_area_struct to use SLAB_TYPESAFE_BY_RCU later in this patchset. This patchset: 1. moves vm_lock back into vm_area_struct, aligning it at the cacheline boundary and changing the cache to be cacheline-aligned to minimize cacheline sharing; 2. changes vm_area_struct initialization to mark new vma as detached until it is inserted into vma tree; 3. replaces vm_lock and vma->detached flag with a reference counter; 4. regroups vm_area_struct members to fit them into 3 cachelines; 5. changes vm_area_struct cache to SLAB_TYPESAFE_BY_RCU to allow for their reuse and to minimize call_rcu() calls. Pagefault microbenchmarks show performance improvement: Hmean faults/cpu-1 507926.5547 ( 0.00%) 506519.3692 * -0.28%* Hmean faults/cpu-4 479119.7051 ( 0.00%) 481333.6802 * 0.46%* Hmean faults/cpu-7 452880.2961 ( 0.00%) 455845.6211 * 0.65%* Hmean faults/cpu-12 347639.1021 ( 0.00%) 352004.2254 * 1.26%* Hmean faults/cpu-21 200061.2238 ( 0.00%) 229597.0317 * 14.76%* Hmean faults/cpu-30 145251.2001 ( 0.00%) 164202.5067 * 13.05%* Hmean faults/cpu-48 106848.4434 ( 0.00%) 120641.5504 * 12.91%* Hmean faults/cpu-56 92472.3835 ( 0.00%) 103464.7916 * 11.89%* Hmean faults/sec-1 507566.1468 ( 0.00%) 506139.0811 * -0.28%* Hmean faults/sec-4 1880478.2402 ( 0.00%) 1886795.6329 * 0.34%* Hmean faults/sec-7 3106394.3438 ( 0.00%) 3140550.7485 * 1.10%* Hmean faults/sec-12 4061358.4795 ( 0.00%) 4112477.0206 * 1.26%* Hmean faults/sec-21 3988619.1169 ( 0.00%) 4577747.1436 * 14.77%* Hmean faults/sec-30 3909839.5449 ( 0.00%) 4311052.2787 * 10.26%* Hmean faults/sec-48 4761108.4691 ( 0.00%) 5283790.5026 * 10.98%* Hmean faults/sec-56 4885561.4590 ( 0.00%) 5415839.4045 * 10.85%* This patch (of 18): Introduce helper functions which can be used to read-lock a VMA when holding mmap_lock for read. Replace direct accesses to vma->vm_lock with these new helpers. Link: https://lkml.kernel.org/r/20250213224655.1680278-1-surenb@google.com Link: https://lkml.kernel.org/r/20250213224655.1680278-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Lorenzo Stoakes Reviewed-by: Davidlohr Bueso Reviewed-by: Shakeel Butt Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 24 ++++++++++++++++++++++++ mm/userfaultfd.c | 22 +++++----------------- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index fd1e85b4b48a..6a4914bc1a38 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -735,6 +735,30 @@ static inline bool vma_start_read(struct vm_area_struct *vma) return true; } +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) +{ + mmap_assert_locked(vma->vm_mm); + down_read_nested(&vma->vm_lock->lock, subclass); +} + +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline void vma_start_read_locked(struct vm_area_struct *vma) +{ + mmap_assert_locked(vma->vm_mm); + down_read(&vma->vm_lock->lock); +} + static inline void vma_end_read(struct vm_area_struct *vma) { rcu_read_lock(); /* keeps vma alive till the end of up_read */ diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index d06453fa8aba..48ac81bbfee6 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -85,16 +85,8 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm, mmap_read_lock(mm); vma = find_vma_and_prepare_anon(mm, address); - if (!IS_ERR(vma)) { - /* - * We cannot use vma_start_read() as it may fail due to - * false locked (see comment in vma_start_read()). We - * can avoid that by directly locking vm_lock under - * mmap_lock, which guarantees that nobody can lock the - * vma for write (vma_start_write()) under us. - */ - down_read(&vma->vm_lock->lock); - } + if (!IS_ERR(vma)) + vma_start_read_locked(vma); mmap_read_unlock(mm); return vma; @@ -1564,14 +1556,10 @@ static int uffd_move_lock(struct mm_struct *mm, mmap_read_lock(mm); err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); if (!err) { - /* - * See comment in uffd_lock_vma() as to why not using - * vma_start_read() here. - */ - down_read(&(*dst_vmap)->vm_lock->lock); + vma_start_read_locked(*dst_vmap); if (*dst_vmap != *src_vmap) - down_read_nested(&(*src_vmap)->vm_lock->lock, - SINGLE_DEPTH_NESTING); + vma_start_read_locked_nested(*src_vmap, + SINGLE_DEPTH_NESTING); } mmap_read_unlock(mm); return err; From 7b6218ae1253491d56f21f4b1f3609f3dd873600 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:39 -0800 Subject: [PATCH 105/431] mm: move per-vma lock into vm_area_struct Back when per-vma locks were introduces, vm_lock was moved out of vm_area_struct in [1] because of the performance regression caused by false cacheline sharing. Recent investigation [2] revealed that the regressions is limited to a rather old Broadwell microarchitecture and even there it can be mitigated by disabling adjacent cacheline prefetching, see [3]. 
Splitting single logical structure into multiple ones leads to more complicated management, extra pointer dereferences and overall less maintainable code. When that split-away part is a lock, it complicates things even further. With no performance benefits, there are no reasons for this split. Merging the vm_lock back into vm_area_struct also allows vm_area_struct to use SLAB_TYPESAFE_BY_RCU later in this patchset. Move vm_lock back into vm_area_struct, aligning it at the cacheline boundary and changing the cache to be cacheline-aligned as well. With kernel compiled using defconfig, this causes VMA memory consumption to grow from 160 (vm_area_struct) + 40 (vm_lock) bytes to 256 bytes: slabinfo before: ... : ... vma_lock ... 40 102 1 : ... vm_area_struct ... 160 51 2 : ... slabinfo after moving vm_lock: ... : ... vm_area_struct ... 256 32 2 : ... Aggregate VMA memory consumption per 1000 VMAs grows from 50 to 64 pages, which is 5.5MB per 100000 VMAs. Note that the size of this structure is dependent on the kernel configuration and typically the original size is higher than 160 bytes. Therefore these calculations are close to the worst case scenario. A more realistic vm_area_struct usage before this change is: ... : ... vma_lock ... 40 102 1 : ... vm_area_struct ... 176 46 2 : ... Aggregate VMA memory consumption per 1000 VMAs grows from 54 to 64 pages, which is 3.9MB per 100000 VMAs. This memory consumption growth can be addressed later by optimizing the vm_lock. [1] https://lore.kernel.org/all/20230227173632.3292573-34-surenb@google.com/ [2] https://lore.kernel.org/all/ZsQyI%2F087V34JoIt@xsang-OptiPlex-9020/ [3] https://lore.kernel.org/all/CAJuCfpEisU8Lfe96AYJDZ+OM4NoPmnw9bP53cT_kbfP_pR+-2g@mail.gmail.com/ Link: https://lkml.kernel.org/r/20250213224655.1680278-3-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 28 ++++++++++-------- include/linux/mm_types.h | 6 ++-- kernel/fork.c | 49 ++++---------------------------- tools/testing/vma/vma_internal.h | 33 +++++---------------- 4 files changed, 32 insertions(+), 84 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 6a4914bc1a38..bf3b6362927f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -697,6 +697,12 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_PER_VMA_LOCK +static inline void vma_lock_init(struct vm_area_struct *vma) +{ + init_rwsem(&vma->vm_lock.lock); + vma->vm_lock_seq = UINT_MAX; +} + /* * Try to read-lock a vma. 
The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to @@ -714,7 +720,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) return false; - if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) + if (unlikely(down_read_trylock(&vma->vm_lock.lock) == 0)) return false; /* @@ -729,7 +735,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * This pairs with RELEASE semantics in vma_end_write_all(). */ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { - up_read(&vma->vm_lock->lock); + up_read(&vma->vm_lock.lock); return false; } return true; @@ -744,7 +750,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { mmap_assert_locked(vma->vm_mm); - down_read_nested(&vma->vm_lock->lock, subclass); + down_read_nested(&vma->vm_lock.lock, subclass); } /* @@ -756,13 +762,13 @@ static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int static inline void vma_start_read_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); - down_read(&vma->vm_lock->lock); + down_read(&vma->vm_lock.lock); } static inline void vma_end_read(struct vm_area_struct *vma) { rcu_read_lock(); /* keeps vma alive till the end of up_read */ - up_read(&vma->vm_lock->lock); + up_read(&vma->vm_lock.lock); rcu_read_unlock(); } @@ -791,7 +797,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - down_write(&vma->vm_lock->lock); + down_write(&vma->vm_lock.lock); /* * We should use WRITE_ONCE() here because we can have concurrent reads * from the early lockless pessimistic check in vma_start_read(). @@ -799,7 +805,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. */ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock->lock); + up_write(&vma->vm_lock.lock); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) @@ -811,7 +817,7 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_assert_locked(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_lock->lock)) + if (!rwsem_is_locked(&vma->vm_lock.lock)) vma_assert_write_locked(vma); } @@ -844,6 +850,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ +static inline void vma_lock_init(struct vm_area_struct *vma) {} static inline bool vma_start_read(struct vm_area_struct *vma) { return false; } static inline void vma_end_read(struct vm_area_struct *vma) {} @@ -878,10 +885,6 @@ static inline void assert_fault_locked(struct vm_fault *vmf) extern const struct vm_operations_struct vma_dummy_vm_ops; -/* - * WARNING: vma_init does not initialize vma->vm_lock. - * Use vm_area_alloc()/vm_area_free() if vma needs locking. 
- */ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { memset(vma, 0, sizeof(*vma)); @@ -890,6 +893,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) INIT_LIST_HEAD(&vma->anon_vma_chain); vma_mark_detached(vma, false); vma_numab_state_init(vma); + vma_lock_init(vma); } /* Use when VMA is not part of the VMA tree and needs no locking */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0234f14f2aa6..36dea20cd101 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -730,8 +730,6 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - /* Unstable RCU readers are allowed to read this. */ - struct vma_lock *vm_lock; #endif /* @@ -784,6 +782,10 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. */ + struct vma_lock vm_lock ____cacheline_aligned_in_smp; +#endif } __randomize_layout; #ifdef CONFIG_NUMA diff --git a/kernel/fork.c b/kernel/fork.c index 735405a9c5f3..bdbabe73fb29 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -436,35 +436,6 @@ static struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; -#ifdef CONFIG_PER_VMA_LOCK - -/* SLAB cache for vm_area_struct.lock */ -static struct kmem_cache *vma_lock_cachep; - -static bool vma_lock_alloc(struct vm_area_struct *vma) -{ - vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = UINT_MAX; - - return true; -} - -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - kmem_cache_free(vma_lock_cachep, vma->vm_lock); -} - -#else /* CONFIG_PER_VMA_LOCK */ - -static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } -static inline void vma_lock_free(struct vm_area_struct *vma) {} - -#endif /* CONFIG_PER_VMA_LOCK */ - struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) { struct vm_area_struct *vma; @@ -474,10 +445,6 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - kmem_cache_free(vm_area_cachep, vma); - return NULL; - } return vma; } @@ -496,10 +463,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) * will be reinitialized. */ data_race(memcpy(new, orig, sizeof(*new))); - if (!vma_lock_alloc(new)) { - kmem_cache_free(vm_area_cachep, new); - return NULL; - } + vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); dup_anon_vma_name(orig, new); @@ -511,7 +475,6 @@ void __vm_area_free(struct vm_area_struct *vma) { vma_numab_state_free(vma); free_anon_vma_name(vma); - vma_lock_free(vma); kmem_cache_free(vm_area_cachep, vma); } @@ -522,7 +485,7 @@ static void vm_area_free_rcu_cb(struct rcu_head *head) vm_rcu); /* The vma should not be locked while being destroyed. 
*/ - VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); + VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock.lock), vma); __vm_area_free(vma); } #endif @@ -3200,11 +3163,9 @@ void __init proc_caches_init(void) sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); -#ifdef CONFIG_PER_VMA_LOCK - vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); -#endif + vm_area_cachep = KMEM_CACHE(vm_area_struct, + SLAB_HWCACHE_ALIGN|SLAB_NO_MERGE|SLAB_PANIC| + SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); } diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index bb273927af0f..4506e6fb3c6f 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -275,10 +275,10 @@ struct vm_area_struct { /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_lock.lock (in write mode) * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_lock.lock (in read or write mode) * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -287,7 +287,7 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - struct vma_lock *vm_lock; + struct vma_lock vm_lock; #endif /* @@ -464,17 +464,10 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) return mas_find(&vmi->mas, ULONG_MAX); } -static inline bool vma_lock_alloc(struct vm_area_struct *vma) +static inline void vma_lock_init(struct vm_area_struct *vma) { - vma->vm_lock = calloc(1, sizeof(struct vma_lock)); - - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); + init_rwsem(&vma->vm_lock.lock); vma->vm_lock_seq = UINT_MAX; - - return true; } static inline void vma_assert_write_locked(struct vm_area_struct *); @@ -497,6 +490,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); vma_mark_detached(vma, false); + vma_lock_init(vma); } static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) @@ -507,10 +501,6 @@ static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - free(vma); - return NULL; - } return vma; } @@ -523,10 +513,7 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return NULL; memcpy(new, orig, sizeof(*new)); - if (!vma_lock_alloc(new)) { - free(new); - return NULL; - } + vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); return new; @@ -696,14 +683,8 @@ static inline void mpol_put(struct mempolicy *) { } -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - free(vma->vm_lock); -} - static inline void __vm_area_free(struct vm_area_struct *vma) { - vma_lock_free(vma); free(vma); } From 8ef95d8f15f9e785341775814f0ed2ee22017aa5 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:40 -0800 Subject: [PATCH 106/431] mm: mark vma as detached until it's added into vma tree Current implementation does not set detached flag when a VMA is first allocated. This does not represent the real state of the VMA, which is detached until it is added into mm's VMA tree. 
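For reference, the lifecycle this change aims for, as a simplified sketch (in the actual code vma_mark_attached() is invoked from the vma_iter_store*() helpers rather than directly by most callers):

        vma = vm_area_alloc(mm);      /* newly allocated vma starts detached */
        vma_start_write(vma);
        vma_iter_store(&vmi, vma);    /* inserted into the VMA tree, which   */
                                      /* marks it attached                   */
        ...
        vma_start_write(vma);         /* write lock is required before ...   */
        vma_mark_detached(vma);       /* ... detaching the vma on removal    */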
Fix this by marking new VMAs as detached and resetting detached flag only after VMA is added into a tree. Introduce vma_mark_attached() to make the API more readable and to simplify possible future cleanup when vma->vm_mm might be used to indicate detached vma and vma_mark_attached() will need an additional mm parameter. Link: https://lkml.kernel.org/r/20250213224655.1680278-4-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Reviewed-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 27 ++++++++++++++++++++------- kernel/fork.c | 4 ++++ mm/memory.c | 2 +- mm/vma.c | 6 +++--- mm/vma.h | 2 ++ tools/testing/vma/vma_internal.h | 17 ++++++++++++----- 6 files changed, 42 insertions(+), 16 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index bf3b6362927f..d333d4070362 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -821,12 +821,21 @@ static inline void vma_assert_locked(struct vm_area_struct *vma) vma_assert_write_locked(vma); } -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma->detached = false; +} + +static inline void vma_mark_detached(struct vm_area_struct *vma) { /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + vma_assert_write_locked(vma); + vma->detached = true; +} + +static inline bool is_vma_detached(struct vm_area_struct *vma) +{ + return vma->detached; } static inline void release_fault_lock(struct vm_fault *vmf) @@ -857,8 +866,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } -static inline void vma_mark_detached(struct vm_area_struct *vma, - bool detached) {} +static inline void vma_mark_attached(struct vm_area_struct *vma) {} +static inline void vma_mark_detached(struct vm_area_struct *vma) {} static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address) @@ -891,7 +900,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); +#ifdef CONFIG_PER_VMA_LOCK + /* vma is not locked, can't use vma_mark_detached() */ + vma->detached = true; +#endif vma_numab_state_init(vma); vma_lock_init(vma); } @@ -1086,6 +1098,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } diff --git a/kernel/fork.c b/kernel/fork.c index bdbabe73fb29..5bf3e407c795 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -465,6 +465,10 @@ struct vm_area_struct 
*vm_area_dup(struct vm_area_struct *orig) data_race(memcpy(new, orig, sizeof(*new))); vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); +#ifdef CONFIG_PER_VMA_LOCK + /* vma is not locked, can't use vma_mark_detached() */ + new->detached = true; +#endif vma_numab_state_init(new); dup_anon_vma_name(orig, new); diff --git a/mm/memory.c b/mm/memory.c index 94feb51a7983..6ef014220e09 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6374,7 +6374,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, goto inval; /* Check if the VMA got isolated after we found it */ - if (vma->detached) { + if (is_vma_detached(vma)) { vma_end_read(vma); count_vm_vma_lock_event(VMA_LOCK_MISS); /* The area was replaced with another one */ diff --git a/mm/vma.c b/mm/vma.c index 603e538a093f..702e2181f8b7 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -341,7 +341,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, if (vp->remove) { again: - vma_mark_detached(vp->remove, true); + vma_mark_detached(vp->remove); if (vp->file) { uprobe_munmap(vp->remove, vp->remove->vm_start, vp->remove->vm_end); @@ -1238,7 +1238,7 @@ static void reattach_vmas(struct ma_state *mas_detach) mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - vma_mark_detached(vma, false); + vma_mark_attached(vma); __mt_destroy(mas_detach->tree); } @@ -1313,7 +1313,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, if (error) goto munmap_gather_failed; - vma_mark_detached(next, true); + vma_mark_detached(next); nrpages = vma_pages(next); vms->nr_pages += nrpages; diff --git a/mm/vma.h b/mm/vma.h index e55e68abfbe3..bffb56afce5f 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -205,6 +205,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } @@ -437,6 +438,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi, __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); mas_store_prealloc(&vmi->mas, vma); + vma_mark_attached(vma); } static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 4506e6fb3c6f..f93f7f74f97b 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -471,12 +471,16 @@ static inline void vma_lock_init(struct vm_area_struct *vma) } static inline void vma_assert_write_locked(struct vm_area_struct *); -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma->detached = false; +} + +static inline void vma_mark_detached(struct vm_area_struct *vma) { /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + vma_assert_write_locked(vma); + vma->detached = true; } extern const struct vm_operations_struct vma_dummy_vm_ops; @@ -489,7 +493,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); + /* vma is not locked, can't use vma_mark_detached() */ + vma->detached = true; vma_lock_init(vma); } @@ -515,6 +520,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) memcpy(new, orig, sizeof(*new)); vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); + /* vma is not locked, can't use vma_mark_detached() */ 
+ new->detached = true; return new; } From 55e50223bf3e06abceaf68e2ad125458bb5f874f Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:41 -0800 Subject: [PATCH 107/431] mm: introduce vma_iter_store_attached() to use with attached vmas vma_iter_store() functions can be used both when adding a new vma and when updating an existing one. However for existing ones we do not need to mark them attached as they are already marked that way. With vma->detached being a separate flag, double-marking a vmas as attached or detached is not an issue because the flag will simply be overwritten with the same value. However once we fold this flag into the refcount later in this series, re-attaching or re-detaching a vma becomes an issue since these operations will be incrementing/decrementing a refcount. Introduce vma_iter_store_new() and vma_iter_store_overwrite() to replace vma_iter_store() and avoid re-attaching a vma during vma update. Add assertions in vma_mark_attached()/vma_mark_detached() to catch invalid usage. Update vma tests to check for vma detached state correctness. Link: https://lkml.kernel.org/r/20250213224655.1680278-5-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Liam R. Howlett Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 14 +++++++++++ mm/nommu.c | 4 +-- mm/vma.c | 12 ++++----- mm/vma.h | 11 +++++++-- tools/testing/vma/vma.c | 42 +++++++++++++++++++++++++------- tools/testing/vma/vma_internal.h | 10 ++++++++ 6 files changed, 74 insertions(+), 19 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index d333d4070362..003c3e5c0a96 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -821,8 +821,19 @@ static inline void vma_assert_locked(struct vm_area_struct *vma) vma_assert_write_locked(vma); } +static inline void vma_assert_attached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(vma->detached); +} + +static inline void vma_assert_detached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(!vma->detached); +} + static inline void vma_mark_attached(struct vm_area_struct *vma) { + vma_assert_detached(vma); vma->detached = false; } @@ -830,6 +841,7 @@ static inline void vma_mark_detached(struct vm_area_struct *vma) { /* When detaching vma should be write-locked */ vma_assert_write_locked(vma); + vma_assert_attached(vma); vma->detached = true; } @@ -866,6 +878,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } +static inline void vma_assert_attached(struct vm_area_struct *vma) {} +static inline void vma_assert_detached(struct vm_area_struct *vma) {} static inline void vma_mark_attached(struct vm_area_struct *vma) {} static inline void vma_mark_detached(struct vm_area_struct *vma) {} diff --git a/mm/nommu.c 
b/mm/nommu.c index baa79abdaf03..8b31d8396297 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1191,7 +1191,7 @@ unsigned long do_mmap(struct file *file, setup_vma_to_mm(vma, current->mm); current->mm->map_count++; /* add the VMA to the tree */ - vma_iter_store(&vmi, vma); + vma_iter_store_new(&vmi, vma); /* we flush the region from the icache only when the first executable * mapping of it is made */ @@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, setup_vma_to_mm(vma, mm); setup_vma_to_mm(new, mm); - vma_iter_store(vmi, new); + vma_iter_store_new(vmi, new); mm->map_count++; return 0; diff --git a/mm/vma.c b/mm/vma.c index 702e2181f8b7..7374c04eee9b 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -320,7 +320,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - vma_iter_store(vmi, vp->insert); + vma_iter_store_new(vmi, vp->insert); mm->map_count++; } @@ -700,7 +700,7 @@ static int commit_merge(struct vma_merge_struct *vmg) vmg->__adjust_middle_start ? vmg->middle : NULL); vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); vmg_adjust_set_range(vmg); - vma_iter_store(vmg->vmi, vmg->target); + vma_iter_store_overwrite(vmg->vmi, vmg->target); vma_complete(&vp, vmg->vmi, vma->vm_mm); @@ -1711,7 +1711,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) return -ENOMEM; vma_start_write(vma); - vma_iter_store(&vmi, vma); + vma_iter_store_new(&vmi, vma); vma_link_file(vma); mm->map_count++; validate_mm(mm); @@ -2390,7 +2390,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap) /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); - vma_iter_store(vmi, vma); + vma_iter_store_new(vmi, vma); map->mm->map_count++; vma_link_file(vma); @@ -2867,7 +2867,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); + vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); @@ -2947,7 +2947,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) vma->vm_start = address; vma->vm_pgoff -= grow; /* Overwrite old entry in mtree. 
*/ - vma_iter_store(&vmi, vma); + vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); diff --git a/mm/vma.h b/mm/vma.h index bffb56afce5f..55be77ff042f 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -413,9 +413,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) } /* Store a VMA with preallocated memory */ -static inline void vma_iter_store(struct vma_iterator *vmi, - struct vm_area_struct *vma) +static inline void vma_iter_store_overwrite(struct vma_iterator *vmi, + struct vm_area_struct *vma) { + vma_assert_attached(vma); #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && @@ -438,7 +439,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi, __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); mas_store_prealloc(&vmi->mas, vma); +} + +static inline void vma_iter_store_new(struct vma_iterator *vmi, + struct vm_area_struct *vma) +{ vma_mark_attached(vma); + vma_iter_store_overwrite(vmi, vma); } static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index c7ffa71841ca..11f761769b5b 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -74,10 +74,22 @@ static struct vm_area_struct *alloc_vma(struct mm_struct *mm, ret->vm_end = end; ret->vm_pgoff = pgoff; ret->__vm_flags = flags; + vma_assert_detached(ret); return ret; } +/* Helper function to allocate a VMA and link it to the tree. */ +static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) +{ + int res; + + res = vma_link(mm, vma); + if (!res) + vma_assert_attached(vma); + return res; +} + /* Helper function to allocate a VMA and link it to the tree. */ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, unsigned long start, @@ -90,7 +102,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, if (vma == NULL) return NULL; - if (vma_link(mm, vma)) { + if (attach_vma(mm, vma)) { vm_area_free(vma); return NULL; } @@ -108,6 +120,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, /* Helper function which provides a wrapper around a merge new VMA operation. */ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) { + struct vm_area_struct *vma; /* * For convenience, get prev and next VMAs. Which the new VMA operation * requires. 
@@ -116,7 +129,11 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) vmg->prev = vma_prev(vmg->vmi); vma_iter_next_range(vmg->vmi); - return vma_merge_new_range(vmg); + vma = vma_merge_new_range(vmg); + if (vma) + vma_assert_attached(vma); + + return vma; } /* @@ -125,7 +142,12 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg) */ static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg) { - return vma_merge_existing_range(vmg); + struct vm_area_struct *vma; + + vma = vma_merge_existing_range(vmg); + if (vma) + vma_assert_attached(vma); + return vma; } /* @@ -260,8 +282,8 @@ static bool test_simple_merge(void) .pgoff = 1, }; - ASSERT_FALSE(vma_link(&mm, vma_left)); - ASSERT_FALSE(vma_link(&mm, vma_right)); + ASSERT_FALSE(attach_vma(&mm, vma_left)); + ASSERT_FALSE(attach_vma(&mm, vma_right)); vma = merge_new(&vmg); ASSERT_NE(vma, NULL); @@ -285,7 +307,7 @@ static bool test_simple_modify(void) struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags); VMA_ITERATOR(vmi, &mm, 0x1000); - ASSERT_FALSE(vma_link(&mm, init_vma)); + ASSERT_FALSE(attach_vma(&mm, init_vma)); /* * The flags will not be changed, the vma_modify_flags() function @@ -351,7 +373,7 @@ static bool test_simple_expand(void) .pgoff = 0, }; - ASSERT_FALSE(vma_link(&mm, vma)); + ASSERT_FALSE(attach_vma(&mm, vma)); ASSERT_FALSE(expand_existing(&vmg)); @@ -372,7 +394,7 @@ static bool test_simple_shrink(void) struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags); VMA_ITERATOR(vmi, &mm, 0); - ASSERT_FALSE(vma_link(&mm, vma)); + ASSERT_FALSE(attach_vma(&mm, vma)); ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0)); @@ -1522,11 +1544,11 @@ static bool test_copy_vma(void) vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); - ASSERT_NE(vma_new, vma); ASSERT_EQ(vma_new->vm_start, 0); ASSERT_EQ(vma_new->vm_end, 0x2000); ASSERT_EQ(vma_new->vm_pgoff, 0); + vma_assert_attached(vma_new); cleanup_mm(&mm, &vmi); @@ -1535,6 +1557,7 @@ static bool test_copy_vma(void) vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags); vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); + vma_assert_attached(vma_new); ASSERT_EQ(vma_new, vma_next); @@ -1576,6 +1599,7 @@ static bool test_expand_only_mode(void) ASSERT_EQ(vma->vm_pgoff, 3); ASSERT_TRUE(vma_write_started(vma)); ASSERT_EQ(vma_iter_addr(&vmi), 0x3000); + vma_assert_attached(vma); cleanup_mm(&mm, &vmi); return true; diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index f93f7f74f97b..34277842156c 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -470,6 +470,16 @@ static inline void vma_lock_init(struct vm_area_struct *vma) vma->vm_lock_seq = UINT_MAX; } +static inline void vma_assert_attached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(vma->detached); +} + +static inline void vma_assert_detached(struct vm_area_struct *vma) +{ + WARN_ON_ONCE(!vma->detached); +} + static inline void vma_assert_write_locked(struct vm_area_struct *); static inline void vma_mark_attached(struct vm_area_struct *vma) { From fe605930f074fb381884456061c7628c1fd35742 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:42 -0800 Subject: [PATCH 108/431] mm: mark vmas detached upon exit When exit_mmap() removes vmas belonging to an exiting task, it does not mark them as detached since they can't be reached by other tasks and they will 
be freed shortly. Once we introduce vma reuse, all vmas will have to be in detached state before they are freed to ensure vma when reused is in a consistent state. Add missing vma_mark_detached() before freeing the vma. Link: https://lkml.kernel.org/r/20250213224655.1680278-6-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- mm/vma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index 7374c04eee9b..53f4d0efce4d 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -427,10 +427,12 @@ void remove_vma(struct vm_area_struct *vma, bool unreachable) if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - if (unreachable) + if (unreachable) { + vma_mark_detached(vma); __vm_area_free(vma); - else + } else { vm_area_free(vma); + } } /* From 2c2bd11caba2a45445c1f0c722451fe29e59db79 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:43 -0800 Subject: [PATCH 109/431] types: move struct rcuwait into types.h Move rcuwait struct definition into types.h so that rcuwait can be used without including rcuwait.h which includes other headers. Without this change mm_types.h can't use rcuwait due to a the following circular dependency: mm_types.h -> rcuwait.h -> signal.h -> mm_types.h Link: https://lkml.kernel.org/r/20250213224655.1680278-7-surenb@google.com Suggested-by: Matthew Wilcox Signed-off-by: Suren Baghdasaryan Acked-by: Davidlohr Bueso Acked-by: Liam R. Howlett Acked-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/rcuwait.h | 13 +------------ include/linux/types.h | 12 ++++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 27343424225c..9ad134a04b41 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -4,18 +4,7 @@ #include #include - -/* - * rcuwait provides a way of blocking and waking up a single - * task in an rcu-safe manner. - * - * The only time @task is non-nil is when a user is blocked (or - * checking if it needs to) on a condition, and reset as soon as we - * know that the condition has succeeded and are awoken. 
- */ -struct rcuwait { - struct task_struct __rcu *task; -}; +#include #define __RCUWAIT_INITIALIZER(name) \ { .task = NULL, } diff --git a/include/linux/types.h b/include/linux/types.h index 1c509ce8f7f6..a3d2182c2686 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -248,5 +248,17 @@ typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); typedef int (*cmp_func_t)(const void *a, const void *b); +/* + * rcuwait provides a way of blocking and waking up a single + * task in an rcu-safe manner. + * + * The only time @task is non-nil is when a user is blocked (or + * checking if it needs to) on a condition, and reset as soon as we + * know that the condition has succeeded and are awoken. + */ +struct rcuwait { + struct task_struct __rcu *task; +}; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ From 7440adb405dfc4abe21bc95abf3481e6a6649c05 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:44 -0800 Subject: [PATCH 110/431] mm: allow vma_start_read_locked/vma_start_read_locked_nested to fail With upcoming replacement of vm_lock with vm_refcnt, we need to handle a possibility of vma_start_read_locked/vma_start_read_locked_nested failing due to refcount overflow. Prepare for such possibility by changing these APIs and adjusting their users. Link: https://lkml.kernel.org/r/20250213224655.1680278-8-surenb@google.com Signed-off-by: Suren Baghdasaryan Cc: Lokesh Gidra Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 6 ++++-- mm/userfaultfd.c | 30 +++++++++++++++++++++++------- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 003c3e5c0a96..09b48af68699 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -747,10 +747,11 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. */ -static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) +static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { mmap_assert_locked(vma->vm_mm); down_read_nested(&vma->vm_lock.lock, subclass); + return true; } /* @@ -759,10 +760,11 @@ static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. 
*/ -static inline void vma_start_read_locked(struct vm_area_struct *vma) +static inline bool vma_start_read_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); down_read(&vma->vm_lock.lock); + return true; } static inline void vma_end_read(struct vm_area_struct *vma) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 48ac81bbfee6..fbf2cf62ab9f 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -85,8 +85,12 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm, mmap_read_lock(mm); vma = find_vma_and_prepare_anon(mm, address); - if (!IS_ERR(vma)) - vma_start_read_locked(vma); + if (!IS_ERR(vma)) { + bool locked = vma_start_read_locked(vma); + + if (!locked) + vma = ERR_PTR(-EAGAIN); + } mmap_read_unlock(mm); return vma; @@ -1555,12 +1559,24 @@ static int uffd_move_lock(struct mm_struct *mm, mmap_read_lock(mm); err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); - if (!err) { - vma_start_read_locked(*dst_vmap); - if (*dst_vmap != *src_vmap) - vma_start_read_locked_nested(*src_vmap, - SINGLE_DEPTH_NESTING); + if (err) + goto out; + + if (!vma_start_read_locked(*dst_vmap)) { + err = -EAGAIN; + goto out; } + + /* Nothing further to do if both vmas are locked. */ + if (*dst_vmap == *src_vmap) + goto out; + + if (!vma_start_read_locked_nested(*src_vmap, SINGLE_DEPTH_NESTING)) { + /* Undo dst_vmap locking if src_vmap failed to lock */ + vma_end_read(*dst_vmap); + err = -EAGAIN; + } +out: mmap_read_unlock(mm); return err; } From ce0853966085dd8eab7153ce0b815c4a07d86698 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:45 -0800 Subject: [PATCH 111/431] mm: move mmap_init_lock() out of the header file mmap_init_lock() is used only from mm_init() in fork.c, therefore it does not have to reside in the header file. This move lets us avoid including additional headers in mmap_lock.h later, when mmap_init_lock() needs to initialize rcuwait object. Link: https://lkml.kernel.org/r/20250213224655.1680278-9-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mmap_lock.h | 6 ------ kernel/fork.c | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 45a21faa3ff6..4706c6769902 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -122,12 +122,6 @@ static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int #endif /* CONFIG_PER_VMA_LOCK */ -static inline void mmap_init_lock(struct mm_struct *mm) -{ - init_rwsem(&mm->mmap_lock); - mm_lock_seqcount_init(mm); -} - static inline void mmap_write_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, true); diff --git a/kernel/fork.c b/kernel/fork.c index 5bf3e407c795..f1af413e5aa4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1230,6 +1230,12 @@ static void mm_init_uprobes_state(struct mm_struct *mm) #endif } +static void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); + mm_lock_seqcount_init(mm); +} + static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { From 45ad9f5290dc4bb2249e951d4b3756d3ebda2d66 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:46 -0800 Subject: [PATCH 112/431] mm: uninline the main body of vma_start_write() vma_start_write() is used in many places and will grow in size very soon. It is not used in performance critical paths and uninlining it should limit the future code size growth. No functional changes. Link: https://lkml.kernel.org/r/20250213224655.1680278-10-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 +++--------- mm/memory.c | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 09b48af68699..c24c521e38a2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -787,6 +787,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l return (vma->vm_lock_seq == *mm_lock_seq); } +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq); + /* * Begin writing to a VMA. * Exclude concurrent readers under the per-VMA lock until the currently @@ -799,15 +801,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - down_write(&vma->vm_lock.lock); - /* - * We should use WRITE_ONCE() here because we can have concurrent reads - * from the early lockless pessimistic check in vma_start_read(). - * We don't really care about the correctness of that early check, but - * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. 
- */ - WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock.lock); + __vma_start_write(vma, mm_lock_seq); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) diff --git a/mm/memory.c b/mm/memory.c index 6ef014220e09..f2f7dc215b6b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6353,6 +6353,20 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, #endif #ifdef CONFIG_PER_VMA_LOCK +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) +{ + down_write(&vma->vm_lock.lock); + /* + * We should use WRITE_ONCE() here because we can have concurrent reads + * from the early lockless pessimistic check in vma_start_read(). + * We don't really care about the correctness of that early check, but + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. + */ + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); + up_write(&vma->vm_lock.lock); +} +EXPORT_SYMBOL_GPL(__vma_start_write); + /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the From 7f8ceea0c58039dcea3d31b8d5da58aa5f6e12bf Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:47 -0800 Subject: [PATCH 113/431] refcount: provide ops for cases when object's memory can be reused For speculative lookups where a successful inc_not_zero() pins the object, but where we still need to double check if the object acquired is indeed the one we set out to acquire (identity check), needs this validation to happen *after* the increment. Similarly, when a new object is initialized and its memory might have been previously occupied by another object, all stores to initialize the object should happen *before* refcount initialization. Notably SLAB_TYPESAFE_BY_RCU is one such an example when this ordering is required for reference counting. Add refcount_{add|inc}_not_zero_acquire() to guarantee the proper ordering between acquiring a reference count on an object and performing the identity check for that object. Add refcount_set_release() to guarantee proper ordering between stores initializing object attributes and the store initializing the refcount. refcount_set_release() should be done after all other object attributes are initialized. Once refcount_set_release() is called, the object should be considered visible to other tasks even if it was not yet added into an object collection normally used to discover it. This is because other tasks might have discovered the object previously occupying the same memory and after memory reuse they can succeed in taking refcount for the new object and start using it. 
Object reuse example to consider: consumer: obj = lookup(collection, key); if (!refcount_inc_not_zero_acquire(&obj->ref)) return; if (READ_ONCE(obj->key) != key) { /* identity check */ put_ref(obj); return; } use(obj->value); producer: remove(collection, obj->key); if (!refcount_dec_and_test(&obj->ref)) return; obj->key = KEY_INVALID; free(obj); obj = malloc(); /* obj is reused */ obj->key = new_key; obj->value = new_value; refcount_set_release(obj->ref, 1); add(collection, new_key, obj); refcount_{add|inc}_not_zero_acquire() is required to prevent the following reordering when refcount_inc_not_zero() is used instead: consumer: obj = lookup(collection, key); if (READ_ONCE(obj->key) != key) { /* reordered identity check */ put_ref(obj); return; } producer: remove(collection, obj->key); if (!refcount_dec_and_test(&obj->ref)) return; obj->key = KEY_INVALID; free(obj); obj = malloc(); /* obj is reused */ obj->key = new_key; obj->value = new_value; refcount_set_release(obj->ref, 1); add(collection, new_key, obj); if (!refcount_inc_not_zero(&obj->ref)) return; use(obj->value); /* USING WRONG OBJECT */ refcount_set_release() is required to prevent the following reordering when refcount_set() is used instead: consumer: obj = lookup(collection, key); producer: remove(collection, obj->key); if (!refcount_dec_and_test(&obj->ref)) return; obj->key = KEY_INVALID; free(obj); obj = malloc(); /* obj is reused */ obj->key = new_key; /* new_key == old_key */ refcount_set(obj->ref, 1); if (!refcount_inc_not_zero_acquire(&obj->ref)) return; if (READ_ONCE(obj->key) != key) { /* pass since new_key == old_key */ put_ref(obj); return; } use(obj->value); /* USING STALE obj->value */ obj->value = new_value; /* reordered store */ add(collection, key, obj); [surenb@google.com: fix title underlines in refcount-vs-atomic.rst] Link: https://lkml.kernel.org/r/20250217161645.3137927-1-surenb@google.com Link: https://lkml.kernel.org/r/20250213224655.1680278-11-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka [slab] Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Peter Zijlstra Cc: Will Deacon Cc: Paul E. McKenney Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- Documentation/RCU/whatisRCU.rst | 10 ++ Documentation/core-api/refcount-vs-atomic.rst | 37 +++++- include/linux/refcount.h | 106 ++++++++++++++++++ include/linux/slab.h | 9 ++ 4 files changed, 156 insertions(+), 6 deletions(-) diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index 1ef5784c1b84..53faeed7c190 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -971,6 +971,16 @@ unfortunately any spinlock in a ``SLAB_TYPESAFE_BY_RCU`` object must be initialized after each and every call to kmem_cache_alloc(), which renders reference-free spinlock acquisition completely unsafe. Therefore, when using ``SLAB_TYPESAFE_BY_RCU``, make proper use of a reference counter. 
+If using refcount_t, the specialized refcount_{add|inc}_not_zero_acquire() +and refcount_set_release() APIs should be used to ensure correct operation +ordering when verifying object identity and when initializing newly +allocated objects. Acquire fence in refcount_{add|inc}_not_zero_acquire() +ensures that identity checks happen *after* reference count is taken. +refcount_set_release() should be called after a newly allocated object is +fully initialized and release fence ensures that new values are visible +*before* refcount can be successfully taken by other users. Once +refcount_set_release() is called, the object should be considered visible +by other tasks. (Those willing to initialize their locks in a kmem_cache constructor may also use locking, including cache-friendly sequence locking.) diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst index 79a009ce11df..94e628c1eb49 100644 --- a/Documentation/core-api/refcount-vs-atomic.rst +++ b/Documentation/core-api/refcount-vs-atomic.rst @@ -86,7 +86,19 @@ Memory ordering guarantee changes: * none (both fully unordered) -case 2) - increment-based ops that return no value +case 2) - non-"Read/Modify/Write" (RMW) ops with release ordering +----------------------------------------------------------------- + +Function changes: + + * atomic_set_release() --> refcount_set_release() + +Memory ordering guarantee changes: + + * none (both provide RELEASE ordering) + + +case 3) - increment-based ops that return no value -------------------------------------------------- Function changes: @@ -98,7 +110,7 @@ Memory ordering guarantee changes: * none (both fully unordered) -case 3) - decrement-based RMW ops that return no value +case 4) - decrement-based RMW ops that return no value ------------------------------------------------------ Function changes: @@ -110,7 +122,7 @@ Memory ordering guarantee changes: * fully unordered --> RELEASE ordering -case 4) - increment-based RMW ops that return a value +case 5) - increment-based RMW ops that return a value ----------------------------------------------------- Function changes: @@ -126,7 +138,20 @@ Memory ordering guarantees changes: result of obtaining pointer to the object! -case 5) - generic dec/sub decrement-based RMW ops that return a value +case 6) - increment-based RMW ops with acquire ordering that return a value +--------------------------------------------------------------------------- + +Function changes: + + * atomic_inc_not_zero() --> refcount_inc_not_zero_acquire() + * no atomic counterpart --> refcount_add_not_zero_acquire() + +Memory ordering guarantees changes: + + * fully ordered --> ACQUIRE ordering on success + + +case 7) - generic dec/sub decrement-based RMW ops that return a value --------------------------------------------------------------------- Function changes: @@ -139,7 +164,7 @@ Memory ordering guarantees changes: * fully ordered --> RELEASE ordering + ACQUIRE ordering on success -case 6) other decrement-based RMW ops that return a value +case 8) other decrement-based RMW ops that return a value --------------------------------------------------------- Function changes: @@ -154,7 +179,7 @@ Memory ordering guarantees changes: .. note:: atomic_add_unless() only provides full order on success. 
-case 7) - lock-based RMW +case 9) - lock-based RMW ------------------------ Function changes: diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 35f039ecb272..4589d2e7bfea 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -87,6 +87,15 @@ * The decrements dec_and_test() and sub_and_test() also provide acquire * ordering on success. * + * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide + * acquire and release ordering for cases when the memory occupied by the + * object might be reused to store another object. This is important for the + * cases where secondary validation is required to detect such reuse, e.g. + * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after + * the refcount is taken, hence acquire order is necessary. Similarly, when the + * object is initialized, all stores to its attributes should be visible before + * the refcount is set, otherwise a stale attribute value might be used by + * another task which succeeds in taking a refcount to the new object. */ #ifndef _LINUX_REFCOUNT_H @@ -125,6 +134,31 @@ static inline void refcount_set(refcount_t *r, int n) atomic_set(&r->refs, n); } +/** + * refcount_set_release - set a refcount's value with release ordering + * @r: the refcount + * @n: value to which the refcount will be set + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides release memory ordering which will order previous memory operations + * against this store. This ensures all updates to this object are visible + * once the refcount is set and stale values from the object previously + * occupying this memory are overwritten with new ones. + * + * This function should be called only after new object is fully initialized. + * After this call the object should be considered visible to other tasks even + * if it was not yet added into an object collection normally used to discover + * it. This is because other tasks might have discovered the object previously + * occupying the same memory and after memory reuse they can succeed in taking + * refcount to the new object and start using it. + */ +static inline void refcount_set_release(refcount_t *r, int n) +{ + atomic_set_release(&r->refs, n); +} + /** * refcount_read - get a refcount's value * @r: the refcount @@ -178,6 +212,52 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) return __refcount_add_not_zero(i, r, NULL); } +static inline __must_check __signed_wrap +bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp) +{ + int old = refcount_read(r); + + do { + if (!old) + break; + } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i)); + + if (oldp) + *oldp = old; + + if (unlikely(old < 0 || old + i < 0)) + refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF); + + return old; +} + +/** + * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0 + * + * @i: the value to add to the refcount + * @r: the refcount + * + * Will saturate at REFCOUNT_SATURATED and WARN. + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides acquire memory ordering on success, it is assumed the caller has + * guaranteed the object memory to be stable (RCU, etc.). It does provide a + * control dependency and thereby orders future stores. 
See the comment on top. + * + * Use of this function is not recommended for the normal reference counting + * use case in which references are taken and released one at a time. In these + * cases, refcount_inc_not_zero_acquire() should instead be used to increment a + * reference count. + * + * Return: false if the passed refcount is 0, true otherwise + */ +static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r) +{ + return __refcount_add_not_zero_acquire(i, r, NULL); +} + static inline __signed_wrap void __refcount_add(int i, refcount_t *r, int *oldp) { @@ -236,6 +316,32 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) return __refcount_inc_not_zero(r, NULL); } +static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero_acquire(1, r, oldp); +} + +/** + * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0 + * @r: the refcount to increment + * + * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on + * success. + * + * This function should be used when memory occupied by the object might be + * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU. + * + * Provides acquire memory ordering on success, it is assumed the caller has + * guaranteed the object memory to be stable (RCU, etc.). It does provide a + * control dependency and thereby orders future stores. See the comment on top. + * + * Return: true if the increment was successful, false otherwise + */ +static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r) +{ + return __refcount_inc_not_zero_acquire(r, NULL); +} + static inline void __refcount_inc(refcount_t *r, int *oldp) { __refcount_add(1, r, oldp); diff --git a/include/linux/slab.h b/include/linux/slab.h index 09eedaecf120..ad902a2d692b 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -136,6 +136,15 @@ enum _slab_flag_bits { * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. * + * Note that object identity check has to be done *after* acquiring a + * reference, therefore user has to ensure proper ordering for loads. + * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU, + * the newly allocated object has to be fully initialized *before* its + * refcount gets initialized and proper ordering for stores is required. + * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are + * designed with the proper fences required for reference counting objects + * allocated with SLAB_TYPESAFE_BY_RCU. + * * Note that it is not possible to acquire a lock within a structure * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages From 4e0dbe105d5088c77eb09de6f049aaf44711a2ec Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:48 -0800 Subject: [PATCH 114/431] refcount: introduce __refcount_{add|inc}_not_zero_limited_acquire Introduce functions to increase refcount but with a top limit above which they will fail to increase (the limit is inclusive). Setting the limit to INT_MAX indicates no limit. 
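A usage sketch of the new helper (my_obj, its ref field and MY_REF_LIMIT are illustrative placeholders, not part of this patch):

        #include <linux/refcount.h>

        #define MY_REF_LIMIT    1024    /* inclusive cap; INT_MAX means no limit */

        struct my_obj {
                refcount_t ref;
        };

        static bool my_obj_tryget(struct my_obj *obj)
        {
                int old;

                /* Fails if the count is 0 or the increment would exceed the cap. */
                return __refcount_inc_not_zero_limited_acquire(&obj->ref, &old,
                                                               MY_REF_LIMIT);
        }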
Link: https://lkml.kernel.org/r/20250213224655.1680278-12-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/refcount.h | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 4589d2e7bfea..80dc023ac2bf 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -213,13 +213,20 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) } static inline __must_check __signed_wrap -bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp) +bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp, + int limit) { int old = refcount_read(r); do { if (!old) break; + + if (i > limit - old) { + if (oldp) + *oldp = old; + return false; + } } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i)); if (oldp) @@ -231,6 +238,18 @@ bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp) return old; } +static inline __must_check bool +__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit) +{ + return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit); +} + +static inline __must_check __signed_wrap +bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX); +} + /** * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0 * From f35ab95ca0af7a27feab57b9d7e906405bddb093 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:49 -0800 Subject: [PATCH 115/431] mm: replace vm_lock and detached flag with a reference count rw_semaphore is a sizable structure of 40 bytes and consumes considerable space for each vm_area_struct. However vma_lock has two important specifics which can be used to replace rw_semaphore with a simpler structure: 1. Readers never wait. They try to take the vma_lock and fall back to mmap_lock if that fails. 2. Only one writer at a time will ever try to write-lock a vma_lock because writers first take mmap_lock in write mode. Because of these requirements, full rw_semaphore functionality is not needed and we can replace rw_semaphore and the vma->detached flag with a refcount (vm_refcnt). When vma is in detached state, vm_refcnt is 0 and only a call to vma_mark_attached() can take it out of this state. Note that unlike before, now we enforce both vma_mark_attached() and vma_mark_detached() to be done only after vma has been write-locked. vma_mark_attached() changes vm_refcnt to 1 to indicate that it has been attached to the vma tree. When a reader takes read lock, it increments vm_refcnt, unless the top usable bit of vm_refcnt (0x40000000) is set, indicating presence of a writer. When writer takes write lock, it sets the top usable bit to indicate its presence. 
If there are readers, writer will wait using newly introduced mm->vma_writer_wait. Since all writers take mmap_lock in write mode first, there can be only one writer at a time. The last reader to release the lock will signal the writer to wake up. refcount might overflow if there are many competing readers, in which case read-locking will fail. Readers are expected to handle such failures. In summary: 1. all readers increment the vm_refcnt; 2. writer sets top usable (writer) bit of vm_refcnt; 3. readers cannot increment the vm_refcnt if the writer bit is set; 4. in the presence of readers, writer must wait for the vm_refcnt to drop to 1 (plus the VMA_LOCK_OFFSET writer bit), indicating an attached vma with no readers; 5. vm_refcnt overflow is handled by the readers. While this vm_lock replacement does not yet result in a smaller vm_area_struct (it stays at 256 bytes due to cacheline alignment), it allows for further size optimization by structure member regrouping to bring the size of vm_area_struct below 192 bytes. [surenb@google.com: fix a crash due to vma_end_read() that should have been removed] Link: https://lkml.kernel.org/r/20250220200208.323769-1-surenb@google.com Link: https://lkml.kernel.org/r/20250213224655.1680278-13-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Peter Zijlstra Suggested-by: Matthew Wilcox Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 130 ++++++++++++++++++++----------- include/linux/mm_types.h | 22 +++--- kernel/fork.c | 13 ++-- mm/init-mm.c | 1 + mm/memory.c | 90 ++++++++++++++++++--- tools/testing/vma/linux/atomic.h | 5 ++ tools/testing/vma/vma_internal.h | 63 +++++++-------- 7 files changed, 218 insertions(+), 106 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index c24c521e38a2..06f179c844c3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -32,6 +32,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -697,19 +698,54 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_PER_VMA_LOCK -static inline void vma_lock_init(struct vm_area_struct *vma) +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) { - init_rwsem(&vma->vm_lock.lock); +#ifdef CONFIG_DEBUG_LOCK_ALLOC + static struct lock_class_key lockdep_key; + + lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0); +#endif + if (reset_refcnt) + refcount_set(&vma->vm_refcnt, 0); vma->vm_lock_seq = UINT_MAX; } +static inline bool is_vma_writer_only(int refcnt) +{ + /* + * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma + * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on + * a detached vma happens only in vma_mark_detached() and is a rare + * case, therefore most of the time there will be no unnecessary wakeup. 
+ */ + return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1; +} + +static inline void vma_refcount_put(struct vm_area_struct *vma) +{ + /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ + struct mm_struct *mm = vma->vm_mm; + int oldcnt; + + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); + if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) { + + if (is_vma_writer_only(oldcnt - 1)) + rcuwait_wake_up(&mm->vma_writer_wait); + } +} + /* * Try to read-lock a vma. The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to * using mmap_lock. The function should never yield false unlocked result. + * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got + * detached. */ -static inline bool vma_start_read(struct vm_area_struct *vma) +static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) { + int oldcnt; + /* * Check before locking. A race might cause false locked result. * We can use READ_ONCE() for the mm_lock_seq here, and don't need @@ -718,15 +754,25 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * need ordering is below. */ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) - return false; - - if (unlikely(down_read_trylock(&vma->vm_lock.lock) == 0)) - return false; + return NULL; /* - * Overflow might produce false locked result. + * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire() + * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET. + * Acquire fence is required here to avoid reordering against later + * vm_lock_seq check and checks inside lock_vma_under_rcu(). + */ + if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) { + /* return EAGAIN if vma got detached from under us */ + return oldcnt ? NULL : ERR_PTR(-EAGAIN); + } + + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); + /* + * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result. * False unlocked result is impossible because we modify and check - * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq + * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq * modification invalidates all existing locks. * * We must use ACQUIRE semantics for the mm_lock_seq so that if we are @@ -735,10 +781,11 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * This pairs with RELEASE semantics in vma_end_write_all(). 
*/ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { - up_read(&vma->vm_lock.lock); - return false; + vma_refcount_put(vma); + return NULL; } - return true; + + return vma; } /* @@ -749,8 +796,14 @@ static inline bool vma_start_read(struct vm_area_struct *vma) */ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { + int oldcnt; + mmap_assert_locked(vma->vm_mm); - down_read_nested(&vma->vm_lock.lock, subclass); + if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) + return false; + + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); return true; } @@ -762,16 +815,12 @@ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int */ static inline bool vma_start_read_locked(struct vm_area_struct *vma) { - mmap_assert_locked(vma->vm_mm); - down_read(&vma->vm_lock.lock); - return true; + return vma_start_read_locked_nested(vma, 0); } static inline void vma_end_read(struct vm_area_struct *vma) { - rcu_read_lock(); /* keeps vma alive till the end of up_read */ - up_read(&vma->vm_lock.lock); - rcu_read_unlock(); + vma_refcount_put(vma); } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ @@ -813,38 +862,35 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_assert_locked(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_lock.lock)) - vma_assert_write_locked(vma); + unsigned int mm_lock_seq; + + VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 && + !__is_vma_write_locked(vma, &mm_lock_seq), vma); } +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. 
+ */ static inline void vma_assert_attached(struct vm_area_struct *vma) { - WARN_ON_ONCE(vma->detached); + WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); } static inline void vma_assert_detached(struct vm_area_struct *vma) { - WARN_ON_ONCE(!vma->detached); + WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); } static inline void vma_mark_attached(struct vm_area_struct *vma) { - vma_assert_detached(vma); - vma->detached = false; -} - -static inline void vma_mark_detached(struct vm_area_struct *vma) -{ - /* When detaching vma should be write-locked */ vma_assert_write_locked(vma); - vma_assert_attached(vma); - vma->detached = true; + vma_assert_detached(vma); + refcount_set(&vma->vm_refcnt, 1); } -static inline bool is_vma_detached(struct vm_area_struct *vma) -{ - return vma->detached; -} +void vma_mark_detached(struct vm_area_struct *vma); static inline void release_fault_lock(struct vm_fault *vmf) { @@ -867,9 +913,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ -static inline void vma_lock_init(struct vm_area_struct *vma) {} -static inline bool vma_start_read(struct vm_area_struct *vma) - { return false; } +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} +static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) + { return NULL; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) @@ -910,12 +956,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); -#ifdef CONFIG_PER_VMA_LOCK - /* vma is not locked, can't use vma_mark_detached() */ - vma->detached = true; -#endif vma_numab_state_init(vma); - vma_lock_init(vma); + vma_lock_init(vma, false); } /* Use when VMA is not part of the VMA tree and needs no locking */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 36dea20cd101..9de0a6cb3c2d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -629,9 +630,8 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) } #endif -struct vma_lock { - struct rw_semaphore lock; -}; +#define VMA_LOCK_OFFSET 0x40000000 +#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1) struct vma_numab_state { /* @@ -709,19 +709,13 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* - * Flag to indicate areas detached from the mm->mm_mt tree. - * Unstable RCU readers are allowed to read this. - */ - bool detached; - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -784,7 +778,10 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_PER_VMA_LOCK /* Unstable RCU readers are allowed to read this. 
*/ - struct vma_lock vm_lock ____cacheline_aligned_in_smp; + refcount_t vm_refcnt ____cacheline_aligned_in_smp; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map vmlock_dep_map; +#endif #endif } __randomize_layout; @@ -920,6 +917,7 @@ struct mm_struct { * by mmlist_lock */ #ifdef CONFIG_PER_VMA_LOCK + struct rcuwait vma_writer_wait; /* * This field has lock-like semantics, meaning it is sometimes * accessed with ACQUIRE/RELEASE semantics. diff --git a/kernel/fork.c b/kernel/fork.c index f1af413e5aa4..48a0038f606f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -463,12 +463,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) * will be reinitialized. */ data_race(memcpy(new, orig, sizeof(*new))); - vma_lock_init(new); + vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); -#ifdef CONFIG_PER_VMA_LOCK - /* vma is not locked, can't use vma_mark_detached() */ - new->detached = true; -#endif vma_numab_state_init(new); dup_anon_vma_name(orig, new); @@ -477,6 +473,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) void __vm_area_free(struct vm_area_struct *vma) { + /* The vma should be detached while being destroyed. */ + vma_assert_detached(vma); vma_numab_state_free(vma); free_anon_vma_name(vma); kmem_cache_free(vm_area_cachep, vma); @@ -488,8 +486,6 @@ static void vm_area_free_rcu_cb(struct rcu_head *head) struct vm_area_struct *vma = container_of(head, struct vm_area_struct, vm_rcu); - /* The vma should not be locked while being destroyed. */ - VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock.lock), vma); __vm_area_free(vma); } #endif @@ -1234,6 +1230,9 @@ static void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_lock); mm_lock_seqcount_init(mm); +#ifdef CONFIG_PER_VMA_LOCK + rcuwait_init(&mm->vma_writer_wait); +#endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, diff --git a/mm/init-mm.c b/mm/init-mm.c index 6af3ad675930..4600e7605cab 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -40,6 +40,7 @@ struct mm_struct init_mm = { .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), #ifdef CONFIG_PER_VMA_LOCK + .vma_writer_wait = __RCUWAIT_INITIALIZER(init_mm.vma_writer_wait), .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq), #endif .user_ns = &init_user_ns, diff --git a/mm/memory.c b/mm/memory.c index f2f7dc215b6b..51f233404b02 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6353,9 +6353,47 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, #endif #ifdef CONFIG_PER_VMA_LOCK +static inline bool __vma_enter_locked(struct vm_area_struct *vma, bool detaching) +{ + unsigned int tgt_refcnt = VMA_LOCK_OFFSET; + + /* Additional refcnt if the vma is attached. */ + if (!detaching) + tgt_refcnt++; + + /* + * If vma is detached then only vma_mark_attached() can raise the + * vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached(). 
+ */ + if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt)) + return false; + + rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_); + rcuwait_wait_event(&vma->vm_mm->vma_writer_wait, + refcount_read(&vma->vm_refcnt) == tgt_refcnt, + TASK_UNINTERRUPTIBLE); + lock_acquired(&vma->vmlock_dep_map, _RET_IP_); + + return true; +} + +static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached) +{ + *detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt); + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); +} + void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) { - down_write(&vma->vm_lock.lock); + bool locked; + + /* + * __vma_enter_locked() returns false immediately if the vma is not + * attached, otherwise it waits until refcnt is indicating that vma + * is attached with no readers. + */ + locked = __vma_enter_locked(vma, false); + /* * We should use WRITE_ONCE() here because we can have concurrent reads * from the early lockless pessimistic check in vma_start_read(). @@ -6363,10 +6401,40 @@ void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. */ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock.lock); + + if (locked) { + bool detached; + + __vma_exit_locked(vma, &detached); + WARN_ON_ONCE(detached); /* vma should remain attached */ + } } EXPORT_SYMBOL_GPL(__vma_start_write); +void vma_mark_detached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_attached(vma); + + /* + * We are the only writer, so no need to use vma_refcount_put(). + * The condition below is unlikely because the vma has been already + * write-locked and readers can increment vm_refcnt only temporarily + * before they check vm_lock_seq, realize the vma is locked and drop + * back the vm_refcnt. That is a narrow window for observing a raised + * vm_refcnt. + */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* Wait until vma is detached with no readers. */ + if (__vma_enter_locked(vma, true)) { + bool detached; + + __vma_exit_locked(vma, &detached); + WARN_ON_ONCE(!detached); + } + } +} + /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. 
If the VMA is not found or is being modified the @@ -6384,15 +6452,17 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma) goto inval; - if (!vma_start_read(vma)) - goto inval; + vma = vma_start_read(vma); + if (IS_ERR_OR_NULL(vma)) { + /* Check if the VMA got isolated after we found it */ + if (PTR_ERR(vma) == -EAGAIN) { + count_vm_vma_lock_event(VMA_LOCK_MISS); + /* The area was replaced with another one */ + goto retry; + } - /* Check if the VMA got isolated after we found it */ - if (is_vma_detached(vma)) { - vma_end_read(vma); - count_vm_vma_lock_event(VMA_LOCK_MISS); - /* The area was replaced with another one */ - goto retry; + /* Failed to lock the VMA */ + goto inval; } /* * At this point, we have a stable reference to a VMA: The VMA is diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h index 3e1b6adc027b..788c597c4fde 100644 --- a/tools/testing/vma/linux/atomic.h +++ b/tools/testing/vma/linux/atomic.h @@ -9,4 +9,9 @@ #define atomic_set(x, y) uatomic_set(x, y) #define U8_MAX UCHAR_MAX +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed uatomic_cmpxchg +#define atomic_cmpxchg_release uatomic_cmpxchg +#endif /* atomic_cmpxchg_relaxed */ + #endif /* _LINUX_ATOMIC_H */ diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 34277842156c..ba838097d3f6 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -25,7 +25,7 @@ #include #include #include -#include +#include extern unsigned long stack_guard_gap; #ifdef CONFIG_MMU @@ -135,10 +135,6 @@ typedef __bitwise unsigned int vm_fault_t; */ #define pr_warn_once pr_err -typedef struct refcount_struct { - atomic_t refs; -} refcount_t; - struct kref { refcount_t refcount; }; @@ -233,15 +229,12 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access */ }; -struct vma_lock { - struct rw_semaphore lock; -}; - - struct file { struct address_space *f_mapping; }; +#define VMA_LOCK_OFFSET 0x40000000 + struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -269,16 +262,13 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* Flag to indicate areas detached from the mm->mm_mt tree */ - bool detached; - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock.lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock.lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -287,7 +277,6 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - struct vma_lock vm_lock; #endif /* @@ -340,6 +329,10 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. 
*/ + refcount_t vm_refcnt; +#endif } __randomize_layout; struct vm_fault {}; @@ -464,33 +457,40 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) return mas_find(&vmi->mas, ULONG_MAX); } -static inline void vma_lock_init(struct vm_area_struct *vma) -{ - init_rwsem(&vma->vm_lock.lock); - vma->vm_lock_seq = UINT_MAX; -} - +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. + */ static inline void vma_assert_attached(struct vm_area_struct *vma) { - WARN_ON_ONCE(vma->detached); + WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); } static inline void vma_assert_detached(struct vm_area_struct *vma) { - WARN_ON_ONCE(!vma->detached); + WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); } static inline void vma_assert_write_locked(struct vm_area_struct *); static inline void vma_mark_attached(struct vm_area_struct *vma) { - vma->detached = false; + vma_assert_write_locked(vma); + vma_assert_detached(vma); + refcount_set(&vma->vm_refcnt, 1); } static inline void vma_mark_detached(struct vm_area_struct *vma) { - /* When detaching vma should be write-locked */ vma_assert_write_locked(vma); - vma->detached = true; + vma_assert_attached(vma); + /* We are the only writer, so no need to use vma_refcount_put(). */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* + * Reader must have temporarily raised vm_refcnt but it will + * drop it without using the vma since vma is write-locked. + */ + } } extern const struct vm_operations_struct vma_dummy_vm_ops; @@ -503,9 +503,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - /* vma is not locked, can't use vma_mark_detached() */ - vma->detached = true; - vma_lock_init(vma); + vma->vm_lock_seq = UINT_MAX; } static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) @@ -528,10 +526,9 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return NULL; memcpy(new, orig, sizeof(*new)); - vma_lock_init(new); + refcount_set(&new->vm_refcnt, 0); + new->vm_lock_seq = UINT_MAX; INIT_LIST_HEAD(&new->anon_vma_chain); - /* vma is not locked, can't use vma_mark_detached() */ - new->detached = true; return new; } From 6bef4c2f97221f3b595d08c8656eb5845ef80fe9 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:50 -0800 Subject: [PATCH 116/431] mm: move lesser used vma_area_struct members into the last cacheline Move several vma_area_struct members which are rarely or never used during page fault handling into the last cacheline to better pack vm_area_struct. As a result vm_area_struct will fit into 3 as opposed to 4 cachelines. 
New typical vm_area_struct layout: struct vm_area_struct { union { struct { long unsigned int vm_start; /* 0 8 */ long unsigned int vm_end; /* 8 8 */ }; /* 0 16 */ freeptr_t vm_freeptr; /* 0 8 */ }; /* 0 16 */ struct mm_struct * vm_mm; /* 16 8 */ pgprot_t vm_page_prot; /* 24 8 */ union { const vm_flags_t vm_flags; /* 32 8 */ vm_flags_t __vm_flags; /* 32 8 */ }; /* 32 8 */ unsigned int vm_lock_seq; /* 40 4 */ /* XXX 4 bytes hole, try to pack */ struct list_head anon_vma_chain; /* 48 16 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct anon_vma * anon_vma; /* 64 8 */ const struct vm_operations_struct * vm_ops; /* 72 8 */ long unsigned int vm_pgoff; /* 80 8 */ struct file * vm_file; /* 88 8 */ void * vm_private_data; /* 96 8 */ atomic_long_t swap_readahead_info; /* 104 8 */ struct mempolicy * vm_policy; /* 112 8 */ struct vma_numab_state * numab_state; /* 120 8 */ /* --- cacheline 2 boundary (128 bytes) --- */ refcount_t vm_refcnt (__aligned__(64)); /* 128 4 */ /* XXX 4 bytes hole, try to pack */ struct { struct rb_node rb (__aligned__(8)); /* 136 24 */ long unsigned int rb_subtree_last; /* 160 8 */ } __attribute__((__aligned__(8))) shared; /* 136 32 */ struct anon_vma_name * anon_name; /* 168 8 */ struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 176 8 */ /* size: 192, cachelines: 3, members: 18 */ /* sum members: 176, holes: 2, sum holes: 8 */ /* padding: 8 */ /* forced alignments: 2, forced holes: 1, sum forced holes: 4 */ } __attribute__((__aligned__(64))); Memory consumption per 1000 VMAs becomes 48 pages: slabinfo after vm_area_struct changes: ... : ... vm_area_struct ... 192 42 2 : ... Link: https://lkml.kernel.org/r/20250213224655.1680278-14-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 38 +++++++++++++++----------------- tools/testing/vma/vma_internal.h | 37 +++++++++++++++---------------- 2 files changed, 36 insertions(+), 39 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9de0a6cb3c2d..c3aa0e20be41 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -725,17 +725,6 @@ struct vm_area_struct { */ unsigned int vm_lock_seq; #endif - - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. - * - */ - struct { - struct rb_node rb; - unsigned long rb_subtree_last; - } shared; - /* * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma * list, after a COW of one of the file pages. A MAP_SHARED vma @@ -755,14 +744,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_ANON_VMA_NAME - /* - * For private and shared anonymous mappings, a pointer to a null - * terminated string containing the name given to the vma, or NULL if - * unnamed. 
Serialized by mmap_lock. Use anon_vma_name to access. - */ - struct anon_vma_name *anon_name; -#endif #ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; #endif @@ -775,7 +756,6 @@ struct vm_area_struct { #ifdef CONFIG_NUMA_BALANCING struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif - struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_PER_VMA_LOCK /* Unstable RCU readers are allowed to read this. */ refcount_t vm_refcnt ____cacheline_aligned_in_smp; @@ -783,6 +763,24 @@ struct vm_area_struct { struct lockdep_map vmlock_dep_map; #endif #endif + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + * + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; +#ifdef CONFIG_ANON_VMA_NAME + /* + * For private and shared anonymous mappings, a pointer to a null + * terminated string containing the name given to the vma, or NULL if + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. + */ + struct anon_vma_name *anon_name; +#endif + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; #ifdef CONFIG_NUMA diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index ba838097d3f6..b385170fbb8f 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -279,16 +279,6 @@ struct vm_area_struct { unsigned int vm_lock_seq; #endif - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. - * - */ - struct { - struct rb_node rb; - unsigned long rb_subtree_last; - } shared; - /* * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma * list, after a COW of one of the file pages. A MAP_SHARED vma @@ -308,14 +298,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_ANON_VMA_NAME - /* - * For private and shared anonymous mappings, a pointer to a null - * terminated string containing the name given to the vma, or NULL if - * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. - */ - struct anon_vma_name *anon_name; -#endif #ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; #endif @@ -328,11 +310,28 @@ struct vm_area_struct { #ifdef CONFIG_NUMA_BALANCING struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif - struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_PER_VMA_LOCK /* Unstable RCU readers are allowed to read this. */ refcount_t vm_refcnt; #endif + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + * + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; +#ifdef CONFIG_ANON_VMA_NAME + /* + * For private and shared anonymous mappings, a pointer to a null + * terminated string containing the name given to the vma, or NULL if + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. 
+ */ + struct anon_vma_name *anon_name; +#endif + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; struct vm_fault {}; From 3dd98c5c442358c5aefa13e2c91ee9dee32d776e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:51 -0800 Subject: [PATCH 117/431] mm/debug: print vm_refcnt state when dumping the vma vm_refcnt encodes a number of useful states: - whether vma is attached or detached - the number of current vma readers - presence of a vma writer Let's include it in the vma dump. Link: https://lkml.kernel.org/r/20250213224655.1680278-15-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- mm/debug.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/debug.c b/mm/debug.c index e1282b85a877..2d1bd67d957b 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -181,11 +181,17 @@ void dump_vma(const struct vm_area_struct *vma) pr_emerg("vma %px start %px end %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" +#ifdef CONFIG_PER_VMA_LOCK + "refcnt %x\n" +#endif "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, +#ifdef CONFIG_PER_VMA_LOCK + refcount_read(&vma->vm_refcnt), +#endif vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); From e218d9fedd056a0b17468d6e19fddaf2c3550f97 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:52 -0800 Subject: [PATCH 118/431] mm: remove extra vma_numab_state_init() call vma_init() already memset's the whole vm_area_struct to 0, so there is no need to an additional vma_numab_state_init(). Link: https://lkml.kernel.org/r/20250213224655.1680278-16-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 06f179c844c3..aad932c4bcf0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -956,7 +956,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_numab_state_init(vma); vma_lock_init(vma, false); } From e49510bf00de4f832eaaebcfc113795f127ad519 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:53 -0800 Subject: [PATCH 119/431] mm: prepare lock_vma_under_rcu() for vma reuse possibility Once we make vma cache SLAB_TYPESAFE_BY_RCU, it will be possible for a vma to be reused and attached to another mm after lock_vma_under_rcu() locks the vma. lock_vma_under_rcu() should ensure that vma_start_read() is using the original mm and after locking the vma it should ensure that vma->vm_mm has not changed from under us. Link: https://lkml.kernel.org/r/20250213224655.1680278-17-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 ++++++++---- mm/memory.c | 7 ++++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index aad932c4bcf0..e3f962de1677 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -739,10 +739,13 @@ static inline void vma_refcount_put(struct vm_area_struct *vma) * Try to read-lock a vma. The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to * using mmap_lock. The function should never yield false unlocked result. + * False locked result is possible if mm_lock_seq overflows or if vma gets + * reused and attached to a different mm before we lock it. * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got * detached. */ -static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) +static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, + struct vm_area_struct *vma) { int oldcnt; @@ -753,7 +756,7 @@ static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below. */ - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) return NULL; /* @@ -780,7 +783,7 @@ static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all(). 
*/ - if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { + if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) { vma_refcount_put(vma); return NULL; } @@ -914,7 +917,8 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} -static inline struct vm_area_struct *vma_start_read(struct vm_area_struct *vma) +static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, + struct vm_area_struct *vma) { return NULL; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} diff --git a/mm/memory.c b/mm/memory.c index 51f233404b02..39bceed7448f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6452,7 +6452,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma) goto inval; - vma = vma_start_read(vma); + vma = vma_start_read(mm, vma); if (IS_ERR_OR_NULL(vma)) { /* Check if the VMA got isolated after we found it */ if (PTR_ERR(vma) == -EAGAIN) { @@ -6471,8 +6471,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, * fields are accessible for RCU readers. */ - /* Check since vm_start/vm_end might change before we lock the VMA */ - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + /* Check if the vma we locked is the right one. */ + if (unlikely(vma->vm_mm != mm || + address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; rcu_read_unlock(); From 3104138517fc66aad21f4a2487bb572e9fc2e3ec Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:54 -0800 Subject: [PATCH 120/431] mm: make vma cache SLAB_TYPESAFE_BY_RCU To enable SLAB_TYPESAFE_BY_RCU for vma cache we need to ensure that object reuse before RCU grace period is over will be detected by lock_vma_under_rcu(). Current checks are sufficient as long as vma is detached before it is freed. The only place this is not currently happening is in exit_mmap(). Add the missing vma_mark_detached() in exit_mmap(). Another issue which might trick lock_vma_under_rcu() during vma reuse is vm_area_dup(), which copies the entire content of the vma into a new one, overriding new vma's vm_refcnt and temporarily making it appear as attached. This might trick a racing lock_vma_under_rcu() to operate on a reused vma if it found the vma before it got reused. To prevent this situation, we should ensure that vm_refcnt stays at detached state (0) when it is copied and advances to attached state only after it is added into the vma tree. Introduce vm_area_init_from() which preserves new vma's vm_refcnt and use it in vm_area_dup(). Since all vmas are in detached state with no current readers when they are freed, lock_vma_under_rcu() will not be able to take vm_refcnt after vma got detached even if vma is reused. vma_mark_attached() in modified to include a release fence to ensure all stores to the vma happen before vm_refcnt gets initialized. Finally, make vm_area_cachep SLAB_TYPESAFE_BY_RCU. This will facilitate vm_area_struct reuse and will minimize the number of call_rcu() calls. 
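For reference, the reuse-detection pattern that SLAB_TYPESAFE_BY_RCU users
have to follow looks roughly like the sketch below (illustrative only:
my_obj, my_obj_cachep and the key field are hypothetical; lock_vma_under_rcu()
together with vma_mark_attached() is the real instance in this series):

  #include <linux/refcount.h>
  #include <linux/slab.h>

  static struct kmem_cache *my_obj_cachep;	/* created with SLAB_TYPESAFE_BY_RCU */

  struct my_obj {
  	refcount_t refcnt;
  	unsigned long key;
  };

  /* Writer: initialise all fields before publishing the refcount with
   * release ordering, so a reader that wins the refcount race never sees
   * stale fields left over from the previous occupant of this memory. */
  static void my_obj_publish(struct my_obj *obj, unsigned long key)
  {
  	obj->key = key;
  	refcount_set_release(&obj->refcnt, 1);
  }

  static void my_obj_put(struct my_obj *obj)
  {
  	if (refcount_dec_and_test(&obj->refcnt))
  		kmem_cache_free(my_obj_cachep, obj);	/* memory may be reused immediately */
  }

  /* Reader, under rcu_read_lock(): take the reference with acquire
   * ordering first, then re-check the identity, because the memory may
   * already belong to a different object. */
  static struct my_obj *my_obj_tryget(struct my_obj *obj, unsigned long key)
  {
  	if (!refcount_inc_not_zero_acquire(&obj->refcnt))
  		return NULL;			/* object is being freed */
  	if (obj->key != key) {			/* memory was reused */
  		my_obj_put(obj);
  		return NULL;
  	}
  	return obj;
  }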
[surenb@google.com: remove atomic_set_release() usage in tools/] Link: https://lkml.kernel.org/r/20250217054351.2973666-1-surenb@google.com Link: https://lkml.kernel.org/r/20250213224655.1680278-18-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- include/linux/mm.h | 4 +- include/linux/mm_types.h | 13 ++++-- include/linux/slab.h | 6 --- kernel/fork.c | 73 ++++++++++++++++++++------------ mm/mmap.c | 3 +- mm/vma.c | 11 ++--- mm/vma.h | 2 +- tools/include/linux/refcount.h | 5 +++ tools/testing/vma/vma_internal.h | 9 +--- 9 files changed, 70 insertions(+), 56 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index e3f962de1677..14115c9949d8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -258,8 +258,6 @@ void setup_initial_init_mm(void *start_code, void *end_code, struct vm_area_struct *vm_area_alloc(struct mm_struct *); struct vm_area_struct *vm_area_dup(struct vm_area_struct *); void vm_area_free(struct vm_area_struct *); -/* Use only if VMA has no other users */ -void __vm_area_free(struct vm_area_struct *vma); #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; @@ -890,7 +888,7 @@ static inline void vma_mark_attached(struct vm_area_struct *vma) { vma_assert_write_locked(vma); vma_assert_detached(vma); - refcount_set(&vma->vm_refcnt, 1); + refcount_set_release(&vma->vm_refcnt, 1); } void vma_mark_detached(struct vm_area_struct *vma); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c3aa0e20be41..6a93abb4452b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -574,6 +574,12 @@ static inline void *folio_get_private(struct folio *folio) typedef unsigned long vm_flags_t; +/* + * freeptr_t represents a SLUB freelist pointer, which might be encoded + * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. + */ +typedef struct { unsigned long v; } freeptr_t; + /* * A region containing a mapping of a non-memory backed file under NOMMU * conditions. These are held in a global tree and are pinned by the VMAs that @@ -677,6 +683,9 @@ struct vma_numab_state { * * Only explicitly marked struct members may be accessed by RCU readers before * getting a stable reference. + * + * WARNING: when adding new members, please update vm_area_init_from() to copy + * them during vm_area_struct content duplication. */ struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -687,9 +696,7 @@ struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; }; -#ifdef CONFIG_PER_VMA_LOCK - struct rcu_head vm_rcu; /* Used for deferred freeing. 
*/ -#endif + freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */ }; /* diff --git a/include/linux/slab.h b/include/linux/slab.h index ad902a2d692b..f8924fd6ea26 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -243,12 +243,6 @@ enum _slab_flag_bits { #define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED #endif -/* - * freeptr_t represents a SLUB freelist pointer, which might be encoded - * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. - */ -typedef struct { unsigned long v; } freeptr_t; - /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * diff --git a/kernel/fork.c b/kernel/fork.c index 48a0038f606f..364b2d4fd3ef 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -449,6 +449,42 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return vma; } +static void vm_area_init_from(const struct vm_area_struct *src, + struct vm_area_struct *dest) +{ + dest->vm_mm = src->vm_mm; + dest->vm_ops = src->vm_ops; + dest->vm_start = src->vm_start; + dest->vm_end = src->vm_end; + dest->anon_vma = src->anon_vma; + dest->vm_pgoff = src->vm_pgoff; + dest->vm_file = src->vm_file; + dest->vm_private_data = src->vm_private_data; + vm_flags_init(dest, src->vm_flags); + memcpy(&dest->vm_page_prot, &src->vm_page_prot, + sizeof(dest->vm_page_prot)); + /* + * src->shared.rb may be modified concurrently when called from + * dup_mmap(), but the clone will reinitialize it. + */ + data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared))); + memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx, + sizeof(dest->vm_userfaultfd_ctx)); +#ifdef CONFIG_ANON_VMA_NAME + dest->anon_name = src->anon_name; +#endif +#ifdef CONFIG_SWAP + memcpy(&dest->swap_readahead_info, &src->swap_readahead_info, + sizeof(dest->swap_readahead_info)); +#endif +#ifndef CONFIG_MMU + dest->vm_region = src->vm_region; +#endif +#ifdef CONFIG_NUMA + dest->vm_policy = src->vm_policy; +#endif +} + struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); @@ -458,11 +494,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ASSERT_EXCLUSIVE_WRITER(orig->vm_file); - /* - * orig->shared.rb may be modified concurrently, but the clone - * will be reinitialized. - */ - data_race(memcpy(new, orig, sizeof(*new))); + vm_area_init_from(orig, new); vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); @@ -471,7 +503,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return new; } -void __vm_area_free(struct vm_area_struct *vma) +void vm_area_free(struct vm_area_struct *vma) { /* The vma should be detached while being destroyed. 
*/ vma_assert_detached(vma); @@ -480,25 +512,6 @@ void __vm_area_free(struct vm_area_struct *vma) kmem_cache_free(vm_area_cachep, vma); } -#ifdef CONFIG_PER_VMA_LOCK -static void vm_area_free_rcu_cb(struct rcu_head *head) -{ - struct vm_area_struct *vma = container_of(head, struct vm_area_struct, - vm_rcu); - - __vm_area_free(vma); -} -#endif - -void vm_area_free(struct vm_area_struct *vma) -{ -#ifdef CONFIG_PER_VMA_LOCK - call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); -#else - __vm_area_free(vma); -#endif -} - static void account_kernel_stack(struct task_struct *tsk, int account) { if (IS_ENABLED(CONFIG_VMAP_STACK)) { @@ -3156,6 +3169,11 @@ void __init mm_cache_init(void) void __init proc_caches_init(void) { + struct kmem_cache_args args = { + .use_freeptr_offset = true, + .freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr), + }; + sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| @@ -3172,8 +3190,9 @@ void __init proc_caches_init(void) sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - vm_area_cachep = KMEM_CACHE(vm_area_struct, - SLAB_HWCACHE_ALIGN|SLAB_NO_MERGE|SLAB_PANIC| + vm_area_cachep = kmem_cache_create("vm_area_struct", + sizeof(struct vm_area_struct), &args, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); diff --git a/mm/mmap.c b/mm/mmap.c index 6401a1d73f4a..15d6cd7cc845 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1305,7 +1305,8 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - remove_vma(vma, /* unreachable = */ true); + vma_mark_detached(vma); + remove_vma(vma); count++; cond_resched(); vma = vma_next(&vmi); diff --git a/mm/vma.c b/mm/vma.c index 53f4d0efce4d..5cdc5612bfc1 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -420,19 +420,14 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg, /* * Close a vm structure and free it. 
*/ -void remove_vma(struct vm_area_struct *vma, bool unreachable) +void remove_vma(struct vm_area_struct *vma) { might_sleep(); vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - if (unreachable) { - vma_mark_detached(vma); - __vm_area_free(vma); - } else { - vm_area_free(vma); - } + vm_area_free(vma); } /* @@ -1218,7 +1213,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, /* Remove and clean up vmas */ mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - remove_vma(vma, /* unreachable = */ false); + remove_vma(vma); vm_unacct_memory(vms->nr_accounted); validate_mm(mm); diff --git a/mm/vma.h b/mm/vma.h index 55be77ff042f..7356ca5a22d3 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -218,7 +218,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock); -void remove_vma(struct vm_area_struct *vma, bool unreachable); +void remove_vma(struct vm_area_struct *vma); void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next); diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h index 36cb29bc57c2..1f30956e070d 100644 --- a/tools/include/linux/refcount.h +++ b/tools/include/linux/refcount.h @@ -60,6 +60,11 @@ static inline void refcount_set(refcount_t *r, unsigned int n) atomic_set(&r->refs, n); } +static inline void refcount_set_release(refcount_t *r, unsigned int n) +{ + atomic_set(&r->refs, n); +} + static inline unsigned int refcount_read(const refcount_t *r) { return atomic_read(&r->refs); diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index b385170fbb8f..572ab2cea763 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -476,7 +476,7 @@ static inline void vma_mark_attached(struct vm_area_struct *vma) { vma_assert_write_locked(vma); vma_assert_detached(vma); - refcount_set(&vma->vm_refcnt, 1); + refcount_set_release(&vma->vm_refcnt, 1); } static inline void vma_mark_detached(struct vm_area_struct *vma) @@ -696,14 +696,9 @@ static inline void mpol_put(struct mempolicy *) { } -static inline void __vm_area_free(struct vm_area_struct *vma) -{ - free(vma); -} - static inline void vm_area_free(struct vm_area_struct *vma) { - __vm_area_free(vma); + free(vma); } static inline void lru_add_drain(void) From 795f29616e85aff32248e695c9cc1fbc8b4c9632 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 13 Feb 2025 14:46:55 -0800 Subject: [PATCH 121/431] docs/mm: document latest changes to vm_lock Change the documentation to reflect that vm_lock is integrated into vma and replaced with vm_refcnt. Document newly introduced vma_start_read_locked{_nested} functions. Link: https://lkml.kernel.org/r/20250213224655.1680278-19-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. Howlett Reviewed-by: Lorenzo Stoakes Tested-by: Shivank Garg Link: https://lkml.kernel.org/r/5e19ec93-8307-47c2-bb13-3ddf7150624e@amd.com Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Suren Baghdasaryan Cc: Wei Yang Cc: Will Deacon Cc: Heiko Carstens Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- Documentation/mm/process_addrs.rst | 44 ++++++++++++++++++------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst index 81417fa2ed20..e6756e78b476 100644 --- a/Documentation/mm/process_addrs.rst +++ b/Documentation/mm/process_addrs.rst @@ -716,9 +716,14 @@ calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`, before releasing the RCU lock via :c:func:`!rcu_read_unlock`. -VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for -their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it -via :c:func:`!vma_end_read`. +In cases when the user already holds mmap read lock, :c:func:`!vma_start_read_locked` +and :c:func:`!vma_start_read_locked_nested` can be used. These functions do not +fail due to lock contention but the caller should still check their return values +in case they fail for other reasons. + +VMA read locks increment :c:member:`!vma.vm_refcnt` reference counter for their +duration and the caller of :c:func:`!lock_vma_under_rcu` must drop it via +:c:func:`!vma_end_read`. VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances where a VMA is about to be modified, unlike :c:func:`!vma_start_read` the lock is always @@ -726,9 +731,9 @@ acquired. An mmap write lock **must** be held for the duration of the VMA write lock, releasing or downgrading the mmap write lock also releases the VMA write lock so there is no :c:func:`!vma_end_write` function. -Note that a semaphore write lock is not held across a VMA lock. Rather, a -sequence number is used for serialisation, and the write semaphore is only -acquired at the point of write lock to update this. +Note that when write-locking a VMA lock, the :c:member:`!vma.vm_refcnt` is temporarily +modified so that readers can detect the presense of a writer. The reference counter is +restored once the vma sequence number used for serialisation is updated. This ensures the semantics we require - VMA write locks provide exclusive write access to the VMA. @@ -738,7 +743,7 @@ Implementation details The VMA lock mechanism is designed to be a lightweight means of avoiding the use of the heavily contended mmap lock. It is implemented using a combination of a -read/write semaphore and sequence numbers belonging to the containing +reference counter and sequence numbers belonging to the containing :c:struct:`!struct mm_struct` and the VMA. Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic @@ -779,28 +784,31 @@ release of any VMA locks on its release makes sense, as you would never want to keep VMAs locked across entirely separate write operations. It also maintains correct lock ordering. -Each time a VMA read lock is acquired, we acquire a read lock on the -:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that -the sequence count of the VMA does not match that of the mm. +Each time a VMA read lock is acquired, we increment :c:member:`!vma.vm_refcnt` +reference counter and check that the sequence count of the VMA does not match +that of the mm. -If it does, the read lock fails. 
If it does not, we hold the lock, excluding -writers, but permitting other readers, who will also obtain this lock under RCU. +If it does, the read lock fails and :c:member:`!vma.vm_refcnt` is dropped. +If it does not, we keep the reference counter raised, excluding writers, but +permitting other readers, who can also obtain this lock under RCU. Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu` are also RCU safe, so the whole read lock operation is guaranteed to function correctly. -On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock` -read/write semaphore, before setting the VMA's sequence number under this lock, -also simultaneously holding the mmap write lock. +On the write side, we set a bit in :c:member:`!vma.vm_refcnt` which can't be +modified by readers and wait for all readers to drop their reference count. +Once there are no readers, the VMA's sequence number is set to match that of +the mm. During this entire operation mmap write lock is held. This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep until these are finished and mutual exclusion is achieved. -After setting the VMA's sequence number, the lock is released, avoiding -complexity with a long-term held write lock. +After setting the VMA's sequence number, the bit in :c:member:`!vma.vm_refcnt` +indicating a writer is cleared. From this point on, VMA's sequence number will +indicate VMA's write-locked state until mmap write lock is dropped or downgraded. -This clever combination of a read/write semaphore and sequence count allows for +This clever combination of a reference counter and sequence count allows for fast RCU-based per-VMA lock acquisition (especially on page fault, though utilised elsewhere) with minimal complexity around lock ordering. From fcd807a03b864e2c7b2aa5eaade185127c4e2414 Mon Sep 17 00:00:00 2001 From: Marcelo Moreira Date: Mon, 17 Feb 2025 18:54:31 -0300 Subject: [PATCH 122/431] Docs/mm/damon: fix spelling and grammar in monitoring_intervals_tuning_example.rst This patch fixes some spelling and grammar mistakes in the documentation, improving the readability. - multipled -> multiplied - idential -> identical - minuts -> minutes - efficieny -> efficiency Link: https://lkml.kernel.org/r/20250217215512.12833-1-marcelomoreira1905@gmail.com Signed-off-by: Marcelo Moreira Reviewed-by: SeongJae Park Cc: Shuah khan Signed-off-by: Andrew Morton --- .../mm/damon/monitoring_intervals_tuning_example.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst index 334a854efb40..7207cbed591f 100644 --- a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst +++ b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst @@ -36,7 +36,7 @@ Then, list the DAMON-found regions of different access patterns, sorted by the "access temperature". "Access temperature" is a metric representing the access-hotness of a region. It is calculated as a weighted sum of the access frequency and the age of the region. If the access frequency is 0 %, the -temperature is multipled by minus one. That is, if a region is not accessed, +temperature is multiplied by minus one. That is, if a region is not accessed, it gets minus temperature and it gets lower as not accessed for longer time. 
The sorting is in temperature-ascendint order, so the region at the top of the list is the coldest, and the one at the bottom is the hottest one. :: @@ -58,11 +58,11 @@ list is the coldest, and the one at the bottom is the hottest one. :: The list shows not seemingly hot regions, and only minimum access pattern diversity. Every region has zero access frequency. The number of region is 10, which is the default ``min_nr_regions value``. Size of each region is also -nearly idential. We can suspect this is because “adaptive regions adjustment” +nearly identical. We can suspect this is because “adaptive regions adjustment” mechanism was not well working. As the guide suggested, we can get relative hotness of regions using ``age`` as the recency information. That would be better than nothing, but given the fact that the longest age is only about 6 -seconds while we waited about ten minuts, it is unclear how useful this will +seconds while we waited about ten minutes, it is unclear how useful this will be. The temperature ranges to total size of regions of each range histogram @@ -190,7 +190,7 @@ for sampling and aggregation intervals, respectively). :: The number of regions having different access patterns has significantly increased. Size of each region is also more varied. Total size of non-zero access frequency regions is also significantly increased. Maybe this is already -good enough to make some meaningful memory management efficieny changes. +good enough to make some meaningful memory management efficiency changes. 800ms/16s intervals: Another bias ================================= From 63a23847dc47113b879a5f53cc0ca5cedc881ffd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Feb 2025 19:20:06 +0000 Subject: [PATCH 123/431] fs: convert block_commit_write() to take a folio All callers now have a folio, so pass it in instead of converting folio->page->folio. 
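In short (both prototypes below are taken verbatim from the include/linux/buffer_head.h hunk in this patch; this is only an illustrative summary, not additional code):

        /* old prototype, removed by this patch */
        void block_commit_write(struct page *page, unsigned int from, unsigned int to);

        /* new prototype; callers in ext4, iomap, ocfs2 and udf now pass the
         * folio directly instead of converting it back via &folio->page */
        void block_commit_write(struct folio *folio, size_t from, size_t to);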
Link: https://lkml.kernel.org/r/20250217192009.437916-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- fs/buffer.c | 14 ++++---------- fs/ext4/inline.c | 2 +- fs/ext4/move_extent.c | 2 +- fs/iomap/buffered-io.c | 2 +- fs/ocfs2/aops.c | 4 ++-- fs/ocfs2/file.c | 2 +- fs/udf/file.c | 2 +- include/linux/buffer_head.h | 2 +- 8 files changed, 12 insertions(+), 18 deletions(-) diff --git a/fs/buffer.c b/fs/buffer.c index cc8452f60251..c66a59bb068b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2166,7 +2166,7 @@ int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, } EXPORT_SYMBOL(__block_write_begin); -static void __block_commit_write(struct folio *folio, size_t from, size_t to) +void block_commit_write(struct folio *folio, size_t from, size_t to) { size_t block_start, block_end; bool partial = false; @@ -2204,6 +2204,7 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to) if (!partial) folio_mark_uptodate(folio); } +EXPORT_SYMBOL(block_commit_write); /* * block_write_begin takes care of the basic task of block allocation and @@ -2262,7 +2263,7 @@ int block_write_end(struct file *file, struct address_space *mapping, flush_dcache_folio(folio); /* This could be a short (even 0-length) commit */ - __block_commit_write(folio, start, start + copied); + block_commit_write(folio, start, start + copied); return copied; } @@ -2578,13 +2579,6 @@ int cont_write_begin(struct file *file, struct address_space *mapping, } EXPORT_SYMBOL(cont_write_begin); -void block_commit_write(struct page *page, unsigned from, unsigned to) -{ - struct folio *folio = page_folio(page); - __block_commit_write(folio, from, to); -} -EXPORT_SYMBOL(block_commit_write); - /* * block_page_mkwrite() is not allowed to change the file size as it gets * called from a page fault handler when a page is first dirtied. 
Hence we must @@ -2630,7 +2624,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, if (unlikely(ret)) goto out_unlock; - __block_commit_write(folio, 0, end); + block_commit_write(folio, 0, end); folio_mark_dirty(folio); folio_wait_stable(folio); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3536ca7e4fcc..0af474c8b260 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -637,7 +637,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping, goto retry; if (folio) - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); out: if (folio) { folio_unlock(folio); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 898443e98efc..48649be64d6a 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -399,7 +399,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, bh = bh->b_this_page; } - block_commit_write(&folio[0]->page, from, from + replaced_size); + block_commit_write(folio[0], from, from + replaced_size); /* Even in case of data=writeback it is reasonable to pin * inode to transaction, to prevent unexpected data loss */ diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index d303e6c8900c..f3904d13cda4 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1484,7 +1484,7 @@ static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, &iter->iomap); if (ret) return ret; - block_commit_write(&folio->page, 0, length); + block_commit_write(folio, 0, length); } else { WARN_ON_ONCE(!folio_test_uptodate(folio)); folio_mark_dirty(folio); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 5bbeb6fbb1ac..ee1d92ed950f 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode, ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); } } } @@ -2012,7 +2012,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } - block_commit_write(&folio->page, from, to); + block_commit_write(folio, from, to); } } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e54f2c4b5a90..2056cf08ac1e 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -813,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, /* must not update i_size! 
*/ - block_commit_write(&folio->page, block_start + 1, block_start + 1); + block_commit_write(folio, block_start + 1, block_start + 1); } /* diff --git a/fs/udf/file.c b/fs/udf/file.c index 412fe7c4d348..0d76c4f37b3e 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -69,7 +69,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf) goto out_unlock; } - block_commit_write(&folio->page, 0, end); + block_commit_write(folio, 0, end); out_dirty: folio_mark_dirty(folio); folio_wait_stable(folio); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 932139c5d46f..6672e1a5031c 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -271,7 +271,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, struct folio **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); -void block_commit_write(struct page *page, unsigned int from, unsigned int to); +void block_commit_write(struct folio *folio, size_t from, size_t to); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); From 52d671a1a36a16f3a0dd9a2beff964e75bce9787 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Feb 2025 19:20:07 +0000 Subject: [PATCH 124/431] fs: remove page_file_mapping() This wrapper has no more callers. Delete it. Link: https://lkml.kernel.org/r/20250217192009.437916-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 47bfc6b1b632..975c56fb4f85 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -575,11 +575,6 @@ static inline struct address_space *folio_flush_mapping(struct folio *folio) return folio_mapping(folio); } -static inline struct address_space *page_file_mapping(struct page *page) -{ - return folio_file_mapping(page_folio(page)); -} - /** * folio_inode - Get the host inode for this folio. * @folio: The folio. From 0d40cfe63a2f19b9d375382e6d90b9ebd412901e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Feb 2025 19:20:08 +0000 Subject: [PATCH 125/431] fs: remove folio_file_mapping() No callers of this function remain as filesystems no longer see swapfile pages through their normal read/write paths. Link: https://lkml.kernel.org/r/20250217192009.437916-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 975c56fb4f85..ad7c0f615e9b 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -535,26 +535,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); -/** - * folio_file_mapping - Find the mapping this folio belongs to. - * @folio: The folio. - * - * For folios which are in the page cache, return the mapping that this - * page belongs to. Folios in the swap cache return the mapping of the - * swap file or swap device where the data is stored. This is different - * from the mapping returned by folio_mapping(). The only reason to - * use it is if, like NFS, you return 0 from ->activate_swapfile. 
- * - * Do not call this for folios which aren't in the page cache or swap cache. - */ -static inline struct address_space *folio_file_mapping(struct folio *folio) -{ - if (unlikely(folio_test_swapcache(folio))) - return swapcache_mapping(folio); - - return folio->mapping; -} - /** * folio_flush_mapping - Find the file mapping this folio belongs to. * @folio: The folio. From 8e4909d693224134fb14ae082c379168b43112c9 Mon Sep 17 00:00:00 2001 From: Suchit K Date: Sat, 15 Feb 2025 22:30:42 +0530 Subject: [PATCH 126/431] Documentation/mm: fix spelling mistake The word watermark was misspelled as "watemark". Link: https://lkml.kernel.org/r/CAO9wTFhe4sf1eVVgijt2cdLPPsUHBj7B=HN-380_JSpve5KbvQ@mail.gmail.com Signed-off-by: Suchit Cc: Shuah Khan Signed-off-by: Andrew Morton --- Documentation/mm/balance.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/mm/balance.rst b/Documentation/mm/balance.rst index abaa78561c31..c4962c89a7f5 100644 --- a/Documentation/mm/balance.rst +++ b/Documentation/mm/balance.rst @@ -81,7 +81,7 @@ Page stealing from process memory and shm is done if stealing the page would alleviate memory pressure on any zone in the page's node that has fallen below its watermark. -watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These +watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These are per-zone fields, used to determine when a zone needs to be balanced. When the number of pages falls below watermark[WMARK_MIN], the hysteric field low_on_memory gets set. This stays set till the number of free pages becomes From af3b45aac5c9d0bdaebf4e93e3fecddc3f363857 Mon Sep 17 00:00:00 2001 From: Ujwal Kundur Date: Sat, 15 Feb 2025 13:48:03 +0530 Subject: [PATCH 127/431] selftests/mm: fix spelling Fix misspelling flagged by codespell. Link: https://lkml.kernel.org/r/20250215081803.1793-1-ujwal.kundur@gmail.com Signed-off-by: Ujwal Kundur Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 31e0c8a3110d..5457a078690d 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -348,7 +348,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) /* * After initialization of area_src, we must explicitly release pages * for area_dst to make sure it's fully empty. Otherwise we could have - * some area_dst pages be errornously initialized with zero pages, + * some area_dst pages be erroneously initialized with zero pages, * hence we could hit memory corruption later in the test. * * One example is when THP is globally enabled, above allocate_area() From 86758b504864913233f6a16076184ba784cd4466 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 18 Feb 2025 15:49:54 +0530 Subject: [PATCH 128/431] mm/ioremap: pass pgprot_t to ioremap_prot() instead of unsigned long ioremap_prot() currently accepts pgprot_val parameter as an unsigned long, thus implicitly assuming that pgprot_val and pgprot_t could never be bigger than unsigned long. But this assumption soon will not be true on arm64 when using D128 pgtables. In 128 bit page table configuration, unsigned long is 64 bit, but pgprot_t is 128 bit. Passing platform abstracted pgprot_t argument is better as compared to size based data types. Let's change the parameter to directly pass pgprot_t like another similar helper generic_ioremap_prot(). 
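To summarize the prototype change (both declarations are taken verbatim from the asm-generic/io.h hunk below and are shown here only for illustration):

        /* before: callers pass a raw value such as pgprot_val(PAGE_KERNEL),
         * which truncates a 128-bit pgprot_val in the D128 configuration */
        void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
                                   unsigned long prot);

        /* after: the abstract pgprot_t is passed through untouched, and
         * callers simply hand over PAGE_KERNEL, __pgprot(...), etc. */
        void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
                                   pgprot_t prot);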
Without this change in place, D128 configuration does not work on arm64 as the top 64 bits gets silently stripped when passing the protection value to this function. Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com Signed-off-by: Ryan Roberts Co-developed-by: Anshuman Khandual Signed-off-by: Anshuman Khandual Acked-by: Catalin Marinas [arm64] Signed-off-by: Andrew Morton --- arch/arc/mm/ioremap.c | 6 ++---- arch/arm64/include/asm/io.h | 6 +++--- arch/arm64/kernel/acpi.c | 2 +- arch/arm64/mm/ioremap.c | 3 +-- arch/csky/include/asm/io.h | 2 +- arch/loongarch/include/asm/io.h | 10 +++++----- arch/mips/include/asm/io.h | 8 ++++---- arch/mips/mm/ioremap.c | 4 ++-- arch/mips/mm/ioremap64.c | 4 ++-- arch/parisc/include/asm/io.h | 2 +- arch/parisc/mm/ioremap.c | 4 ++-- arch/powerpc/include/asm/io.h | 2 +- arch/powerpc/mm/ioremap.c | 4 ++-- arch/powerpc/platforms/ps3/spu.c | 4 ++-- arch/riscv/include/asm/io.h | 2 +- arch/riscv/kernel/acpi.c | 2 +- arch/s390/include/asm/io.h | 4 ++-- arch/s390/pci/pci.c | 4 ++-- arch/sh/boards/mach-landisk/setup.c | 2 +- arch/sh/boards/mach-lboxre2/setup.c | 2 +- arch/sh/boards/mach-sh03/setup.c | 2 +- arch/sh/include/asm/io.h | 2 +- arch/sh/mm/ioremap.c | 3 +-- arch/x86/include/asm/io.h | 2 +- arch/x86/mm/ioremap.c | 4 ++-- arch/xtensa/include/asm/io.h | 6 +++--- arch/xtensa/mm/ioremap.c | 4 ++-- include/asm-generic/io.h | 4 ++-- mm/ioremap.c | 4 ++-- mm/memory.c | 6 +++--- 30 files changed, 55 insertions(+), 59 deletions(-) diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c index b07004d53267..fd8897a0e52c 100644 --- a/arch/arc/mm/ioremap.c +++ b/arch/arc/mm/ioremap.c @@ -32,7 +32,7 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size) return (void __iomem *)(u32)paddr; return ioremap_prot(paddr, size, - pgprot_val(pgprot_noncached(PAGE_KERNEL))); + pgprot_noncached(PAGE_KERNEL)); } EXPORT_SYMBOL(ioremap); @@ -44,10 +44,8 @@ EXPORT_SYMBOL(ioremap); * might need finer access control (R/W/X) */ void __iomem *ioremap_prot(phys_addr_t paddr, size_t size, - unsigned long flags) + pgprot_t prot) { - pgprot_t prot = __pgprot(flags); - /* force uncached */ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot)); } diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 76ebbdc6ffdd..9b96840fb979 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -270,9 +270,9 @@ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook); #define _PAGE_IOREMAP PROT_DEVICE_nGnRE #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), PROT_NORMAL_NC) + ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC)) #define ioremap_np(addr, size) \ - ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE) + ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) /* * io{read,write}{16,32,64}be() macros @@ -293,7 +293,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size) if (pfn_is_map_memory(__phys_to_pfn(addr))) return (void __iomem *)__phys_to_virt(addr); - return ioremap_prot(addr, size, PROT_NORMAL); + return ioremap_prot(addr, size, __pgprot(PROT_NORMAL)); } /* diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index e6f66491fbe9..b9a66fc146c9 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -379,7 +379,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) prot = __acpi_get_writethrough_mem_attribute(); } } - return ioremap_prot(phys, size, pgprot_val(prot)); + return ioremap_prot(phys, size, 
prot); } /* diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 6cc0b7e7eb03..10e246f11271 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c @@ -15,10 +15,9 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook) } void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t pgprot) { unsigned long last_addr = phys_addr + size - 1; - pgprot_t pgprot = __pgprot(prot); /* Don't allow outside PHYS_MASK */ if (last_addr & ~PHYS_MASK) diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h index ed53f0b47388..536d3bf32ff1 100644 --- a/arch/csky/include/asm/io.h +++ b/arch/csky/include/asm/io.h @@ -36,7 +36,7 @@ */ #define ioremap_wc(addr, size) \ ioremap_prot((addr), (size), \ - (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED) + __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)) #include diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index e77a56eaf906..eaff72b38dc8 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -23,9 +23,9 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size); #ifdef CONFIG_ARCH_IOREMAP static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - switch (prot_val & _CACHE_MASK) { + switch (pgprot_val(prot) & _CACHE_MASK) { case _CACHE_CC: return (void __iomem *)(unsigned long)(CACHE_BASE + offset); case _CACHE_SUC: @@ -38,7 +38,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, } #define ioremap(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) + ioremap_prot((offset), (size), PAGE_KERNEL_SUC) #define iounmap(addr) ((void)(addr)) @@ -55,10 +55,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, */ #define ioremap_wc(offset, size) \ ioremap_prot((offset), (size), \ - pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)) + wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC) #define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((offset), (size), PAGE_KERNEL) #define mmiowb() wmb() diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0bddb568af7c..4dacf40ebefd 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -126,7 +126,7 @@ static inline unsigned long isa_virt_to_bus(volatile void *address) } void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val); + pgprot_t prot); void iounmap(const volatile void __iomem *addr); /* @@ -141,7 +141,7 @@ void iounmap(const volatile void __iomem *addr); * address. */ #define ioremap(offset, size) \ - ioremap_prot((offset), (size), _CACHE_UNCACHED) + ioremap_prot((offset), (size), __pgprot(_CACHE_UNCACHED)) /* * ioremap_cache - map bus memory into CPU space @@ -159,7 +159,7 @@ void iounmap(const volatile void __iomem *addr); * memory-like regions on I/O busses. */ #define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), _page_cachable_default) + ioremap_prot((offset), (size), __pgprot(_page_cachable_default)) /* * ioremap_wc - map bus memory into CPU space @@ -180,7 +180,7 @@ void iounmap(const volatile void __iomem *addr); * _CACHE_UNCACHED option (see cpu_probe() method). 
*/ #define ioremap_wc(offset, size) \ - ioremap_prot((offset), (size), boot_cpu_data.writecombine) + ioremap_prot((offset), (size), __pgprot(boot_cpu_data.writecombine)) #if defined(CONFIG_CPU_CAVIUM_OCTEON) #define war_io_reorder_wmb() wmb() diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index d8243d61ef32..c6c4576cd4a8 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -44,9 +44,9 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, * ioremap_prot gives the caller control over cache coherency attributes (CCA) */ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; unsigned long offset, pfn, last_pfn; struct vm_struct *area; phys_addr_t last_addr; diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c index 15e7820d6a5f..acc03ba20098 100644 --- a/arch/mips/mm/ioremap64.c +++ b/arch/mips/mm/ioremap64.c @@ -3,9 +3,9 @@ #include void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { - unsigned long flags = prot_val & _CACHE_MASK; + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE); void __iomem *addr; diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 3143cf29ce27..04b783e2a6d1 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -131,7 +131,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr) _PAGE_ACCESSED | _PAGE_NO_CACHE) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), _PAGE_IOREMAP) + ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP)) #define pci_iounmap pci_iounmap diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c index fd996472dfe7..0b65c4b3baee 100644 --- a/arch/parisc/mm/ioremap.c +++ b/arch/parisc/mm/ioremap.c @@ -14,7 +14,7 @@ #include void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { #ifdef CONFIG_EISA unsigned long end = phys_addr + size - 1; @@ -41,6 +41,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, } } - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index fd92ac450169..0436cdc7cfcc 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -895,7 +895,7 @@ void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_cache(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((addr), (size), PAGE_KERNEL) #define iounmap iounmap diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index 7b0afcabd89f..0d6615620ada 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -41,9 +41,9 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) return __ioremap_caller(addr, size, prot, caller); } -void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot) { - pte_t pte = __pte(flags); + pte_t pte = __pte(pgprot_val(prot)); void *caller = __builtin_return_address(0); /* writeable implies 
dirty for kernel addresses */ diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 4a2520ec6d7f..61b37c9400b2 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu) static int __init setup_areas(struct spu *spu) { struct table {char* name; unsigned long addr; unsigned long size;}; - unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, - sizeof(struct spe_shadow), shadow_flags); + sizeof(struct spe_shadow), + pgprot_noncached_wc(PAGE_KERNEL_RO)); if (!spu_pdata(spu)->shadow) { pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); goto fail_ioremap; diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 1c5c641075d2..0536846db9b6 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -137,7 +137,7 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) #ifdef CONFIG_MMU #define arch_memremap_wb(addr, size) \ - ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) + ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL))) #endif #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c index 2fd29695a788..3f6d5a6789e8 100644 --- a/arch/riscv/kernel/acpi.c +++ b/arch/riscv/kernel/acpi.c @@ -305,7 +305,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } } - return ioremap_prot(phys, size, pgprot_val(prot)); + return ioremap_prot(phys, size, prot); } #ifdef CONFIG_PCI diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index fc9933a743d6..82f1043a4fc3 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -33,9 +33,9 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) #define ioremap_wc(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) + ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL)) #define ioremap_wt(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) + ioremap_prot((addr), (size), pgprot_writethrough(PAGE_KERNEL)) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 88f72745fa59..9fdcd733d40e 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -255,7 +255,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { /* * When PCI MIO instructions are unavailable the "physical" address @@ -265,7 +265,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, if (!static_branch_unlikely(&have_mio)) return (void __iomem *)phys_addr; - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 2c44b94f82fb..1b3f43c3ac46 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -58,7 +58,7 @@ static int __init landisk_devices_setup(void) /* open I/O area window */ paddrbase = virt_to_phys((void *)PA_AREA5_IO); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, 
pgprot_val(prot)); + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); if (!cf_ide_base) { printk("allocate_cf_area : can't open CF I/O window!\n"); return -ENOMEM; diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c index 20d01b430f2a..e95bde207adb 100644 --- a/arch/sh/boards/mach-lboxre2/setup.c +++ b/arch/sh/boards/mach-lboxre2/setup.c @@ -53,7 +53,7 @@ static int __init lboxre2_devices_setup(void) paddrbase = virt_to_phys((void*)PA_AREA5_IO); psize = PAGE_SIZE; prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot)); + cf0_io_base = (u32)ioremap_prot(paddrbase, psize, prot); if (!cf0_io_base) { printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); return -ENOMEM; diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c index 3901b6031ad5..5c9312f334d3 100644 --- a/arch/sh/boards/mach-sh03/setup.c +++ b/arch/sh/boards/mach-sh03/setup.c @@ -75,7 +75,7 @@ static int __init sh03_devices_setup(void) /* open I/O area window */ paddrbase = virt_to_phys((void *)PA_AREA5_IO); prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot)); + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); if (!cf_ide_base) { printk("allocate_cf_area : can't open CF I/O window!\n"); return -ENOMEM; diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index cf5eab840d57..531ec49b878d 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -299,7 +299,7 @@ unsigned long long poke_real_address_q(unsigned long long addr, #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE) #define ioremap_cache(addr, size) \ - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) + ioremap_prot((addr), (size), PAGE_KERNEL) #endif /* CONFIG_MMU */ #include diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 33d20f34560f..5bbde53fb32d 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -73,10 +73,9 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) #endif /* CONFIG_29BIT */ void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t pgprot) { void __iomem *mapped; - pgprot_t pgprot = __pgprot(prot); mapped = __ioremap_trapped(phys_addr, size); if (mapped) diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index ed580c7f9d0a..0794936ec187 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -170,7 +170,7 @@ extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); #define ioremap_uc ioremap_uc extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); #define ioremap_cache ioremap_cache -extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot); #define ioremap_prot ioremap_prot extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size); #define ioremap_encrypted ioremap_encrypted diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 38ff7791a9c7..d501f0871aa5 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -440,10 +440,10 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) EXPORT_SYMBOL(ioremap_cache); void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, - unsigned long prot_val) + pgprot_t prot) { return 
__ioremap_caller(phys_addr, size, - pgprot2cachemode(__pgprot(prot_val)), + pgprot2cachemode(prot), __builtin_return_address(0), false); } EXPORT_SYMBOL(ioremap_prot); diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 934e58399c8c..7cdcc2deab3e 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -29,7 +29,7 @@ * I/O memory mapping functions. */ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot); + pgprot_t prot); #define ioremap_prot ioremap_prot #define iounmap iounmap @@ -40,7 +40,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size) return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR); else return ioremap_prot(offset, size, - pgprot_val(pgprot_noncached(PAGE_KERNEL))); + pgprot_noncached(PAGE_KERNEL)); } #define ioremap ioremap @@ -51,7 +51,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset, && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE) return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR); else - return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL)); + return ioremap_prot(offset, size, PAGE_KERNEL); } #define ioremap_cache ioremap_cache diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c index 8ca660b7ab49..26f238fa9d0d 100644 --- a/arch/xtensa/mm/ioremap.c +++ b/arch/xtensa/mm/ioremap.c @@ -11,12 +11,12 @@ #include void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { unsigned long pfn = __phys_to_pfn((phys_addr)); WARN_ON(pfn_valid(pfn)); - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index a5cbbf3e26ec..402020b23423 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1111,7 +1111,7 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, pgprot_t prot); void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot); + pgprot_t prot); void iounmap(volatile void __iomem *addr); void generic_iounmap(volatile void __iomem *addr); @@ -1120,7 +1120,7 @@ void generic_iounmap(volatile void __iomem *addr); static inline void __iomem *ioremap(phys_addr_t addr, size_t size) { /* _PAGE_IOREMAP needs to be supplied by the architecture */ - return ioremap_prot(addr, size, _PAGE_IOREMAP); + return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP)); } #endif #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ diff --git a/mm/ioremap.c b/mm/ioremap.c index 3e049dfb28bd..c36dd9f62fd5 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -50,9 +50,9 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, #ifndef ioremap_prot void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, - unsigned long prot) + pgprot_t prot) { - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); + return generic_ioremap_prot(phys_addr, size, prot); } EXPORT_SYMBOL(ioremap_prot); #endif diff --git a/mm/memory.c b/mm/memory.c index 39bceed7448f..270c9357475d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6727,7 +6727,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; - unsigned long prot = 0; + pgprot_t prot = __pgprot(0); void __iomem *maddr; int offset = offset_in_page(addr); int ret = -EINVAL; @@ -6737,7 +6737,7 @@ int generic_access_phys(struct vm_area_struct *vma, 
unsigned long addr, retry: if (follow_pfnmap_start(&args)) return -EINVAL; - prot = pgprot_val(args.pgprot); + prot = args.pgprot; phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; writable = args.writable; follow_pfnmap_end(&args); @@ -6752,7 +6752,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, if (follow_pfnmap_start(&args)) goto out_unmap; - if ((prot != pgprot_val(args.pgprot)) || + if ((pgprot_val(prot) != pgprot_val(args.pgprot)) || (phys_addr != (args.pfn << PAGE_SHIFT)) || (writable != args.writable)) { follow_pfnmap_end(&args); From 381ff0341ac63d245035f274cacd3f1b8068d388 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 18 Feb 2025 14:37:04 -0800 Subject: [PATCH 129/431] Docs/mm/damon/design: fix typo on DAMOS filters usage doc link Patch series "Docs/mm/damon: misc DAMOS filters documentation fixes and improves". Fix and improve DAMOS filters documentation by fixing a copy-paste typo, adding hugepage_size filter documentation on design doc, moving logic details from usage to design, clarify DAMOS filters handling sequence based on handling layer, and re-organizing the filters type list for easier understanding of the handling sequence. This patch (of 5): The link from DAMOS filters design doc to usage doc has a typo calling filters as watermarks. Fix it. Link: https://lkml.kernel.org/r/20250218223708.53437-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250218223708.53437-2-sj@kernel.org Fixes: d31f5626a0e1 ("Docs/mm/damon/design: add links to sections of DAMON sysfs interface usage doc") Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index e28c6a1b40ae..12ae7e1209c8 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -617,7 +617,7 @@ Below ``type`` of filters are currently supported. - Applied to pages that belonging to a given DAMON monitoring target. - Handled by the core logic. -To know how user-space can set the watermarks via :ref:`DAMON sysfs interface +To know how user-space can set the filters via :ref:`DAMON sysfs interface `, refer to :ref:`filters ` part of the documentation. From e52a942b47c8b32072e7f342aca600016bcfca33 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 18 Feb 2025 14:37:05 -0800 Subject: [PATCH 130/431] Docs/mm/damon/design: document hugepage_size filter 'hugepage_size' DAMOS filter type is not documented on the design doc. Add a description of the type. Link: https://lkml.kernel.org/r/20250218223708.53437-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 12ae7e1209c8..a959c081bc59 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -610,6 +610,9 @@ Below ``type`` of filters are currently supported. - Applied to pages that are accessed after the last access check from the scheme. - Handled by operations set layer. Supported by only ``paddr`` set. +- pages that managed in a given size range + - Applied to pages that managed in a given size range. + - Handled by operations set layer. Supported by only ``paddr`` set. - address range - Applied to pages that belonging to a given address range. - Handled by the core logic. 
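A minimal model of the matching rule described for the new hugepage_size entry above, assuming the two size bounds form a closed interval as the usage documentation spells out; the helper name and types are hypothetical and this is not the DAMON implementation:

        #include <stdbool.h>

        /* hypothetical helper: true when a folio of 'bytes' bytes falls in
         * the configured [min, max] size range of a hugepage_size filter */
        static inline bool hugepage_size_filter_matches(unsigned long bytes,
                                                        unsigned long min,
                                                        unsigned long max)
        {
                return bytes >= min && bytes <= max;
        }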
From 0f28583b28d84a5eaaf299e01b7301922ae8da46 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 18 Feb 2025 14:37:06 -0800 Subject: [PATCH 131/431] Docs/damon: move DAMOS filter type names and meaning to design doc DAMON sysfs usage doc is describing DAMOS filter type names and their meanings in short. The design doc is providing the short meaning and detailed descriptions, too. This is unnecessary duplicates and confuses where to document new DAMOS filter types and features. Move the details from usage to design doc. Link: https://lkml.kernel.org/r/20250218223708.53437-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 26 +++++++++----------- Documentation/mm/damon/design.rst | 12 ++++----- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 51af66c208c5..dc37bba96273 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -408,21 +408,19 @@ in the numeric order. Each filter directory contains nine files, namely ``type``, ``matching``, ``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, ``min``, ``max`` -and ``target_idx``. To ``type`` file, you can write one of six special -keywords: ``anon`` for anonymous pages, ``memcg`` for specific memory cgroup, -``young`` for young pages, ``addr`` for specific address range (an open-ended -interval), ``hugepage_size`` for large folios of a specific size range [``min``, -``max``] or ``target`` for specific DAMON monitoring target filtering. Meaning -of the types are same to the description on the :ref:`design doc -`. +and ``target_idx``. To ``type`` file, you can write the type of the filter. +Refer to :ref:`the design doc ` for available type +names and their meanings. -In case of the memory cgroup filtering, you can specify the memory cgroup of -the interest by writing the path of the memory cgroup from the cgroups mount -point to ``memcg_path`` file. In case of the address range filtering, you can -specify the start and end address of the range to ``addr_start`` and -``addr_end`` files, respectively. For the DAMON monitoring target filtering, -you can specify the index of the target between the list of the DAMON context's -monitoring targets list to ``target_idx`` file. +For ``memcg`` type, you can specify the memory cgroup of the interest by +writing the path of the memory cgroup from the cgroups mount point to +``memcg_path`` file. For ``addr`` type, you can specify the start and end +address of the range (open-ended interval) to ``addr_start`` and ``addr_end`` +files, respectively. For ``hugepage_size`` type, you can specify the minimum +and maximum size of the range (closed interval) to ``min`` and ``max`` files, +respectively. For ``target`` type, you can specify the index of the target +between the list of the DAMON context's monitoring targets list to +``target_idx`` file. You can write ``Y`` or ``N`` to ``matching`` file to specify whether the filter is for memory that matches the ``type``. You can write ``Y`` or ``N`` to diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index a959c081bc59..7360e5ac0d06 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -600,23 +600,23 @@ counted as the scheme has tried. This difference affects the statistics. Below ``type`` of filters are currently supported. 
-- anonymous page +- anon - Applied to pages that containing data that not stored in files. - Handled by operations set layer. Supported by only ``paddr`` set. -- memory cgroup +- memcg - Applied to pages that belonging to a given cgroup. - Handled by operations set layer. Supported by only ``paddr`` set. -- young page +- young - Applied to pages that are accessed after the last access check from the scheme. - Handled by operations set layer. Supported by only ``paddr`` set. -- pages that managed in a given size range +- hugepage_size - Applied to pages that managed in a given size range. - Handled by operations set layer. Supported by only ``paddr`` set. -- address range +- addr - Applied to pages that belonging to a given address range. - Handled by the core logic. -- DAMON monitoring target +- target - Applied to pages that belonging to a given DAMON monitoring target. - Handled by the core logic. From 4a4d8e792506432270e27516cf03a8208cbbec8b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 18 Feb 2025 14:37:07 -0800 Subject: [PATCH 132/431] Docs/mm/damon/design: clarify handling layer based filters evaluation sequence If an element of memory matches a DAMOS filter, filters that installed after that get no chance to make any effect to the element. Hence in what order DAMOS filters are handled is important, if both allow filters and reject filters are used together. The ordering is affected by both the installation order and which layter the filters are handled. The design document is not clearly documenting the latter part. Clarify it on the design doc. Link: https://lkml.kernel.org/r/20250218223708.53437-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 7360e5ac0d06..8b9727d91434 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -569,11 +569,21 @@ number of filters for each scheme. Each filter specifies - whether it is to allow (include) or reject (exclude) applying the scheme's action to the memory (``allow``). -When multiple filters are installed, each filter is evaluated in the installed -order. If a part of memory is matched to one of the filter, next filters are -ignored. If the memory passes through the filters evaluation stage because it -is not matched to any of the filters, applying the scheme's action to it is -allowed, same to the behavior when no filter exists. +For efficient handling of filters, some types of filters are handled by the +core layer, while others are handled by operations set. In the latter case, +hence, support of the filter types depends on the DAMON operations set. In +case of the core layer-handled filters, the memory regions that excluded by the +filter are not counted as the scheme has tried to the region. In contrast, if +a memory regions is filtered by an operations set layer-handled filter, it is +counted as the scheme has tried. This difference affects the statistics. + +When multiple filters are installed, the group of filters that handled by the +core layer are evaluated first. After that, the group of filters that handled +by the operations layer are evaluated. Filters in each of the groups are +evaluated in the installed order. If a part of memory is matched to one of the +filter, next filters are ignored. 
If the memory passes through the filters +evaluation stage because it is not matched to any of the filters, applying the +scheme's action to it is allowed, same to the behavior when no filter exists. For example, let's assume 1) a filter for allowing anonymous pages and 2) another filter for rejecting young pages are installed in the order. If a page @@ -590,14 +600,6 @@ filter-allowed or filters evaluation stage passed. It means that installing allow-filters at the end of the list makes no practical change but only filters-checking overhead. -For efficient handling of filters, some types of filters are handled by the -core layer, while others are handled by operations set. In the latter case, -hence, support of the filter types depends on the DAMON operations set. In -case of the core layer-handled filters, the memory regions that excluded by the -filter are not counted as the scheme has tried to the region. In contrast, if -a memory regions is filtered by an operations set layer-handled filter, it is -counted as the scheme has tried. This difference affects the statistics. - Below ``type`` of filters are currently supported. - anon From edab6ffd792a7774e7d5fd7eb1d6251e452010f5 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 18 Feb 2025 14:37:08 -0800 Subject: [PATCH 133/431] Docs/mm/damon/design: categorize DAMOS filter types based on handling layer On what DAMON layer a DAMOS filter is handled is important to expect in what order filters will be evaluated. Re-organize the DAMOS filter types list on the design doc to categorize types based on the handling layer, to let users more easily understand the handling order. Link: https://lkml.kernel.org/r/20250218223708.53437-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 34 ++++++++++++++----------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 8b9727d91434..6a66aa0833fd 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -602,25 +602,21 @@ filters-checking overhead. Below ``type`` of filters are currently supported. -- anon - - Applied to pages that containing data that not stored in files. - - Handled by operations set layer. Supported by only ``paddr`` set. -- memcg - - Applied to pages that belonging to a given cgroup. - - Handled by operations set layer. Supported by only ``paddr`` set. -- young - - Applied to pages that are accessed after the last access check from the - scheme. - - Handled by operations set layer. Supported by only ``paddr`` set. -- hugepage_size - - Applied to pages that managed in a given size range. - - Handled by operations set layer. Supported by only ``paddr`` set. -- addr - - Applied to pages that belonging to a given address range. - - Handled by the core logic. -- target - - Applied to pages that belonging to a given DAMON monitoring target. - - Handled by the core logic. +- Core layer handled + - addr + - Applied to pages that belonging to a given address range. + - target + - Applied to pages that belonging to a given DAMON monitoring target. +- Operations layer handled, supported by only ``paddr`` operations set. + - anon + - Applied to pages that containing data that not stored in files. + - memcg + - Applied to pages that belonging to a given cgroup. + - young + - Applied to pages that are accessed after the last access check from the + scheme. 
+ - hugepage_size + - Applied to pages that managed in a given size range. To know how user-space can set the filters via :ref:`DAMON sysfs interface `, refer to :ref:`filters ` part of the From 7365ff2c8eef4ea50b5f3ae2349fa180e3782ef1 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:02 +0000 Subject: [PATCH 134/431] mm/cma: export total and free number of pages for CMA areas Patch series "hugetlb/CMA improvements for large systems", v5. On large systems, we observed some issues with hugetlb and CMA: 1) When specifying a large number of hugetlb boot pages (hugepages= on the commandline), the kernel may run out of memory before it even gets to HVO. For example, if you have a 3072G system, and want to use 3024 1G hugetlb pages for VMs, that should leave you plenty of space for the hypervisor, provided you have the hugetlb vmemmap optimization (HVO) enabled. However, since the vmemmap pages are always allocated first, and then later in boot freed, you will actually run yourself out of memory before you can do HVO. This means not getting all the hugetlb pages you want, and worse, failure to boot if there is an allocation failure in the system from which it can't recover. 2) There is a system setup where you might want to use hugetlb_cma with a large value (say, again, 3024 out of 3072G like above), and then lower that if system usage allows it, to make room for non-hugetlb processes. For this, a variation of the problem above applies: the kernel runs out of unmovable space to allocate from before you finish boot, since your CMA area takes up all the space. 3) CMA wants to use one big contiguous area for allocations. Which fails if you have the aforementioned 3T system with a gap in the middle of physical memory (like the < 40bits BIOS DMA area seen on some AMD systems). You then won't be able to set up a CMA area for one of the NUMA nodes, leading to loss of half of your hugetlb CMA area. 4) Under the scenario mentioned in 2), when trying to grow the number of hugetlb pages after dropping it for a while, new CMA allocations may fail occasionally. This is not unexpected, some transient references on pages may prevent cma_alloc from succeeding under memory pressure. However, the hugetlb code then falls back to a normal contiguous alloc, which may end up succeeding. This is not always desired behavior. If you have a large CMA area, then the kernel has a restricted amount of memory it can do unmovable allocations from (a well known issue). A normal contiguous alloc may eat further in to this space. To resolve these issues, do the following: * Add hooks to the section init code to do custom initialization of memmap pages. Hugetlb bootmem (memblock) allocated pages can then be pre-HVOed. This avoids allocating a large number of vmemmap pages early in boot, only to have them be freed again later, and also avoids running out of memory as described under 1). Using these hooks for hugetlb is optional. It requires moving hugetlb bootmem allocation to an earlier spot by the architecture. This has been enabled on x86. * hugetlb_cma doesn't care about the CMA area it uses being one large contiguous range. Multiple smaller ranges are fine. The only requirements are that the areas should be on one NUMA node, and individual gigantic pages should be allocatable from them. So, implement multi-range support for CMA, avoiding issue 3). * Introduce a hugetlb_cma_only option on the commandline. 
This only allows allocations from CMA for gigantic pages, if hugetlb_cma= is also specified. * With hugetlb_cma_only active, it also makes sense to be able to pre-allocate gigantic hugetlb pages at boot time from the CMA area(s). Add a rudimentary early CMA allocation interface, that just grabs a piece of memblock-allocated space from the CMA area, which gets marked as allocated in the CMA bitmap when the CMA area is initialized. With this, hugepages= can be supported with hugetlb_cma=, making scenario 2) work. Additionally, fix some minor bugs, with one worth mentioning: since hugetlb gigantic bootmem pages are allocated by memblock, they may span multiple zones, as memblock doesn't (and mostly can't) know about zones. This can cause problems. A hugetlb page spanning multiple zones is bad, and it's worse with HVO, when the de-HVO step effectively sneakily re-assigns pages to a different zone than originally configured, since the tail pages all inherit the zone from the first 60 tail pages. This condition is not common, but can be easily reproduced using ZONE_MOVABLE. To fix this, add checks to see if gigantic bootmem pages intersect with multiple zones, and do not use them if they do, giving them back to the page allocator instead. The first patch is kind of along for the ride, except that maintaining an available_count for a CMA area is convenient for the multiple range support. This patch (of 27): In addition to the number of allocations and releases, system management software may like to be aware of the size of CMA areas, and how many pages are available in it. This information is currently not available, so export it in total_page and available_pages, respectively. The name 'available_pages' was picked over 'free_pages' because 'free' implies that the pages are unused. But they might not be, they just haven't been used by cma_alloc The number of available pages is tracked regardless of CONFIG_CMA_SYSFS, allowing for a few minor shortcuts in the code, avoiding bitmap operations. Link: https://lkml.kernel.org/r/20250228182928.2645936-2-fvdl@google.com Signed-off-by: Frank van der Linden Reviewed-by: Oscar Salvador Cc: David Hildenbrand Cc: Joao Martins Cc: Muchun Song Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Yu Zhao Cc: Zi Yan Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: Heiko Carstens Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Peter Zijlstra Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-cma | 13 +++++++++++ mm/cma.c | 22 ++++++++++++++----- mm/cma.h | 1 + mm/cma_debug.c | 5 +---- mm/cma_sysfs.c | 20 +++++++++++++++++ 5 files changed, 51 insertions(+), 10 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma index dfd755201142..aaf2a5d8b13b 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-cma +++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma @@ -29,3 +29,16 @@ Date: Feb 2024 Contact: Anshuman Khandual Description: the number of pages CMA API succeeded to release + +What: /sys/kernel/mm/cma//total_pages +Date: Jun 2024 +Contact: Frank van der Linden +Description: + The size of the CMA area in pages. + +What: /sys/kernel/mm/cma//available_pages +Date: Jun 2024 +Contact: Frank van der Linden +Description: + The number of pages in the CMA area that are still + available for CMA allocation. 
diff --git a/mm/cma.c b/mm/cma.c index de5bc0c81fc2..95a8788e54d3 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -86,6 +86,7 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, spin_lock_irqsave(&cma->lock, flags); bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); + cma->available_count += count; spin_unlock_irqrestore(&cma->lock, flags); } @@ -133,7 +134,7 @@ static void __init cma_activate_area(struct cma *cma) free_reserved_page(pfn_to_page(pfn)); } totalcma_pages -= cma->count; - cma->count = 0; + cma->available_count = cma->count = 0; pr_err("CMA area %s could not be activated\n", cma->name); } @@ -206,7 +207,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); cma->base_pfn = PFN_DOWN(base); - cma->count = size >> PAGE_SHIFT; + cma->available_count = cma->count = size >> PAGE_SHIFT; cma->order_per_bit = order_per_bit; *res_cma = cma; cma_area_count++; @@ -390,7 +391,7 @@ static void cma_debug_show_areas(struct cma *cma) { unsigned long next_zero_bit, next_set_bit, nr_zero; unsigned long start = 0; - unsigned long nr_part, nr_total = 0; + unsigned long nr_part; unsigned long nbits = cma_bitmap_maxno(cma); spin_lock_irq(&cma->lock); @@ -402,12 +403,12 @@ static void cma_debug_show_areas(struct cma *cma) next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); nr_zero = next_set_bit - next_zero_bit; nr_part = nr_zero << cma->order_per_bit; - pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part, + pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, next_zero_bit); - nr_total += nr_part; start = next_zero_bit + nr_zero; } - pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); + pr_cont("=> %lu free of %lu total pages\n", cma->available_count, + cma->count); spin_unlock_irq(&cma->lock); } @@ -444,6 +445,14 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, for (;;) { spin_lock_irq(&cma->lock); + /* + * If the request is larger than the available number + * of pages, stop right away. + */ + if (count > cma->available_count) { + spin_unlock_irq(&cma->lock); + break; + } bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, bitmap_maxno, start, bitmap_count, mask, offset); @@ -452,6 +461,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, break; } bitmap_set(cma->bitmap, bitmap_no, bitmap_count); + cma->available_count -= count; /* * It's safe to drop the lock here. We've marked this region for * our exclusive use. 
If the migration fails we will take the diff --git a/mm/cma.h b/mm/cma.h index 8485ef893e99..3dd3376ae980 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -13,6 +13,7 @@ struct cma_kobject { struct cma { unsigned long base_pfn; unsigned long count; + unsigned long available_count; unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 602fff89b15f..89236f22230a 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -34,13 +34,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n"); static int cma_used_get(void *data, u64 *val) { struct cma *cma = data; - unsigned long used; spin_lock_irq(&cma->lock); - /* pages counter is smaller than sizeof(int) */ - used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); + *val = cma->count - cma->available_count; spin_unlock_irq(&cma->lock); - *val = (u64)used << cma->order_per_bit; return 0; } diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c index f50db3973171..97acd3e5a6a5 100644 --- a/mm/cma_sysfs.c +++ b/mm/cma_sysfs.c @@ -62,6 +62,24 @@ static ssize_t release_pages_success_show(struct kobject *kobj, } CMA_ATTR_RO(release_pages_success); +static ssize_t total_pages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%lu\n", cma->count); +} +CMA_ATTR_RO(total_pages); + +static ssize_t available_pages_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct cma *cma = cma_from_kobj(kobj); + + return sysfs_emit(buf, "%lu\n", cma->available_count); +} +CMA_ATTR_RO(available_pages); + static void cma_kobj_release(struct kobject *kobj) { struct cma *cma = cma_from_kobj(kobj); @@ -75,6 +93,8 @@ static struct attribute *cma_attrs[] = { &alloc_pages_success_attr.attr, &alloc_pages_fail_attr.attr, &release_pages_success_attr.attr, + &total_pages_attr.attr, + &available_pages_attr.attr, NULL, }; ATTRIBUTE_GROUPS(cma); From c009da4258f9885c5a3749fc004870db9c0e7a99 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:03 +0000 Subject: [PATCH 135/431] mm, cma: support multiple contiguous ranges, if requested Currently, CMA manages one range of physically contiguous memory. Creation of larger CMA areas with hugetlb_cma may run in to gaps in physical memory, so that they are not able to allocate that contiguous physical range from memblock when creating the CMA area. This can happen, for example, on an AMD system with > 1TB of memory, where there will be a gap just below the 1TB (40bit DMA) line. If you have set aside most of memory for potential hugetlb CMA allocation, cma_declare_contiguous_nid will fail. hugetlb_cma doesn't need the entire area to be one physically contiguous range. It just cares about being able to get physically contiguous chunks of a certain size (e.g. 1G), and it is fine to have the CMA area backed by multiple physical ranges, as long as it gets 1G contiguous allocations. Multi-range support is implemented by introducing an array of ranges, instead of just one big one. Each range has its own bitmap. Effectively, the allocate and release operations work as before, just per-range. So, instead of going through one large bitmap, they now go through a number of smaller ones. The maximum number of supported ranges is 8, as defined in CMA_MAX_RANGES. 
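To make that shape concrete, here is a simplified sketch with illustrative names and a toy 64-bit-per-range bitmap; it is not the struct cma/cma_memrange code from the diffs below, just the idea of an array of ranges, each with its own bitmap, where allocation tries one range after another and a request never straddles two ranges:

  #include <stdbool.h>
  #include <stdio.h>

  #define TOY_MAX_RANGES 8        /* mirrors CMA_MAX_RANGES */

  struct toy_memrange {
          unsigned long base_pfn;
          unsigned long count;    /* pages in this contiguous range (<= 64 here) */
          unsigned long bitmap;   /* real code: one bit per 1 << order_per_bit pages */
  };

  struct toy_cma {
          int nranges;
          struct toy_memrange ranges[TOY_MAX_RANGES];
  };

  /* Allocate a single page: try each physically contiguous range in turn. */
  static bool toy_alloc_page(struct toy_cma *cma, unsigned long *pfn)
  {
          for (int r = 0; r < cma->nranges; r++) {
                  struct toy_memrange *cmr = &cma->ranges[r];

                  for (unsigned long bit = 0; bit < cmr->count; bit++) {
                          if (!(cmr->bitmap & (1UL << bit))) {
                                  cmr->bitmap |= 1UL << bit;
                                  *pfn = cmr->base_pfn + bit;
                                  return true;
                          }
                  }
          }
          return false;
  }

  int main(void)
  {
          struct toy_cma cma = {
                  .nranges = 2,
                  .ranges = {
                          { .base_pfn = 0x100000, .count = 4 },
                          { .base_pfn = 0x300000, .count = 4 },
                  },
          };
          unsigned long pfn;

          while (toy_alloc_page(&cma, &pfn))
                  printf("got pfn %#lx\n", pfn);
          return 0;
  }

Per-range bitmaps behave just like the old single bitmap did, which is why the allocate and release paths only needed to gain an outer loop over ranges.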
Since some current users of CMA expect a CMA area to just use one physically contiguous range, only allow for multiple ranges if a new interface, cma_declare_contiguous_nid_multi, is used. The other interfaces will work like before, creating only CMA areas with 1 range. cma_declare_contiguous_nid_multi works as follows, mimicking the default "bottom-up, above 4G" reservation approach: 0) Try cma_declare_contiguous_nid, which will use only one region. If this succeeds, return. This makes sure that for all the cases that currently work, the behavior remains unchanged even if the caller switches from cma_declare_contiguous_nid to cma_declare_contiguous_nid_multi. 1) Select the largest free memblock ranges above 4G, with a maximum number of CMA_MAX_RANGES. 2) If we did not find at most CMA_MAX_RANGES that add up to the total size requested, return -ENOMEM. 3) Sort the selected ranges by base address. 4) Reserve them bottom-up until we get what we wanted. Link: https://lkml.kernel.org/r/20250228182928.2645936-3-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Arnd Bergmann Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/cma_debugfs.rst | 10 +- include/linux/cma.h | 3 + mm/cma.c | 596 +++++++++++++++---- mm/cma.h | 27 +- mm/cma_debug.c | 56 +- 5 files changed, 551 insertions(+), 141 deletions(-) diff --git a/Documentation/admin-guide/mm/cma_debugfs.rst b/Documentation/admin-guide/mm/cma_debugfs.rst index 7367e6294ef6..4120e9cb0cd5 100644 --- a/Documentation/admin-guide/mm/cma_debugfs.rst +++ b/Documentation/admin-guide/mm/cma_debugfs.rst @@ -12,10 +12,16 @@ its CMA name like below: The structure of the files created under that directory is as follows: - - [RO] base_pfn: The base PFN (Page Frame Number) of the zone. + - [RO] base_pfn: The base PFN (Page Frame Number) of the CMA area. + This is the same as ranges/0/base_pfn. - [RO] count: Amount of memory in the CMA area. - [RO] order_per_bit: Order of pages represented by one bit. - - [RO] bitmap: The bitmap of page states in the zone. + - [RO] bitmap: The bitmap of allocated pages in the area. + This is the same as ranges/0/base_pfn. + - [RO] ranges/N/base_pfn: The base PFN of contiguous range N + in the CMA area. + - [RO] ranges/N/bitmap: The bit map of allocated pages in + range N in the CMA area. - [WO] alloc: Allocate N pages from that CMA area. 
For example:: echo 5 > /cma//alloc diff --git a/include/linux/cma.h b/include/linux/cma.h index d15b64f51336..863427c27dc2 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -40,6 +40,9 @@ static inline int __init cma_declare_contiguous(phys_addr_t base, return cma_declare_contiguous_nid(base, size, limit, alignment, order_per_bit, fixed, name, res_cma, NUMA_NO_NODE); } +extern int __init cma_declare_contiguous_multi(phys_addr_t size, + phys_addr_t align, unsigned int order_per_bit, + const char *name, struct cma **res_cma, int nid); extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, diff --git a/mm/cma.c b/mm/cma.c index 95a8788e54d3..34caa6b29c99 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -35,9 +36,16 @@ struct cma cma_areas[MAX_CMA_AREAS]; unsigned int cma_area_count; static DEFINE_MUTEX(cma_mutex); +static int __init __cma_declare_contiguous_nid(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma, + int nid); + phys_addr_t cma_get_base(const struct cma *cma) { - return PFN_PHYS(cma->base_pfn); + WARN_ON_ONCE(cma->nranges != 1); + return PFN_PHYS(cma->ranges[0].base_pfn); } unsigned long cma_get_size(const struct cma *cma) @@ -63,9 +71,10 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, * The value returned is represented in order_per_bits. */ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, + const struct cma_memrange *cmr, unsigned int align_order) { - return (cma->base_pfn & ((1UL << align_order) - 1)) + return (cmr->base_pfn & ((1UL << align_order) - 1)) >> cma->order_per_bit; } @@ -75,46 +84,57 @@ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; } -static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, - unsigned long count) +static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr, + unsigned long pfn, unsigned long count) { unsigned long bitmap_no, bitmap_count; unsigned long flags; - bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; + bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit; bitmap_count = cma_bitmap_pages_to_bits(cma, count); spin_lock_irqsave(&cma->lock, flags); - bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); + bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count); cma->available_count += count; spin_unlock_irqrestore(&cma->lock, flags); } static void __init cma_activate_area(struct cma *cma) { - unsigned long base_pfn = cma->base_pfn, pfn; + unsigned long pfn, base_pfn; + int allocrange, r; struct zone *zone; + struct cma_memrange *cmr; - cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); - if (!cma->bitmap) - goto out_error; - - /* - * alloc_contig_range() requires the pfn range specified to be in the - * same zone. Simplify by forcing the entire CMA resv range to be in the - * same zone. 
- */ - WARN_ON_ONCE(!pfn_valid(base_pfn)); - zone = page_zone(pfn_to_page(base_pfn)); - for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { - WARN_ON_ONCE(!pfn_valid(pfn)); - if (page_zone(pfn_to_page(pfn)) != zone) - goto not_in_zone; + for (allocrange = 0; allocrange < cma->nranges; allocrange++) { + cmr = &cma->ranges[allocrange]; + cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr), + GFP_KERNEL); + if (!cmr->bitmap) + goto cleanup; } - for (pfn = base_pfn; pfn < base_pfn + cma->count; - pfn += pageblock_nr_pages) - init_cma_reserved_pageblock(pfn_to_page(pfn)); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + base_pfn = cmr->base_pfn; + + /* + * alloc_contig_range() requires the pfn range specified + * to be in the same zone. Simplify by forcing the entire + * CMA resv range to be in the same zone. + */ + WARN_ON_ONCE(!pfn_valid(base_pfn)); + zone = page_zone(pfn_to_page(base_pfn)); + for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) { + WARN_ON_ONCE(!pfn_valid(pfn)); + if (page_zone(pfn_to_page(pfn)) != zone) + goto cleanup; + } + + for (pfn = base_pfn; pfn < base_pfn + cmr->count; + pfn += pageblock_nr_pages) + init_cma_reserved_pageblock(pfn_to_page(pfn)); + } spin_lock_init(&cma->lock); @@ -125,13 +145,19 @@ static void __init cma_activate_area(struct cma *cma) return; -not_in_zone: - bitmap_free(cma->bitmap); -out_error: +cleanup: + for (r = 0; r < allocrange; r++) + bitmap_free(cma->ranges[r].bitmap); + /* Expose all pages to the buddy, they are useless for CMA. */ if (!cma->reserve_pages_on_error) { - for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) - free_reserved_page(pfn_to_page(pfn)); + for (r = 0; r < allocrange; r++) { + cmr = &cma->ranges[r]; + for (pfn = cmr->base_pfn; + pfn < cmr->base_pfn + cmr->count; + pfn++) + free_reserved_page(pfn_to_page(pfn)); + } } totalcma_pages -= cma->count; cma->available_count = cma->count = 0; @@ -154,6 +180,43 @@ void __init cma_reserve_pages_on_error(struct cma *cma) cma->reserve_pages_on_error = true; } +static int __init cma_new_area(const char *name, phys_addr_t size, + unsigned int order_per_bit, + struct cma **res_cma) +{ + struct cma *cma; + + if (cma_area_count == ARRAY_SIZE(cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
+ */ + cma = &cma_areas[cma_area_count]; + cma_area_count++; + + if (name) + snprintf(cma->name, CMA_MAX_NAME, "%s", name); + else + snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); + + cma->available_count = cma->count = size >> PAGE_SHIFT; + cma->order_per_bit = order_per_bit; + *res_cma = cma; + totalcma_pages += cma->count; + + return 0; +} + +static void __init cma_drop_area(struct cma *cma) +{ + totalcma_pages -= cma->count; + cma_area_count--; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -172,13 +235,9 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma **res_cma) { struct cma *cma; + int ret; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { - pr_err("Not enough slots for CMA reserved regions!\n"); - return -ENOSPC; - } - if (!size || !memblock_is_region_reserved(base, size)) return -EINVAL; @@ -195,27 +254,263 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES)) return -EINVAL; - /* - * Each reserved area must be initialised later, when more kernel - * subsystems (like slab allocator) are available. - */ - cma = &cma_areas[cma_area_count]; + ret = cma_new_area(name, size, order_per_bit, &cma); + if (ret != 0) + return ret; - if (name) - snprintf(cma->name, CMA_MAX_NAME, name); - else - snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); + cma->ranges[0].base_pfn = PFN_DOWN(base); + cma->ranges[0].count = cma->count; + cma->nranges = 1; - cma->base_pfn = PFN_DOWN(base); - cma->available_count = cma->count = size >> PAGE_SHIFT; - cma->order_per_bit = order_per_bit; *res_cma = cma; - cma_area_count++; - totalcma_pages += cma->count; return 0; } +/* + * Structure used while walking physical memory ranges and finding out + * which one(s) to use for a CMA area. + */ +struct cma_init_memrange { + phys_addr_t base; + phys_addr_t size; + struct list_head list; +}; + +/* + * Work array used during CMA initialization. + */ +static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata; + +static bool __init revsizecmp(struct cma_init_memrange *mlp, + struct cma_init_memrange *mrp) +{ + return mlp->size > mrp->size; +} + +static bool __init basecmp(struct cma_init_memrange *mlp, + struct cma_init_memrange *mrp) +{ + return mlp->base < mrp->base; +} + +/* + * Helper function to create sorted lists. + */ +static void __init list_insert_sorted( + struct list_head *ranges, + struct cma_init_memrange *mrp, + bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh)) +{ + struct list_head *mp; + struct cma_init_memrange *mlp; + + if (list_empty(ranges)) + list_add(&mrp->list, ranges); + else { + list_for_each(mp, ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + if (cmp(mlp, mrp)) + break; + } + __list_add(&mrp->list, mlp->list.prev, &mlp->list); + } +} + +/* + * Create CMA areas with a total size of @total_size. A normal allocation + * for one area is tried first. If that fails, the biggest memblock + * ranges above 4G are selected, and allocated bottom up. + * + * The complexity here is not great, but this function will only be + * called during boot, and the lists operated on have fewer than + * CMA_MAX_RANGES elements (default value: 8). 
+ */ +int __init cma_declare_contiguous_multi(phys_addr_t total_size, + phys_addr_t align, unsigned int order_per_bit, + const char *name, struct cma **res_cma, int nid) +{ + phys_addr_t start, end; + phys_addr_t size, sizesum, sizeleft; + struct cma_init_memrange *mrp, *mlp, *failed; + struct cma_memrange *cmrp; + LIST_HEAD(ranges); + LIST_HEAD(final_ranges); + struct list_head *mp, *next; + int ret, nr = 1; + u64 i; + struct cma *cma; + + /* + * First, try it the normal way, producing just one range. + */ + ret = __cma_declare_contiguous_nid(0, total_size, 0, align, + order_per_bit, false, name, res_cma, nid); + if (ret != -ENOMEM) + goto out; + + /* + * Couldn't find one range that fits our needs, so try multiple + * ranges. + * + * No need to do the alignment checks here, the call to + * cma_declare_contiguous_nid above would have caught + * any issues. With the checks, we know that: + * + * - @align is a power of 2 + * - @align is >= pageblock alignment + * - @size is aligned to @align and to @order_per_bit + * + * So, as long as we create ranges that have a base + * aligned to @align, and a size that is aligned to + * both @align and @order_to_bit, things will work out. + */ + nr = 0; + sizesum = 0; + failed = NULL; + + ret = cma_new_area(name, total_size, order_per_bit, &cma); + if (ret != 0) + goto out; + + align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES); + /* + * Create a list of ranges above 4G, largest range first. + */ + for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { + if (upper_32_bits(start) == 0) + continue; + + start = ALIGN(start, align); + if (start >= end) + continue; + + end = ALIGN_DOWN(end, align); + if (end <= start) + continue; + + size = end - start; + size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit)); + if (!size) + continue; + sizesum += size; + + pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end); + + /* + * If we don't yet have used the maximum number of + * areas, grab a new one. + * + * If we can't use anymore, see if this range is not + * smaller than the smallest one already recorded. If + * not, re-use the smallest element. + */ + if (nr < CMA_MAX_RANGES) + mrp = &memranges[nr++]; + else { + mrp = list_last_entry(&ranges, + struct cma_init_memrange, list); + if (size < mrp->size) + continue; + list_del(&mrp->list); + sizesum -= mrp->size; + pr_debug("deleted %016llx - %016llx from the list\n", + (u64)mrp->base, (u64)mrp->base + size); + } + mrp->base = start; + mrp->size = size; + + /* + * Now do a sorted insert. + */ + list_insert_sorted(&ranges, mrp, revsizecmp); + pr_debug("added %016llx - %016llx to the list\n", + (u64)mrp->base, (u64)mrp->base + size); + pr_debug("total size now %llu\n", (u64)sizesum); + } + + /* + * There is not enough room in the CMA_MAX_RANGES largest + * ranges, so bail out. + */ + if (sizesum < total_size) { + cma_drop_area(cma); + ret = -ENOMEM; + goto out; + } + + /* + * Found ranges that provide enough combined space. + * Now, sorted them by address, smallest first, because we + * want to mimic a bottom-up memblock allocation. + */ + sizesum = 0; + list_for_each_safe(mp, next, &ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + list_del(mp); + list_insert_sorted(&final_ranges, mlp, basecmp); + sizesum += mlp->size; + if (sizesum >= total_size) + break; + } + + /* + * Walk the final list, and add a CMA range for + * each range, possibly not using the last one fully. 
+ */ + nr = 0; + sizeleft = total_size; + list_for_each(mp, &final_ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + size = min(sizeleft, mlp->size); + if (memblock_reserve(mlp->base, size)) { + /* + * Unexpected error. Could go on to + * the next one, but just abort to + * be safe. + */ + failed = mlp; + break; + } + + pr_debug("created region %d: %016llx - %016llx\n", + nr, (u64)mlp->base, (u64)mlp->base + size); + cmrp = &cma->ranges[nr++]; + cmrp->base_pfn = PHYS_PFN(mlp->base); + cmrp->count = size >> PAGE_SHIFT; + + sizeleft -= size; + if (sizeleft == 0) + break; + } + + if (failed) { + list_for_each(mp, &final_ranges) { + mlp = list_entry(mp, struct cma_init_memrange, list); + if (mlp == failed) + break; + memblock_phys_free(mlp->base, mlp->size); + } + cma_drop_area(cma); + ret = -ENOMEM; + goto out; + } + + cma->nranges = nr; + *res_cma = cma; + +out: + if (ret != 0) + pr_err("Failed to reserve %lu MiB\n", + (unsigned long)total_size / SZ_1M); + else + pr_info("Reserved %lu MiB in %d range%s\n", + (unsigned long)total_size / SZ_1M, nr, + nr > 1 ? "s" : ""); + + return ret; +} + /** * cma_declare_contiguous_nid() - reserve custom contiguous area * @base: Base address of the reserved area optional, use 0 for any @@ -241,6 +536,26 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, const char *name, struct cma **res_cma, int nid) +{ + int ret; + + ret = __cma_declare_contiguous_nid(base, size, limit, alignment, + order_per_bit, fixed, name, res_cma, nid); + if (ret != 0) + pr_err("Failed to reserve %ld MiB\n", + (unsigned long)size / SZ_1M); + else + pr_info("Reserved %ld MiB at %pa\n", + (unsigned long)size / SZ_1M, &base); + + return ret; +} + +static int __init __cma_declare_contiguous_nid(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma, + int nid) { phys_addr_t memblock_end = memblock_end_of_DRAM(); phys_addr_t highmem_start; @@ -273,10 +588,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, /* Sanitise input arguments. */ alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); if (fixed && base & (alignment - 1)) { - ret = -EINVAL; pr_err("Region at %pa must be aligned to %pa bytes\n", &base, &alignment); - goto err; + return -EINVAL; } base = ALIGN(base, alignment); size = ALIGN(size, alignment); @@ -294,10 +608,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, * low/high memory boundary. 
*/ if (fixed && base < highmem_start && base + size > highmem_start) { - ret = -EINVAL; pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", &base, &highmem_start); - goto err; + return -EINVAL; } /* @@ -309,18 +622,16 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, limit = memblock_end; if (base + size > limit) { - ret = -EINVAL; pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", &size, &base, &limit); - goto err; + return -EINVAL; } /* Reserve memory */ if (fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { - ret = -EBUSY; - goto err; + return -EBUSY; } } else { phys_addr_t addr = 0; @@ -357,10 +668,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, if (!addr) { addr = memblock_alloc_range_nid(size, alignment, base, limit, nid, true); - if (!addr) { - ret = -ENOMEM; - goto err; - } + if (!addr) + return -ENOMEM; } /* @@ -373,75 +682,67 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma); if (ret) - goto free_mem; + memblock_phys_free(base, size); - pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M, - &base, nid); - return 0; - -free_mem: - memblock_phys_free(base, size); -err: - pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M, - nid); return ret; } static void cma_debug_show_areas(struct cma *cma) { unsigned long next_zero_bit, next_set_bit, nr_zero; - unsigned long start = 0; + unsigned long start; unsigned long nr_part; - unsigned long nbits = cma_bitmap_maxno(cma); + unsigned long nbits; + int r; + struct cma_memrange *cmr; spin_lock_irq(&cma->lock); pr_info("number of available pages: "); - for (;;) { - next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); - if (next_zero_bit >= nbits) - break; - next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); - nr_zero = next_set_bit - next_zero_bit; - nr_part = nr_zero << cma->order_per_bit; - pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, - next_zero_bit); - start = next_zero_bit + nr_zero; + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + + start = 0; + nbits = cma_bitmap_maxno(cma, cmr); + + pr_info("range %d: ", r); + for (;;) { + next_zero_bit = find_next_zero_bit(cmr->bitmap, + nbits, start); + if (next_zero_bit >= nbits) + break; + next_set_bit = find_next_bit(cmr->bitmap, nbits, + next_zero_bit); + nr_zero = next_set_bit - next_zero_bit; + nr_part = nr_zero << cma->order_per_bit; + pr_cont("%s%lu@%lu", start ? "+" : "", nr_part, + next_zero_bit); + start = next_zero_bit + nr_zero; + } + pr_info("\n"); } pr_cont("=> %lu free of %lu total pages\n", cma->available_count, cma->count); spin_unlock_irq(&cma->lock); } -static struct page *__cma_alloc(struct cma *cma, unsigned long count, - unsigned int align, gfp_t gfp) +static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr, + unsigned long count, unsigned int align, + struct page **pagep, gfp_t gfp) { unsigned long mask, offset; unsigned long pfn = -1; unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; - unsigned long i; + int ret = -EBUSY; struct page *page = NULL; - int ret = -ENOMEM; - const char *name = cma ? 
cma->name : NULL; - - trace_cma_alloc_start(name, count, align); - - if (!cma || !cma->count || !cma->bitmap) - return page; - - pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, - (void *)cma, cma->name, count, align); - - if (!count) - return page; mask = cma_bitmap_aligned_mask(cma, align); - offset = cma_bitmap_aligned_offset(cma, align); - bitmap_maxno = cma_bitmap_maxno(cma); + offset = cma_bitmap_aligned_offset(cma, cmr, align); + bitmap_maxno = cma_bitmap_maxno(cma, cmr); bitmap_count = cma_bitmap_pages_to_bits(cma, count); if (bitmap_count > bitmap_maxno) - return page; + goto out; for (;;) { spin_lock_irq(&cma->lock); @@ -453,14 +754,14 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, spin_unlock_irq(&cma->lock); break; } - bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, + bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap, bitmap_maxno, start, bitmap_count, mask, offset); if (bitmap_no >= bitmap_maxno) { spin_unlock_irq(&cma->lock); break; } - bitmap_set(cma->bitmap, bitmap_no, bitmap_count); + bitmap_set(cmr->bitmap, bitmap_no, bitmap_count); cma->available_count -= count; /* * It's safe to drop the lock here. We've marked this region for @@ -469,7 +770,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, */ spin_unlock_irq(&cma->lock); - pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); + pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp); mutex_unlock(&cma_mutex); @@ -478,7 +779,7 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, break; } - cma_clear_bitmap(cma, pfn, count); + cma_clear_bitmap(cma, cmr, pfn, count); if (ret != -EBUSY) break; @@ -490,6 +791,38 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, /* try again with a bit different memory target */ start = bitmap_no + mask + 1; } +out: + *pagep = page; + return ret; +} + +static struct page *__cma_alloc(struct cma *cma, unsigned long count, + unsigned int align, gfp_t gfp) +{ + struct page *page = NULL; + int ret = -ENOMEM, r; + unsigned long i; + const char *name = cma ? cma->name : NULL; + + trace_cma_alloc_start(name, count, align); + + if (!cma || !cma->count) + return page; + + pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, + (void *)cma, cma->name, count, align); + + if (!count) + return page; + + for (r = 0; r < cma->nranges; r++) { + page = NULL; + + ret = cma_range_alloc(cma, &cma->ranges[r], count, align, + &page, gfp); + if (ret != -EBUSY || page) + break; + } /* * CMA can allocate multiple page blocks, which results in different @@ -508,7 +841,8 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count, } pr_debug("%s(): returned %p\n", __func__, page); - trace_cma_alloc_finish(name, pfn, page, count, align, ret); + trace_cma_alloc_finish(name, page ? 
page_to_pfn(page) : 0, + page, count, align, ret); if (page) { count_vm_event(CMA_ALLOC_SUCCESS); cma_sysfs_account_success_pages(cma, count); @@ -551,20 +885,31 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count) { - unsigned long pfn; + unsigned long pfn, end; + int r; + struct cma_memrange *cmr; + bool ret; - if (!cma || !pages) + if (!cma || !pages || count > cma->count) return false; pfn = page_to_pfn(pages); + ret = false; - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { - pr_debug("%s(page %p, count %lu)\n", __func__, - (void *)pages, count); - return false; + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + end = cmr->base_pfn + cmr->count; + if (pfn >= cmr->base_pfn && pfn < end) { + ret = pfn + count <= end; + break; + } } - return true; + if (!ret) + pr_debug("%s(page %p, count %lu)\n", + __func__, (void *)pages, count); + + return ret; } /** @@ -580,19 +925,32 @@ bool cma_pages_valid(struct cma *cma, const struct page *pages, bool cma_release(struct cma *cma, const struct page *pages, unsigned long count) { - unsigned long pfn; + struct cma_memrange *cmr; + unsigned long pfn, end_pfn; + int r; + + pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); if (!cma_pages_valid(cma, pages, count)) return false; - pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); - pfn = page_to_pfn(pages); + end_pfn = pfn + count; - VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + if (pfn >= cmr->base_pfn && + pfn < (cmr->base_pfn + cmr->count)) { + VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count); + break; + } + } + + if (r == cma->nranges) + return false; free_contig_range(pfn, count); - cma_clear_bitmap(cma, pfn, count); + cma_clear_bitmap(cma, cmr, pfn, count); cma_sysfs_account_release_pages(cma, count); trace_cma_release(cma->name, pfn, pages, count); diff --git a/mm/cma.h b/mm/cma.h index 3dd3376ae980..5f39dd1aac91 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -10,19 +10,35 @@ struct cma_kobject { struct cma *cma; }; +/* + * Multi-range support. This can be useful if the size of the allocation + * is not expected to be larger than the alignment (like with hugetlb_cma), + * and the total amount of memory requested, while smaller than the total + * amount of memory available, is large enough that it doesn't fit in a + * single physical memory range because of memory holes. 
+ */ +struct cma_memrange { + unsigned long base_pfn; + unsigned long count; + unsigned long *bitmap; +#ifdef CONFIG_CMA_DEBUGFS + struct debugfs_u32_array dfs_bitmap; +#endif +}; +#define CMA_MAX_RANGES 8 + struct cma { - unsigned long base_pfn; unsigned long count; unsigned long available_count; - unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; - struct debugfs_u32_array dfs_bitmap; #endif char name[CMA_MAX_NAME]; + int nranges; + struct cma_memrange ranges[CMA_MAX_RANGES]; #ifdef CONFIG_CMA_SYSFS /* the number of CMA page successful allocations */ atomic64_t nr_pages_succeeded; @@ -39,9 +55,10 @@ struct cma { extern struct cma cma_areas[MAX_CMA_AREAS]; extern unsigned int cma_area_count; -static inline unsigned long cma_bitmap_maxno(struct cma *cma) +static inline unsigned long cma_bitmap_maxno(struct cma *cma, + struct cma_memrange *cmr) { - return cma->count >> cma->order_per_bit; + return cmr->count >> cma->order_per_bit; } #ifdef CONFIG_CMA_SYSFS diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 89236f22230a..fdf899532ca0 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -46,17 +46,26 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n"); static int cma_maxchunk_get(void *data, u64 *val) { struct cma *cma = data; + struct cma_memrange *cmr; unsigned long maxchunk = 0; - unsigned long start, end = 0; - unsigned long bitmap_maxno = cma_bitmap_maxno(cma); + unsigned long start, end; + unsigned long bitmap_maxno; + int r; spin_lock_irq(&cma->lock); - for (;;) { - start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); - if (start >= bitmap_maxno) - break; - end = find_next_bit(cma->bitmap, bitmap_maxno, start); - maxchunk = max(end - start, maxchunk); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + bitmap_maxno = cma_bitmap_maxno(cma, cmr); + end = 0; + for (;;) { + start = find_next_zero_bit(cmr->bitmap, + bitmap_maxno, end); + if (start >= bitmap_maxno) + break; + end = find_next_bit(cmr->bitmap, bitmap_maxno, + start); + maxchunk = max(end - start, maxchunk); + } } spin_unlock_irq(&cma->lock); *val = (u64)maxchunk << cma->order_per_bit; @@ -159,24 +168,41 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n"); static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) { - struct dentry *tmp; + struct dentry *tmp, *dir, *rangedir; + int r; + char rdirname[12]; + struct cma_memrange *cmr; tmp = debugfs_create_dir(cma->name, root_dentry); debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops); debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops); - debugfs_create_file("base_pfn", 0444, tmp, - &cma->base_pfn, &cma_debugfs_fops); debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops); debugfs_create_file("order_per_bit", 0444, tmp, &cma->order_per_bit, &cma_debugfs_fops); debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); - cma->dfs_bitmap.array = (u32 *)cma->bitmap; - cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), - BITS_PER_BYTE * sizeof(u32)); - debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); + rangedir = debugfs_create_dir("ranges", tmp); + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + snprintf(rdirname, sizeof(rdirname), "%d", r); + dir = debugfs_create_dir(rdirname, rangedir); + 
debugfs_create_file("base_pfn", 0444, dir, + &cmr->base_pfn, &cma_debugfs_fops); + cmr->dfs_bitmap.array = (u32 *)cmr->bitmap; + cmr->dfs_bitmap.n_elements = + DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr), + BITS_PER_BYTE * sizeof(u32)); + debugfs_create_u32_array("bitmap", 0444, dir, + &cmr->dfs_bitmap); + } + + /* + * Backward compatible symlinks to range 0 for base_pfn and bitmap. + */ + debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn"); + debugfs_create_symlink("bitmap", tmp, "ranges/0/bitmap"); } static int __init cma_debugfs_init(void) From 624ab90b7b8758090654520b03c97cecc438a414 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:04 +0000 Subject: [PATCH 136/431] mm/cma: introduce cma_intersects function Now that CMA areas can have multiple physical ranges, code can't assume a CMA struct represents a base_pfn plus a size, as returned from cma_get_base. Most cases are ok though, since they all explicitly refer to CMA areas that were created using existing interfaces (cma_declare_contiguous_nid or cma_init_reserved_mem), which guarantees they have just one physical range. An exception is the s390 code, which walks all CMA ranges to see if they intersect with a range of memory that is about to be hotremoved. So, in the future, it might run in to multi-range areas. To keep this check working, define a cma_intersects function. This just checks if a physaddr range intersects any of the ranges. Use it in the s390 check. Link: https://lkml.kernel.org/r/20250228182928.2645936-4-fvdl@google.com Signed-off-by: Frank van der Linden Acked-by: Alexander Gordeev Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/s390/mm/init.c | 13 +++++-------- include/linux/cma.h | 1 + mm/cma.c | 21 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index f2298f7a3f21..d88cb1c13f7d 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -239,16 +239,13 @@ struct s390_cma_mem_data { static int s390_cma_check_range(struct cma *cma, void *data) { struct s390_cma_mem_data *mem_data; - unsigned long start, end; mem_data = data; - start = cma_get_base(cma); - end = start + cma_get_size(cma); - if (end < mem_data->start) - return 0; - if (start >= mem_data->end) - return 0; - return -EBUSY; + + if (cma_intersects(cma, mem_data->start, mem_data->end)) + return -EBUSY; + + return 0; } static int s390_cma_mem_notifier(struct notifier_block *nb, diff --git a/include/linux/cma.h b/include/linux/cma.h index 863427c27dc2..03d85c100dcc 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -53,6 +53,7 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); +extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end); extern void cma_reserve_pages_on_error(struct cma *cma); diff --git a/mm/cma.c b/mm/cma.c index 34caa6b29c99..8dc46bfa3819 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -978,3 +978,24 @@ int cma_for_each_area(int (*it)(struct cma *cma, void 
*data), void *data) return 0; } + +bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end) +{ + int r; + struct cma_memrange *cmr; + unsigned long rstart, rend; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + + rstart = PFN_PHYS(cmr->base_pfn); + rend = PFN_PHYS(cmr->base_pfn + cmr->count); + if (end < rstart) + continue; + if (start >= rend) + continue; + return true; + } + + return false; +} From 3dda0103e8ea4923d1a2f9d3c6b75aadc08aee73 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:05 +0000 Subject: [PATCH 137/431] mm, hugetlb: use cma_declare_contiguous_multi hugetlb_cma is fine with using multiple CMA ranges, as long as it can get its gigantic pages allocated from them. So, use cma_declare_contiguous_multi to allow for multiple ranges, increasing the chances of getting what we want on systems with gaps in physical memory. Link: https://lkml.kernel.org/r/20250228182928.2645936-5-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 318624c96584..2585d4da6c45 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7750,9 +7750,8 @@ void __init hugetlb_cma_reserve(int order) * may be returned to CMA allocator in the case of * huge page demotion. */ - res = cma_declare_contiguous_nid(0, size, 0, - PAGE_SIZE << order, - HUGETLB_PAGE_ORDER, false, name, + res = cma_declare_contiguous_multi(size, PAGE_SIZE << order, + HUGETLB_PAGE_ORDER, name, &hugetlb_cma[nid], nid); if (res) { pr_warn("hugetlb_cma: reservation failed: err %d, node %d", From 992e5491b6b83e4de28b14ee96347ff6bf16280a Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:06 +0000 Subject: [PATCH 138/431] mm/hugetlb: remove redundant __ClearPageReserved In hugetlb_folio_init_tail_vmemmap, the reserved flag is cleared for the tail page just before it is zeroed out, which is redundant. Remove the __ClearPageReserved call. 
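A toy model of why that clear is dead code (illustrative struct and bit, not the kernel's struct page or __init_single_page()): clearing a single flag immediately before the whole structure is reinitialized from scratch cannot change the result.

  #include <stdint.h>
  #include <string.h>

  struct fake_page {
          uint64_t flags;
          uint64_t refcount;
  };

  /* Stands in for __init_single_page(): rebuilds the struct from scratch. */
  static void fake_init_single_page(struct fake_page *p)
  {
          memset(p, 0, sizeof(*p));
          p->refcount = 1;        /* zone/node/flag bits would be set here */
  }

  int main(void)
  {
          struct fake_page a = { .flags = 1u << 2 };  /* pretend bit 2 is "reserved" */
          struct fake_page b = a;

          a.flags &= ~(1UL << 2);  /* the redundant "__ClearPageReserved" step */
          fake_init_single_page(&a);
          fake_init_single_page(&b);

          /* Identical either way: the earlier clear had no observable effect. */
          return memcmp(&a, &b, sizeof(a)) != 0;
  }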
Link: https://lkml.kernel.org/r/20250228182928.2645936-6-fvdl@google.com Signed-off-by: Frank van der Linden Reviewed-by: Oscar Salvador Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2585d4da6c45..d5f616d07e81 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3210,7 +3210,6 @@ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio, for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) { struct page *page = pfn_to_page(pfn); - __ClearPageReserved(folio_page(folio, pfn - head_pfn)); __init_single_page(page, pfn, zone, nid); prep_compound_tail((struct page *)folio, pfn - head_pfn); ret = page_ref_freeze(page, 1); From de55996d7188e7cd11b5e8d8f618467c8439feaa Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:07 +0000 Subject: [PATCH 139/431] mm/hugetlb: use online nodes for bootmem allocation Later commits will move hugetlb bootmem allocation to earlier in init, when N_MEMORY has not yet been set on nodes. Use online nodes instead. At most, this wastes just a few cycles once during boot (and most likely none). Link: https://lkml.kernel.org/r/20250228182928.2645936-7-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d5f616d07e81..38ff808fcc83 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3164,7 +3164,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) goto found; } /* allocate from next node when distributing huge pages */ - for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) { + for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) { m = memblock_alloc_try_nid_raw( huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); @@ -4558,8 +4558,8 @@ void __init hugetlb_add_hstate(unsigned int order) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); INIT_LIST_HEAD(&h->hugepage_activelist); - h->next_nid_to_alloc = first_memory_node; - h->next_nid_to_free = first_memory_node; + h->next_nid_to_alloc = first_online_node; + h->next_nid_to_free = first_online_node; snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/SZ_1K); From 5b47c02967ab770aa7661c8863a21b2fd59e35ff Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:08 +0000 Subject: [PATCH 140/431] mm/hugetlb: convert cmdline parameters from setup to early Convert the cmdline parameters (hugepagesz, hugepages, default_hugepagesz and hugetlb_free_vmemmap) to early parameters. 
Since parse_early_param might run before MMU setups on some platforms (powerpc), validation of huge page sizes as specified in command line parameters would fail. So instead, for the hstate-related values, just record the them and parse them on demand, from hugetlb_bootmem_alloc. The allocation of hugetlb bootmem pages is now done in hugetlb_bootmem_alloc, which is called explicitly at the start of mm_core_init(). core_initcall would be too late, as that happens with memblock already torn down. This change will allow earlier allocation and initialization of bootmem hugetlb pages later on. No functional change intended. Link: https://lkml.kernel.org/r/20250228182928.2645936-8-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- .../admin-guide/kernel-parameters.txt | 14 +- include/linux/hugetlb.h | 6 + mm/hugetlb.c | 133 ++++++++++++++---- mm/hugetlb_vmemmap.c | 6 +- mm/mm_init.c | 3 + 5 files changed, 126 insertions(+), 36 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index fb8752b42ec8..ae21d911d1c7 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1861,7 +1861,7 @@ hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET registers. Default set by CONFIG_HPET_MMAP_DEFAULT. - hugepages= [HW] Number of HugeTLB pages to allocate at boot. + hugepages= [HW,EARLY] Number of HugeTLB pages to allocate at boot. If this follows hugepagesz (below), it specifies the number of pages of hugepagesz to be allocated. If this is the first HugeTLB parameter on the command @@ -1873,12 +1873,12 @@ :[,:] hugepagesz= - [HW] The size of the HugeTLB pages. This is used in - conjunction with hugepages (above) to allocate huge - pages of a specific size at boot. The pair - hugepagesz=X hugepages=Y can be specified once for - each supported huge page size. Huge page sizes are - architecture dependent. See also + [HW,EARLY] The size of the HugeTLB pages. This is + used in conjunction with hugepages (above) to + allocate huge pages of a specific size at boot. The + pair hugepagesz=X hugepages=Y can be specified once + for each supported huge page size. Huge page sizes + are architecture dependent. See also Documentation/admin-guide/mm/hugetlbpage.rst. 
Format: size[KMG] diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 76a75ec03dd6..a596aaa178d1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -174,6 +174,8 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio); extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages[MAX_NUMNODES]; +void hugetlb_bootmem_alloc(void); + /* arch callbacks */ #ifndef CONFIG_HIGHPTE @@ -1257,6 +1259,10 @@ static inline bool hugetlbfs_pagecache_present( { return false; } + +static inline void hugetlb_bootmem_alloc(void) +{ +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 38ff808fcc83..d1178059a52b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -62,6 +63,24 @@ static unsigned long hugetlb_cma_size __initdata; __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; +/* + * Due to ordering constraints across the init code for various + * architectures, hugetlb hstate cmdline parameters can't simply + * be early_param. early_param might call the setup function + * before valid hugetlb page sizes are determined, leading to + * incorrect rejection of valid hugepagesz= options. + * + * So, record the parameters early and consume them whenever the + * init code is ready for them, by calling hugetlb_parse_params(). + */ + +/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */ +#define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1) +struct hugetlb_cmdline { + char *val; + int (*setup)(char *val); +}; + /* for command line parsing */ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; @@ -69,6 +88,20 @@ static bool __initdata parsed_valid_hugepagesz = true; static bool __initdata parsed_default_hugepagesz; static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; +static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata; +static int hstate_cmdline_index __initdata; +static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata; +static int hugetlb_param_index __initdata; +static __init int hugetlb_add_param(char *s, int (*setup)(char *val)); +static __init void hugetlb_parse_params(void); + +#define hugetlb_early_param(str, func) \ +static __init int func##args(char *s) \ +{ \ + return hugetlb_add_param(s, func); \ +} \ +early_param(str, func##args) + /* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, * free_huge_pages, and surplus_huge_pages. 
@@ -3496,6 +3529,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) for (i = 0; i < MAX_NUMNODES; i++) INIT_LIST_HEAD(&huge_boot_pages[i]); + h->next_nid_to_alloc = first_online_node; + h->next_nid_to_free = first_online_node; initialized = true; } @@ -4558,8 +4593,6 @@ void __init hugetlb_add_hstate(unsigned int order) for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); INIT_LIST_HEAD(&h->hugepage_activelist); - h->next_nid_to_alloc = first_online_node; - h->next_nid_to_free = first_online_node; snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/SZ_1K); @@ -4584,6 +4617,42 @@ static void __init hugepages_clear_pages_in_node(void) } } +static __init int hugetlb_add_param(char *s, int (*setup)(char *)) +{ + size_t len; + char *p; + + if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS) + return -EINVAL; + + len = strlen(s) + 1; + if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf)) + return -EINVAL; + + p = &hstate_cmdline_buf[hstate_cmdline_index]; + memcpy(p, s, len); + hstate_cmdline_index += len; + + hugetlb_params[hugetlb_param_index].val = p; + hugetlb_params[hugetlb_param_index].setup = setup; + + hugetlb_param_index++; + + return 0; +} + +static __init void hugetlb_parse_params(void) +{ + int i; + struct hugetlb_cmdline *hcp; + + for (i = 0; i < hugetlb_param_index; i++) { + hcp = &hugetlb_params[i]; + + hcp->setup(hcp->val); + } +} + /* * hugepages command line processing * hugepages normally follows a valid hugepagsz or default_hugepagsz @@ -4603,7 +4672,7 @@ static int __init hugepages_setup(char *s) if (!parsed_valid_hugepagesz) { pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); parsed_valid_hugepagesz = true; - return 1; + return -EINVAL; } /* @@ -4657,24 +4726,16 @@ static int __init hugepages_setup(char *s) } } - /* - * Global state is always initialized later in hugetlb_init. - * But we need to allocate gigantic hstates here early to still - * use the bootmem allocator. 
- */ - if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) - hugetlb_hstate_alloc_pages(parsed_hstate); - last_mhp = mhp; - return 1; + return 0; invalid: pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); hugepages_clear_pages_in_node(); - return 1; + return -EINVAL; } -__setup("hugepages=", hugepages_setup); +hugetlb_early_param("hugepages", hugepages_setup); /* * hugepagesz command line processing @@ -4693,7 +4754,7 @@ static int __init hugepagesz_setup(char *s) if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); - return 1; + return -EINVAL; } h = size_to_hstate(size); @@ -4708,7 +4769,7 @@ static int __init hugepagesz_setup(char *s) if (!parsed_default_hugepagesz || h != &default_hstate || default_hstate.max_huge_pages) { pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); - return 1; + return -EINVAL; } /* @@ -4718,14 +4779,14 @@ static int __init hugepagesz_setup(char *s) */ parsed_hstate = h; parsed_valid_hugepagesz = true; - return 1; + return 0; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; - return 1; + return 0; } -__setup("hugepagesz=", hugepagesz_setup); +hugetlb_early_param("hugepagesz", hugepagesz_setup); /* * default_hugepagesz command line input @@ -4739,14 +4800,14 @@ static int __init default_hugepagesz_setup(char *s) parsed_valid_hugepagesz = false; if (parsed_default_hugepagesz) { pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); - return 1; + return -EINVAL; } size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); - return 1; + return -EINVAL; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); @@ -4763,17 +4824,33 @@ static int __init default_hugepagesz_setup(char *s) */ if (default_hstate_max_huge_pages) { default_hstate.max_huge_pages = default_hstate_max_huge_pages; - for_each_online_node(i) - default_hstate.max_huge_pages_node[i] = - default_hugepages_in_node[i]; - if (hstate_is_gigantic(&default_hstate)) - hugetlb_hstate_alloc_pages(&default_hstate); + /* + * Since this is an early parameter, we can't check + * NUMA node state yet, so loop through MAX_NUMNODES. 
+ */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (default_hugepages_in_node[i] != 0) + default_hstate.max_huge_pages_node[i] = + default_hugepages_in_node[i]; + } default_hstate_max_huge_pages = 0; } - return 1; + return 0; +} +hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); + +void __init hugetlb_bootmem_alloc(void) +{ + struct hstate *h; + + hugetlb_parse_params(); + + for_each_hstate(h) { + if (hstate_is_gigantic(h)) + hugetlb_hstate_alloc_pages(h); + } } -__setup("default_hugepagesz=", default_hugepagesz_setup); static unsigned int allowed_mems_nr(struct hstate *h) { diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 7735972add01..5b484758f813 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -444,7 +444,11 @@ DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); -core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); +static int __init hugetlb_vmemmap_optimize_param(char *buf) +{ + return kstrtobool(buf, &vmemmap_optimize_enabled); +} +early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param); static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags) diff --git a/mm/mm_init.c b/mm/mm_init.c index c767946e8f5f..45bc4b55fd6a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "internal.h" #include "slab.h" #include "shuffle.h" @@ -2641,6 +2642,8 @@ static void __init mem_init_print_info(void) */ void __init mm_core_init(void) { + hugetlb_bootmem_alloc(); + /* Initializations relying on SMP setup */ BUILD_BUG_ON(MAX_ZONELISTS > 2); build_all_zonelists(NULL); From d3cd80c5879443c6986091717b6de889c60b3eb1 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:09 +0000 Subject: [PATCH 141/431] x86/mm: make register_page_bootmem_memmap handle PTE mappings register_page_bootmem_memmap expects that vmemmap pages handed to it are PMD-mapped, and that the number of pages to call get_page_bootmem on is PMD-aligned. This is currently a correct assumption, but will no longer be true once pre-HVO of hugetlb pages is implemented. Make it handle PTE-mapped vmemmap pages and a nr_pages argument that is not necessarily PAGES_PER_SECTION. 
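To see what the nr_pmd_pages change buys, here is a small standalone arithmetic sketch (x86-64 constants assumed: 4 KiB pages, 2 MiB PMDs; the addresses are made up). The old code always accounted a full PMD worth of pages; the new code, shown in the diff below, accounts only the pages that actually fall inside [addr, next):

  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PMD_SHIFT  21
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PMD_SIZE   (1UL << PMD_SHIFT)
  #define PMD_MASK   (~(PMD_SIZE - 1))

  int main(void)
  {
          /* A vmemmap chunk that is neither PMD-aligned nor PMD-sized. */
          unsigned long addr = 0x100000000UL + 64 * PAGE_SIZE;
          unsigned long end  = addr + 128 * PAGE_SIZE;

          /* Models pmd_addr_end(): next PMD boundary, clamped to end. */
          unsigned long next = (addr + PMD_SIZE) & PMD_MASK;
          if (next > end)
                  next = end;

          printf("old: 1 << get_order(PMD_SIZE) = %lu pages\n",
                 PMD_SIZE / PAGE_SIZE);                   /* always 512 */
          printf("new: (next - addr) >> PAGE_SHIFT = %lu pages\n",
                 (next - addr) >> PAGE_SHIFT);            /* 128 here */
          return 0;
  }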
Link: https://lkml.kernel.org/r/20250228182928.2645936-9-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Dan Carpenter Cc: Alexander Gordeev Cc: Arnd Bergmann Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/x86/mm/init_64.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 01ea7c6df303..6e8e4ef5312a 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1599,11 +1599,14 @@ void register_page_bootmem_memmap(unsigned long section_nr, } get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); - if (!boot_cpu_has(X86_FEATURE_PSE)) { + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + next = (addr + PAGE_SIZE) & PAGE_MASK; + continue; + } + + if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) { next = (addr + PAGE_SIZE) & PAGE_MASK; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - continue; get_page_bootmem(section_nr, pmd_page(*pmd), MIX_SECTION_INFO); @@ -1614,12 +1617,7 @@ void register_page_bootmem_memmap(unsigned long section_nr, SECTION_INFO); } else { next = pmd_addr_end(addr, end); - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - continue; - - nr_pmd_pages = 1 << get_order(PMD_SIZE); + nr_pmd_pages = (next - addr) >> PAGE_SHIFT; page = pmd_page(*pmd); while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, From 243a75e236802614075511266c8d1834d4bcffe9 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:10 +0000 Subject: [PATCH 142/431] mm/bootmem_info: export register_page_bootmem_memmap If other mm code wants to use this function for early memmap initialization (on the platforms that have it), it should be made available properly, not just unconditionally in mm.h. Make this function available for such cases.
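A hypothetical call site (the function name mark_early_memmap is invented for illustration) shows the intended usage pattern: include <linux/bootmem_info.h> and call the function unconditionally, relying on the static inline stub when CONFIG_HAVE_BOOTMEM_INFO_NODE is not set:

#include <linux/bootmem_info.h>

/* Illustrative only: not a function added by this series. */
static void __init mark_early_memmap(unsigned long section_nr,
				     struct page *map, unsigned long nr_pages)
{
	/* no #ifdef needed at the call site; the stub compiles away */
	register_page_bootmem_memmap(section_nr, map, nr_pages);
}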
Link: https://lkml.kernel.org/r/20250228182928.2645936-10-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/powerpc/mm/init_64.c | 4 ++++ include/linux/bootmem_info.h | 7 +++++++ include/linux/mm.h | 3 --- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d96bbc001e73..b6f3ae03ca9e 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -386,10 +387,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, } #endif + +#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) { } +#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ #endif /* CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index d8a8d245824a..4c506e76a808 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -18,6 +18,8 @@ enum bootmem_type { #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE void __init register_page_bootmem_info_node(struct pglist_data *pgdat); +void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, + unsigned long nr_pages); void get_page_bootmem(unsigned long info, struct page *page, enum bootmem_type type); @@ -58,6 +60,11 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) { } +static inline void register_page_bootmem_memmap(unsigned long section_nr, + struct page *map, unsigned long nr_pages) +{ +} + static inline void put_page_bootmem(struct page *page) { } diff --git a/include/linux/mm.h b/include/linux/mm.h index 14115c9949d8..e90ab0bdcc8c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4018,9 +4018,6 @@ static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap, } #endif -void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, - unsigned long nr_pages); - enum mf_flags { MF_COUNT_INCREASED = 1 << 0, MF_ACTION_REQUIRED = 1 << 1, From d65917c42373f70159a3fc453f8f028fd665e04f Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:11 +0000 Subject: [PATCH 143/431] mm/sparse: allow for alternate vmemmap section init at boot Add functions that are called just before the per-section memmap is initialized and just before the memmap page structures are initialized. They are called sparse_vmemmap_init_nid_early and sparse_vmemmap_init_nid_late, respectively. This allows for mm subsystems to add calls to initialize memmap and page structures in a specific way, if using SPARSEMEM_VMEMMAP. Specifically, hugetlb can pre-HVO bootmem allocated pages that way, so that no time and resources are wasted on allocating vmemmap pages, only to free them later (and possibly unnecessarily running the system out of memory in the process). Refactor some code and export a few convenience functions for external use. In sparse_init_nid, skip any sections that are already initialized, e.g. they have been initialized by sparse_vmemmap_init_nid_early already. 
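As a minimal sketch of the intended hook pattern (the helper name is invented, and the use of vmemmap_populate_hvo anticipates a later patch in this series; any early populate routine would do), a subsystem hook run from sparse_vmemmap_init_nid_early() could populate the memmap for a section itself and register it with SECTION_IS_VMEMMAP_PREINIT, so that sparse_init_nid() sees preinited_vmemmap_section() and skips it:

/* Placeholder helper a preinit hook might call per section it owns. */
static void __init my_preinit_one_section(int nid, unsigned long pfn,
					  unsigned long headsize)
{
	unsigned long pnum = pfn_to_section_nr(pfn);
	struct page *map = pfn_to_page(pfn);
	unsigned long start = (unsigned long)map;
	unsigned long end = start + section_map_size();

	if (vmemmap_populate_hvo(start, end, nid, headsize) < 0)
		return;

	sparse_init_early_section(nid, map, pnum, SECTION_IS_VMEMMAP_PREINIT);
}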
The hugetlb code to use these functions will be added in a later commit. Export section_map_size, as any alternate memmap init code will want to use it. The internal config option to enable this is SPARSEMEM_VMEMMAP_PREINIT, which is selected if an architecture-specific option, ARCH_WANT_HUGETLB_VMEMMAP_PREINIT, is set. In the future, if other subsystems want to do preinit too, they can do it in a similar fashion. The internal config option is there because a section flag is used, and the number of flags available is architecture-dependent (see mmzone.h). Architecures can decide if there is room for the flag when enabling options that select SPARSEMEM_VMEMMAP_PREINIT. Fortunately, as of right now, all sparse vmemmap using architectures do have room. Link: https://lkml.kernel.org/r/20250228182928.2645936-11-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Johannes Weiner Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- fs/Kconfig | 1 + include/linux/mm.h | 1 + include/linux/mmzone.h | 35 +++++++++++++++++ mm/Kconfig | 6 +++ mm/bootmem_info.c | 4 +- mm/mm_init.c | 3 ++ mm/sparse-vmemmap.c | 23 +++++++++++ mm/sparse.c | 87 ++++++++++++++++++++++++++++++++---------- 8 files changed, 138 insertions(+), 22 deletions(-) diff --git a/fs/Kconfig b/fs/Kconfig index 64d420e3c475..8bcd3a6f80ab 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -286,6 +286,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP def_bool HUGETLB_PAGE depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP depends on SPARSEMEM_VMEMMAP + select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT config HUGETLB_PMD_PAGE_TABLE_SHARING def_bool HUGETLB_PAGE diff --git a/include/linux/mm.h b/include/linux/mm.h index e90ab0bdcc8c..03e4807bd911 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3928,6 +3928,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) #endif void *sparse_buffer_alloc(unsigned long size); +unsigned long section_map_size(void); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9540b41894da..44ecb2f90db4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1933,6 +1933,9 @@ enum { SECTION_IS_EARLY_BIT, #ifdef CONFIG_ZONE_DEVICE SECTION_TAINT_ZONE_DEVICE_BIT, +#endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT + SECTION_IS_VMEMMAP_PREINIT_BIT, #endif SECTION_MAP_LAST_BIT, }; @@ -1944,6 +1947,9 @@ enum { #ifdef CONFIG_ZONE_DEVICE #define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT) +#endif #define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) #define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT @@ -1998,6 +2004,30 @@ static inline int online_device_section(struct mem_section *section) } #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +static inline int preinited_vmemmap_section(struct mem_section *section) +{ + return (section && + (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT)); +} + +void sparse_vmemmap_init_nid_early(int nid); +void 
sparse_vmemmap_init_nid_late(int nid); + +#else +static inline int preinited_vmemmap_section(struct mem_section *section) +{ + return 0; +} +static inline void sparse_vmemmap_init_nid_early(int nid) +{ +} + +static inline void sparse_vmemmap_init_nid_late(int nid) +{ +} +#endif + static inline int online_section_nr(unsigned long nr) { return online_section(__nr_to_section(nr)); @@ -2035,6 +2065,9 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) } #endif +void sparse_init_early_section(int nid, struct page *map, unsigned long pnum, + unsigned long flags); + #ifndef CONFIG_HAVE_ARCH_PFN_VALID /** * pfn_valid - check if there is a valid memory map entry for a PFN @@ -2116,6 +2149,8 @@ void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) +#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0) +#define sparse_vmemmap_init_nid_late(_nid) do {} while (0) #define pfn_in_present_section pfn_valid #define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ diff --git a/mm/Kconfig b/mm/Kconfig index fba9757e5814..4c1640a197a0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -442,6 +442,9 @@ config SPARSEMEM_VMEMMAP SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise pfn_to_page and page_to_pfn operations. This is the most efficient option when sufficient kernel resources are available. + +config SPARSEMEM_VMEMMAP_PREINIT + bool # # Select this config option from the architecture Kconfig, if it is preferred # to enable the feature of HugeTLB/dev_dax vmemmap optimization. @@ -452,6 +455,9 @@ config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP bool +config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT + bool + config HAVE_MEMBLOCK_PHYS_MAP bool diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c index 95f288169a38..b0e2a9fa641f 100644 --- a/mm/bootmem_info.c +++ b/mm/bootmem_info.c @@ -88,7 +88,9 @@ static void __init register_page_bootmem_info_section(unsigned long start_pfn) memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); - register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); + if (!preinited_vmemmap_section(ms)) + register_page_bootmem_memmap(section_nr, memmap, + PAGES_PER_SECTION); usage = ms->usage; page = virt_to_page(usage); diff --git a/mm/mm_init.c b/mm/mm_init.c index 45bc4b55fd6a..56f4a8cfb764 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1862,6 +1862,9 @@ void __init free_area_init(unsigned long *max_zone_pfn) } } + for_each_node_state(nid, N_MEMORY) + sparse_vmemmap_init_nid_late(nid); + calc_nr_kernel_pages(); memmap_init(); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 3287ebadd167..8751c46c35e4 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -470,3 +470,26 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn, return pfn_to_page(pfn); } + +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +/* + * This is called just before initializing sections for a NUMA node. + * Any special initialization that needs to be done before the + * generic initialization can be done from here. Sections that + * are initialized in hooks called from here will be skipped by + * the generic initialization. + */ +void __init sparse_vmemmap_init_nid_early(int nid) +{ +} + +/* + * This is called just before the initialization of page structures + * through memmap_init. 
Zones are now initialized, so any work that + * needs to be done that needs zone information can be done from + * here. + */ +void __init sparse_vmemmap_init_nid_late(int nid) +{ +} +#endif diff --git a/mm/sparse.c b/mm/sparse.c index 133b033d0cba..ee0234a77c7f 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -408,13 +408,13 @@ static void __init check_usemap_section_nr(int nid, #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_SPARSEMEM_VMEMMAP -static unsigned long __init section_map_size(void) +unsigned long __init section_map_size(void) { return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); } #else -static unsigned long __init section_map_size(void) +unsigned long __init section_map_size(void) { return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); } @@ -495,6 +495,44 @@ void __weak __meminit vmemmap_populate_print_last(void) { } +static void *sparse_usagebuf __meminitdata; +static void *sparse_usagebuf_end __meminitdata; + +/* + * Helper function that is used for generic section initialization, and + * can also be used by any hooks added above. + */ +void __init sparse_init_early_section(int nid, struct page *map, + unsigned long pnum, unsigned long flags) +{ + BUG_ON(!sparse_usagebuf || sparse_usagebuf >= sparse_usagebuf_end); + check_usemap_section_nr(nid, sparse_usagebuf); + sparse_init_one_section(__nr_to_section(pnum), pnum, map, + sparse_usagebuf, SECTION_IS_EARLY | flags); + sparse_usagebuf = (void *)sparse_usagebuf + mem_section_usage_size(); +} + +static int __init sparse_usage_init(int nid, unsigned long map_count) +{ + unsigned long size; + + size = mem_section_usage_size() * map_count; + sparse_usagebuf = sparse_early_usemaps_alloc_pgdat_section( + NODE_DATA(nid), size); + if (!sparse_usagebuf) { + sparse_usagebuf_end = NULL; + return -ENOMEM; + } + + sparse_usagebuf_end = sparse_usagebuf + size; + return 0; +} + +static void __init sparse_usage_fini(void) +{ + sparse_usagebuf = sparse_usagebuf_end = NULL; +} + /* * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end) * And number of present sections in this node is map_count. @@ -503,47 +541,54 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count) { - struct mem_section_usage *usage; unsigned long pnum; struct page *map; + struct mem_section *ms; - usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid), - mem_section_usage_size() * map_count); - if (!usage) { + if (sparse_usage_init(nid, map_count)) { pr_err("%s: node[%d] usemap allocation failed", __func__, nid); goto failed; } + sparse_buffer_init(map_count * section_map_size(), nid); + + sparse_vmemmap_init_nid_early(nid); + for_each_present_section_nr(pnum_begin, pnum) { unsigned long pfn = section_nr_to_pfn(pnum); if (pnum >= pnum_end) break; - map = __populate_section_memmap(pfn, PAGES_PER_SECTION, - nid, NULL, NULL); - if (!map) { - pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.", - __func__, nid); - pnum_begin = pnum; - sparse_buffer_fini(); - goto failed; + ms = __nr_to_section(pnum); + if (!preinited_vmemmap_section(ms)) { + map = __populate_section_memmap(pfn, PAGES_PER_SECTION, + nid, NULL, NULL); + if (!map) { + pr_err("%s: node[%d] memory map backing failed. 
Some memory will not be available.", + __func__, nid); + pnum_begin = pnum; + sparse_usage_fini(); + sparse_buffer_fini(); + goto failed; + } + sparse_init_early_section(nid, map, pnum, 0); } - check_usemap_section_nr(nid, usage); - sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage, - SECTION_IS_EARLY); - usage = (void *) usage + mem_section_usage_size(); } + sparse_usage_fini(); sparse_buffer_fini(); return; failed: - /* We failed to allocate, mark all the following pnums as not present */ + /* + * We failed to allocate, mark all the following pnums as not present, + * except the ones already initialized earlier. + */ for_each_present_section_nr(pnum_begin, pnum) { - struct mem_section *ms; - if (pnum >= pnum_end) break; ms = __nr_to_section(pnum); - ms->section_mem_map = 0; + if (!preinited_vmemmap_section(ms)) + ms->section_mem_map = 0; } } From 3d61909cb7f89fd99523e94b7f25d34d87d86496 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:12 +0000 Subject: [PATCH 144/431] mm/hugetlb: set migratetype for bootmem folios The pageblocks that back memblock allocated hugetlb folios might not have the migrate type set, in the CONFIG_DEFERRED_STRUCT_PAGE_INIT case. memblock allocated hugetlb folios might be given to the buddy allocator eventually (if nr_hugepages is lowered), so make sure that the migrate type for the pageblocks contained in them is set when initializing them. Set it to the default that memmap init also uses (MIGRATE_MOVABLE). Link: https://lkml.kernel.org/r/20250228182928.2645936-12-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d1178059a52b..00facd2123e5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3266,6 +3266,26 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio, prep_compound_head((struct page *)folio, huge_page_order(h)); } +/* + * memblock-allocated pageblocks might not have the migrate type set + * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE) + * here. + * + * Note that this will not write the page struct, it is ok (and necessary) + * to do this on vmemmap optimized folios.
+ */ +static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, + struct hstate *h) +{ + unsigned long nr_pages = pages_per_huge_page(h), i; + + WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); + + for (i = 0; i < nr_pages; i += pageblock_nr_pages) + set_pageblock_migratetype(folio_page(folio, i), + MIGRATE_MOVABLE); +} + static void __init prep_and_add_bootmem_folios(struct hstate *h, struct list_head *folio_list) { @@ -3287,6 +3307,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, HUGETLB_VMEMMAP_RESERVE_PAGES, pages_per_huge_page(h)); } + hugetlb_bootmem_init_migratetype(folio, h); /* Subdivide locks to achieve better parallel performance */ spin_lock_irqsave(&hugetlb_lock, flags); __prep_account_new_huge_page(h, folio_nid(folio)); From d69d8261a990843514c40aeef36a14add71daf4e Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:13 +0000 Subject: [PATCH 145/431] mm: define __init_reserved_page_zone function Sometimes page structs must be unconditionally initialized as reserved, regardless of DEFERRED_STRUCT_PAGE_INIT. Define a function, __init_reserved_page_zone, containing code that already did all of the work in init_reserved_page, and make it available for use. Link: https://lkml.kernel.org/r/20250228182928.2645936-13-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/internal.h | 1 + mm/mm_init.c | 38 +++++++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 20b3535935a3..780c17b4003a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1449,6 +1449,7 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid); +void __meminit __init_reserved_page_zone(unsigned long pfn, int nid); /* shrinker related functions */ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, diff --git a/mm/mm_init.c b/mm/mm_init.c index 56f4a8cfb764..419b7db220d2 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -650,6 +650,28 @@ static inline void fixup_hashdist(void) static inline void fixup_hashdist(void) {} #endif /* CONFIG_NUMA */ +/* + * Initialize a reserved page unconditionally, finding its zone first. 
+ */ +void __meminit __init_reserved_page_zone(unsigned long pfn, int nid) +{ + pg_data_t *pgdat; + int zid; + + pgdat = NODE_DATA(nid); + + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + struct zone *zone = &pgdat->node_zones[zid]; + + if (zone_spans_pfn(zone, pfn)) + break; + } + __init_single_page(pfn_to_page(pfn), pfn, zid, nid); + + if (pageblock_aligned(pfn)) + set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE); +} + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static inline void pgdat_set_deferred_range(pg_data_t *pgdat) { @@ -708,24 +730,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) static void __meminit init_reserved_page(unsigned long pfn, int nid) { - pg_data_t *pgdat; - int zid; - if (early_page_initialised(pfn, nid)) return; - pgdat = NODE_DATA(nid); - - for (zid = 0; zid < MAX_NR_ZONES; zid++) { - struct zone *zone = &pgdat->node_zones[zid]; - - if (zone_spans_pfn(zone, pfn)) - break; - } - __init_single_page(pfn_to_page(pfn), pfn, zid, nid); - - if (pageblock_aligned(pfn)) - set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE); + __init_reserved_page_zone(pfn, nid); } #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} From 14ed3a595fa4e8f5bceddb91cbcd1ba566c9669b Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:14 +0000 Subject: [PATCH 146/431] mm/hugetlb: check bootmem pages for zone intersections Bootmem hugetlb pages are allocated using memblock, which isn't (and mostly can't be) aware of zones. So, they may end up crossing zone boundaries. This would create confusion, a hugetlb page that is part of multiple zones is bad. Worse, HVO might then end up stealthily re-assigning pages to a different zone when a hugetlb page is freed, since the tail page structures beyond the first vmemmap page would inherit the zone of the first page structures. While the chance of this happening is low, you can definitely create a configuration where this happens (especially using ZONE_MOVABLE). To avoid this issue, check if bootmem hugetlb pages intersect with multiple zones during the gather phase, and discard them, handing them to the page allocator, if they do. Record the number of invalid bootmem pages per node and subtract them from the number of available pages at the end, making it easier to do these checks in multiple places later on. 
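Condensed into one helper for illustration (bootmem_page_usable is not a real function in this series; it only restates the gather-phase flow added below), the check-and-discard logic looks like this:

static bool __init bootmem_page_usable(int nid, struct huge_bootmem_page *m)
{
	unsigned long start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
	struct hstate *h = m->hstate;

	if (!pfn_range_intersects_zones(nid, start_pfn,
					pages_per_huge_page(h)))
		return true;		/* fully inside one zone: keep it */

	/* spans multiple zones: count it and let the caller free it */
	hstate_boot_nrinvalid[hstate_index(h)]++;
	return false;
}

A page rejected this way is then handed back to the page allocator one base page at a time via __init_reserved_page_zone() and free_reserved_page(), as done in hugetlb_bootmem_free_invalid_page() below.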
Link: https://lkml.kernel.org/r/20250228182928.2645936-14-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++-- mm/internal.h | 2 ++ mm/mm_init.c | 25 +++++++++++++++++++++ 3 files changed, 86 insertions(+), 2 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 00facd2123e5..e4bf06f13178 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -62,6 +62,7 @@ static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; static unsigned long hugetlb_cma_size __initdata; __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; +static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata; /* * Due to ordering constraints across the init code for various @@ -3316,6 +3317,44 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, } } +static bool __init hugetlb_bootmem_page_zones_valid(int nid, + struct huge_bootmem_page *m) +{ + unsigned long start_pfn; + bool valid; + + start_pfn = virt_to_phys(m) >> PAGE_SHIFT; + + valid = !pfn_range_intersects_zones(nid, start_pfn, + pages_per_huge_page(m->hstate)); + if (!valid) + hstate_boot_nrinvalid[hstate_index(m->hstate)]++; + + return valid; +} + +/* + * Free a bootmem page that was found to be invalid (intersecting with + * multiple zones). + * + * Since it intersects with multiple zones, we can't just do a free + * operation on all pages at once, but instead have to walk all + * pages, freeing them one by one. + */ +static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, + struct hstate *h) +{ + unsigned long npages = pages_per_huge_page(h); + unsigned long pfn; + + while (npages--) { + pfn = page_to_pfn(page); + __init_reserved_page_zone(pfn, nid); + free_reserved_page(page); + page++; + } +} + /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. @@ -3323,14 +3362,25 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, static void __init gather_bootmem_prealloc_node(unsigned long nid) { LIST_HEAD(folio_list); - struct huge_bootmem_page *m; + struct huge_bootmem_page *m, *tm; struct hstate *h = NULL, *prev_h = NULL; - list_for_each_entry(m, &huge_boot_pages[nid], list) { + list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { struct page *page = virt_to_page(m); struct folio *folio = (void *)page; h = m->hstate; + if (!hugetlb_bootmem_page_zones_valid(nid, m)) { + /* + * Can't use this page. Initialize the + * page structures if that hasn't already + * been done, and give them to the page + * allocator. + */ + hugetlb_bootmem_free_invalid_page(nid, page, h); + continue; + } + /* * It is possible to have multiple huge page sizes (hstates) * in this list. If so, process each size separately. 
@@ -3602,13 +3652,20 @@ static void __init hugetlb_init_hstates(void) static void __init report_hugepages(void) { struct hstate *h; + unsigned long nrinvalid; for_each_hstate(h) { char buf[32]; + nrinvalid = hstate_boot_nrinvalid[hstate_index(h)]; + h->max_huge_pages -= nrinvalid; + string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", buf, h->free_huge_pages); + if (nrinvalid) + pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n", + buf, nrinvalid, nrinvalid > 1 ? "s" : ""); pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); } diff --git a/mm/internal.h b/mm/internal.h index 780c17b4003a..8233c207d3f3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -658,6 +658,8 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, } void set_zone_contiguous(struct zone *zone); +bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, + unsigned long nr_pages); static inline void clear_zone_contiguous(struct zone *zone) { diff --git a/mm/mm_init.c b/mm/mm_init.c index 419b7db220d2..3eec528afe43 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2287,6 +2287,31 @@ void set_zone_contiguous(struct zone *zone) zone->contiguous = true; } +/* + * Check if a PFN range intersects multiple zones on one or more + * NUMA nodes. Specify the @nid argument if it is known that this + * PFN range is on one node, NUMA_NO_NODE otherwise. + */ +bool pfn_range_intersects_zones(int nid, unsigned long start_pfn, + unsigned long nr_pages) +{ + struct zone *zone, *izone = NULL; + + for_each_zone(zone) { + if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid) + continue; + + if (zone_intersects(zone, start_pfn, nr_pages)) { + if (izone != NULL) + return true; + izone = zone; + } + + } + + return false; +} + static void __init mem_init_print_info(void); void __init page_alloc_init_late(void) { From 9eb6207b7812cee13431831c9661d21b53f3e93f Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:15 +0000 Subject: [PATCH 147/431] mm/sparse: add vmemmap_*_hvo functions Add a few functions to enable early HVO: vmemmap_populate_hvo vmemmap_undo_hvo vmemmap_wrprotect_hvo The populate and undo functions are expected to be used in early init, from the sparse_init_nid_early() function. The wrprotect function is to be used, potentially, later. To implement these functions, mostly re-use the existing compound pages vmemmap logic used by DAX. vmemmap_populate_address has its argument changed a bit in this commit: the page structure passed in to be reused in the mapping is replaced by a PFN and a flag. The flag indicates whether an extra ref should be taken on the vmemmap page containing the head page structure. Taking the ref is appropriate to for DAX / ZONE_DEVICE, but not for HugeTLB HVO. The HugeTLB vmemmap optimization maps tail page structure pages read-only. The vmemmap_wrprotect_hvo function that does this is implemented separately, because it cannot be guaranteed that reserved page structures will not be write accessed during memory initialization. Even with CONFIG_DEFERRED_STRUCT_PAGE_INIT, they might still be written to (if they are at the bottom of a zone). So, vmemmap_populate_hvo leaves the tail page structure pages RW initially, and then later during initialization, after memmap init is fully done, vmemmap_wrprotect_hvo must be called to finish the job. Subsequent commits will use these functions for early HugeTLB HVO. 
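A call-order sketch (the wrapper name pre_hvo_one_page is illustrative; in the series the two halves actually run in different boot phases) shows how the new helpers are meant to combine:

/*
 * Sketch only: populate the vmemmap HVO-style early, leave it read-write,
 * and write-protect the mirrored tail page structs once nothing will write
 * reserved page structs anymore. vmemmap_undo_hvo() is the bail-out path
 * if the page later turns out to be unusable.
 */
static int __init pre_hvo_one_page(unsigned long map_start,
				   unsigned long map_end, int nid,
				   unsigned long headsize)
{
	int ret;

	/* head page structs get real pages, tails mirror the last of them */
	ret = vmemmap_populate_hvo(map_start, map_end, nid, headsize);
	if (ret < 0)
		return ret;

	/* ... memmap/zone init runs in between ... */

	vmemmap_wrprotect_hvo(map_start, map_end, nid, headsize);
	flush_tlb_kernel_range(map_start, map_end);	/* caller's responsibility */

	return 0;
}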
Link: https://lkml.kernel.org/r/20250228182928.2645936-15-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/mm.h | 9 ++- mm/sparse-vmemmap.c | 141 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 135 insertions(+), 15 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 03e4807bd911..9a74a3ee68bc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3937,7 +3937,8 @@ p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, - struct vmem_altmap *altmap, struct page *reuse); + struct vmem_altmap *altmap, unsigned long ptpfn, + unsigned long flags); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; void *vmemmap_alloc_block_buf(unsigned long size, int node, @@ -3953,6 +3954,12 @@ int vmemmap_populate_hugepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); +int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); +int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); +void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node, + unsigned long headsize); void vmemmap_populate_print_last(void); #ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_free(unsigned long start, unsigned long end, diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 8751c46c35e4..8cc848c4b17c 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -30,6 +30,13 @@ #include #include +#include + +/* + * Flags for vmemmap_populate_range and friends. + */ +/* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */ +#define VMEMMAP_POPULATE_PAGEREF 0x0001 #include "internal.h" @@ -144,17 +151,18 @@ void __meminit vmemmap_verify(pte_t *pte, int node, pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, unsigned long flags) { pte_t *pte = pte_offset_kernel(pmd, addr); if (pte_none(ptep_get(pte))) { pte_t entry; void *p; - if (!reuse) { + if (ptpfn == (unsigned long)-1) { p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); if (!p) return NULL; + ptpfn = PHYS_PFN(__pa(p)); } else { /* * When a PTE/PMD entry is freed from the init_mm @@ -165,10 +173,10 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, * and through vmemmap_populate_compound_pages() when * slab is available. 
*/ - get_page(reuse); - p = page_to_virt(reuse); + if (flags & VMEMMAP_POPULATE_PAGEREF) + get_page(pfn_to_page(ptpfn)); } - entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); + entry = pfn_pte(ptpfn, PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } return pte; @@ -238,7 +246,8 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, + unsigned long flags) { pgd_t *pgd; p4d_t *p4d; @@ -258,7 +267,7 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return NULL; - pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse); + pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags); if (!pte) return NULL; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); @@ -269,13 +278,15 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, static int __meminit vmemmap_populate_range(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap, - struct page *reuse) + unsigned long ptpfn, + unsigned long flags) { unsigned long addr = start; pte_t *pte; for (; addr < end; addr += PAGE_SIZE) { - pte = vmemmap_populate_address(addr, node, altmap, reuse); + pte = vmemmap_populate_address(addr, node, altmap, + ptpfn, flags); if (!pte) return -ENOMEM; } @@ -286,7 +297,107 @@ static int __meminit vmemmap_populate_range(unsigned long start, int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_range(start, end, node, altmap, NULL); + return vmemmap_populate_range(start, end, node, altmap, -1, 0); +} + +/* + * Undo populate_hvo, and replace it with a normal base page mapping. + * Used in memory init in case a HVO mapping needs to be undone. + * + * This can happen when it is discovered that a memblock allocated + * hugetlb page spans multiple zones, which can only be verified + * after zones have been initialized. + * + * We know that: + * 1) The first @headsize / PAGE_SIZE vmemmap pages were individually + * allocated through memblock, and mapped. + * + * 2) The rest of the vmemmap pages are mirrors of the last head page. + */ +int __meminit vmemmap_undo_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + unsigned long maddr, pfn; + pte_t *pte; + int headpages; + + /* + * Should only be called early in boot, so nothing will + * be accessing these page structures. + */ + WARN_ON(!early_boot_irqs_disabled); + + headpages = headsize >> PAGE_SHIFT; + + /* + * Clear mirrored mappings for tail page structs. + */ + for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + pte_clear(&init_mm, maddr, pte); + } + + /* + * Clear and free mappings for head page and first tail page + * structs. + */ + for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + pfn = pte_pfn(ptep_get(pte)); + pte_clear(&init_mm, maddr, pte); + memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE); + } + + flush_tlb_kernel_range(addr, end); + + return vmemmap_populate(addr, end, node, NULL); +} + +/* + * Write protect the mirrored tail page structs for HVO. This will be + * called from the hugetlb code when gathering and initializing the + * memblock allocated gigantic pages. 
The write protect can't be + * done earlier, since it can't be guaranteed that the reserved + * page structures will not be written to during initialization, + * even if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. + * + * The PTEs are known to exist, and nothing else should be touching + * these pages. The caller is responsible for any TLB flushing. + */ +void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + unsigned long maddr; + pte_t *pte; + + for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) { + pte = virt_to_kpte(maddr); + ptep_set_wrprotect(&init_mm, maddr, pte); + } +} + +/* + * Populate vmemmap pages HVO-style. The first page contains the head + * page and needed tail pages, the other ones are mirrors of the first + * page. + */ +int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end, + int node, unsigned long headsize) +{ + pte_t *pte; + unsigned long maddr; + + for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) { + pte = vmemmap_populate_address(maddr, node, NULL, -1, 0); + if (!pte) + return -ENOMEM; + } + + /* + * Reuse the last page struct page mapped above for the rest. + */ + return vmemmap_populate_range(maddr, end, node, NULL, + pte_pfn(ptep_get(pte)), 0); } void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, @@ -409,7 +520,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, * with just tail struct pages. */ return vmemmap_populate_range(start, end, node, NULL, - pte_page(ptep_get(pte))); + pte_pfn(ptep_get(pte)), + VMEMMAP_POPULATE_PAGEREF); } size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); @@ -417,13 +529,13 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, unsigned long next, last = addr + size; /* Populate the head page vmemmap page */ - pte = vmemmap_populate_address(addr, node, NULL, NULL); + pte = vmemmap_populate_address(addr, node, NULL, -1, 0); if (!pte) return -ENOMEM; /* Populate the tail pages vmemmap page */ next = addr + PAGE_SIZE; - pte = vmemmap_populate_address(next, node, NULL, NULL); + pte = vmemmap_populate_address(next, node, NULL, -1, 0); if (!pte) return -ENOMEM; @@ -433,7 +545,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, */ next += PAGE_SIZE; rc = vmemmap_populate_range(next, last, node, NULL, - pte_page(ptep_get(pte))); + pte_pfn(ptep_get(pte)), + VMEMMAP_POPULATE_PAGEREF); if (rc) return -ENOMEM; } From d58b2498200724e4f8c12d71a5953da03c8c8bdf Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:16 +0000 Subject: [PATCH 148/431] mm/hugetlb: deal with multiple calls to hugetlb_bootmem_alloc Architectures that want pre-HVO of hugetlb vmemmap pages will need to call hugetlb_bootmem_alloc from an earlier spot in boot (before sparse_init). To facilitate some architectures doing this, protect hugetlb_bootmem_alloc against multiple calls. Also provide a helper function to check if it's been called, so that the early HVO code, to be added later, can see if there is anything to do. 
Link: https://lkml.kernel.org/r/20250228182928.2645936-16-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 6 ++++++ mm/hugetlb.c | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a596aaa178d1..f0ab4ca4ecf2 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -175,6 +175,7 @@ extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages[MAX_NUMNODES]; void hugetlb_bootmem_alloc(void); +bool hugetlb_bootmem_allocated(void); /* arch callbacks */ @@ -1263,6 +1264,11 @@ static inline bool hugetlbfs_pagecache_present( static inline void hugetlb_bootmem_alloc(void) { } + +static inline bool hugetlb_bootmem_allocated(void) +{ + return false; +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e4bf06f13178..826af96455aa 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4918,16 +4918,28 @@ static int __init default_hugepagesz_setup(char *s) } hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup); +static bool __hugetlb_bootmem_allocated __initdata; + +bool __init hugetlb_bootmem_allocated(void) +{ + return __hugetlb_bootmem_allocated; +} + void __init hugetlb_bootmem_alloc(void) { struct hstate *h; + if (__hugetlb_bootmem_allocated) + return; + hugetlb_parse_params(); for_each_hstate(h) { if (hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); } + + __hugetlb_bootmem_allocated = true; } static unsigned int allowed_mems_nr(struct hstate *h) From 91ec71872a6d0c41abd7c53412f68111ffa060cd Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:17 +0000 Subject: [PATCH 149/431] mm/hugetlb: move huge_boot_pages list init to hugetlb_bootmem_alloc Instead of initializing the per-node hugetlb bootmem pages list from the alloc function, we can now do it in a somewhat cleaner way, since there is an explicit hugetlb_bootmem_alloc function. Initialize the lists there. 
Link: https://lkml.kernel.org/r/20250228182928.2645936-17-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 826af96455aa..f9287d87b8b7 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3586,7 +3586,6 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long allocated; - static bool initialized __initdata; /* skip gigantic hugepages allocation if hugetlb_cma enabled */ if (hstate_is_gigantic(h) && hugetlb_cma_size) { @@ -3594,17 +3593,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) return; } - /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */ - if (!initialized) { - int i = 0; - - for (i = 0; i < MAX_NUMNODES; i++) - INIT_LIST_HEAD(&huge_boot_pages[i]); - h->next_nid_to_alloc = first_online_node; - h->next_nid_to_free = first_online_node; - initialized = true; - } - /* do node specific alloc */ if (hugetlb_hstate_alloc_pages_specific_nodes(h)) return; @@ -4928,13 +4916,20 @@ bool __init hugetlb_bootmem_allocated(void) void __init hugetlb_bootmem_alloc(void) { struct hstate *h; + int i; if (__hugetlb_bootmem_allocated) return; + for (i = 0; i < MAX_NUMNODES; i++) + INIT_LIST_HEAD(&huge_boot_pages[i]); + hugetlb_parse_params(); for_each_hstate(h) { + h->next_nid_to_alloc = first_online_node; + h->next_nid_to_free = first_online_node; + if (hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); } From 752fe17af693323dba5622b19c858a46fed219a1 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:18 +0000 Subject: [PATCH 150/431] mm/hugetlb: add pre-HVO framework Define flags for pre-HVOed bootmem hugetlb pages, and act on them. The most important flag is the HVO flag, signalling that a bootmem allocated gigantic page has already been HVO-ed. If this flag is seen by the hugetlb bootmem gather code, the page is marked as HVO optimized. The HVO code will then not try to optimize it again. Instead, it will just map the tail page mirror pages read-only, completing the HVO steps. No functional change, as nothing sets the flags yet. 
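As a small usage sketch (nothing in this patch sets the flag yet; the early-HVO code added later in the series is the intended producer, and the example function names are invented), the producer and consumer sides pair up like this:

/* Producer side: mark a bootmem page whose vmemmap was already optimized. */
static void __init example_mark_prehvo(struct huge_bootmem_page *m)
{
	m->flags |= HUGE_BOOTMEM_HVO;
}

/* Consumer side, as in gather_bootmem_prealloc_node() below: */
static void __init example_consume(struct huge_bootmem_page *m,
				   struct folio *folio)
{
	if (m->flags & HUGE_BOOTMEM_HVO)
		/* HVO will only wrprotect the tail mirrors, not redo HVO */
		folio_set_hugetlb_vmemmap_optimized(folio);
}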
Link: https://lkml.kernel.org/r/20250228182928.2645936-18-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/powerpc/mm/hugetlbpage.c | 1 + include/linux/hugetlb.h | 4 +++ mm/hugetlb.c | 24 ++++++++++++++++- mm/hugetlb_vmemmap.c | 50 +++++++++++++++++++++++++++++++++-- mm/hugetlb_vmemmap.h | 7 +++++ 5 files changed, 83 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 6b043180220a..d3c1b749dcfc 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -113,6 +113,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate) gpage_freearray[nr_gpages] = 0; list_add(&m->list, &huge_boot_pages[0]); m->hstate = hstate; + m->flags = 0; return 1; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f0ab4ca4ecf2..bbccc3e6b9dd 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -681,8 +681,12 @@ struct hstate { struct huge_bootmem_page { struct list_head list; struct hstate *hstate; + unsigned long flags; }; +#define HUGE_BOOTMEM_HVO 0x0001 +#define HUGE_BOOTMEM_ZONES_VALID 0x0002 + int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); void wait_for_freed_hugetlb_folios(void); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f9287d87b8b7..db0d35bc9b9b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3227,6 +3227,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) INIT_LIST_HEAD(&m->list); list_add(&m->list, &huge_boot_pages[node]); m->hstate = h; + m->flags = 0; return 1; } @@ -3294,7 +3295,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, struct folio *folio, *tmp_f; /* Send list for bulk vmemmap optimization processing */ - hugetlb_vmemmap_optimize_folios(h, folio_list); + hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list); list_for_each_entry_safe(folio, tmp_f, folio_list, lru) { if (!folio_test_hugetlb_vmemmap_optimized(folio)) { @@ -3323,6 +3324,13 @@ static bool __init hugetlb_bootmem_page_zones_valid(int nid, unsigned long start_pfn; bool valid; + if (m->flags & HUGE_BOOTMEM_ZONES_VALID) { + /* + * Already validated, skip check. + */ + return true; + } + start_pfn = virt_to_phys(m) >> PAGE_SHIFT; valid = !pfn_range_intersects_zones(nid, start_pfn, @@ -3355,6 +3363,11 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, } } +static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) +{ + return (m->flags & HUGE_BOOTMEM_HVO); +} + /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. @@ -3395,6 +3408,15 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid) hugetlb_folio_init_vmemmap(folio, h, HUGETLB_VMEMMAP_RESERVE_PAGES); init_new_hugetlb_folio(h, folio); + + if (hugetlb_bootmem_page_prehvo(m)) + /* + * If pre-HVO was done, just set the + * flag, the HVO code will then skip + * this folio. 
+ */ + folio_set_hugetlb_vmemmap_optimized(folio); + list_add(&folio->lru, &folio_list); /* diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 5b484758f813..be6b33ecbc8e 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -649,14 +649,39 @@ static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *fol return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse); } -void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) +static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, + struct list_head *folio_list, + bool boot) { struct folio *folio; + int nr_to_optimize; LIST_HEAD(vmemmap_pages); unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU; + nr_to_optimize = 0; list_for_each_entry(folio, folio_list, lru) { - int ret = hugetlb_vmemmap_split_folio(h, folio); + int ret; + unsigned long spfn, epfn; + + if (boot && folio_test_hugetlb_vmemmap_optimized(folio)) { + /* + * Already optimized by pre-HVO, just map the + * mirrored tail page structs RO. + */ + spfn = (unsigned long)&folio->page; + epfn = spfn + pages_per_huge_page(h); + vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio), + HUGETLB_VMEMMAP_RESERVE_SIZE); + register_page_bootmem_memmap(pfn_to_section_nr(spfn), + &folio->page, + HUGETLB_VMEMMAP_RESERVE_SIZE); + static_branch_inc(&hugetlb_optimize_vmemmap_key); + continue; + } + + nr_to_optimize++; + + ret = hugetlb_vmemmap_split_folio(h, folio); /* * Spliting the PMD requires allocating a page, thus lets fail @@ -668,6 +693,16 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l break; } + if (!nr_to_optimize) + /* + * All pre-HVO folios, nothing left to do. It's ok if + * there is a mix of pre-HVO and not yet HVO-ed folios + * here, as __hugetlb_vmemmap_optimize_folio() will + * skip any folios that already have the optimized flag + * set, see vmemmap_should_optimize_folio(). 
+ */ + goto out; + flush_tlb_all(); list_for_each_entry(folio, folio_list, lru) { @@ -693,10 +728,21 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l } } +out: flush_tlb_all(); free_vmemmap_page_list(&vmemmap_pages); } +void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) +{ + __hugetlb_vmemmap_optimize_folios(h, folio_list, false); +} + +void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list) +{ + __hugetlb_vmemmap_optimize_folios(h, folio_list, true); +} + static const struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 2fcae92d3359..71110a90275f 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -24,6 +24,8 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *non_hvo_folios); void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio); void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list); +void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list); + static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h) { @@ -64,6 +66,11 @@ static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list { } +static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, + struct list_head *folio_list) +{ +} + static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h) { return 0; From eefd3d024a53c5d45e7e70a8677257b035e4de52 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:19 +0000 Subject: [PATCH 151/431] mm/hugetlb_vmemmap: fix hugetlb_vmemmap_restore_folios definition Make the hugetlb_vmemmap_restore_folios definition inline for the !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP case, so that including this file in files other than hugetlb_vmemmap.c will work. Link: https://lkml.kernel.org/r/20250228182928.2645936-19-fvdl@google.com Fixes: cfb8c75099db ("hugetlb: perform vmemmap restoration on a list of pages") Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/hugetlb_vmemmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 71110a90275f..62d3d645a793 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -50,7 +50,7 @@ static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct f return 0; } -static long hugetlb_vmemmap_restore_folios(const struct hstate *h, +static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *folio_list, struct list_head *non_hvo_folios) { From b1222550fbf73840e31a103305d03ad53b8f5a59 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:20 +0000 Subject: [PATCH 152/431] mm/hugetlb: do pre-HVO for bootmem allocated pages For large systems, the overhead of vmemmap pages for hugetlb is substantial. It's about 1.5% of memory, which is about 45G for a 3T system. If you want to configure most of that system for hugetlb (e.g. 
to use as backing memory for VMs), there is a chance of running out of memory on boot, even though you know that the 45G will become available later. To avoid this scenario, and since it's a waste to first allocate and then free that 45G during boot, do pre-HVO for hugetlb bootmem allocated pages ('gigantic' pages). pre-HVO is done by adding functions that are called from sparse_init_nid_early and sparse_init_nid_late. The first is called before memmap allocation, so it takes care of allocating memmap HVO-style. The second verifies that all bootmem pages look good, specifically it checks that they do not intersect with multiple zones. This can only be done from sparse_init_nid_late path, when zones have been initialized. The hugetlb page size must be aligned to the section size, and aligned to the size of memory described by the number of page structures contained in one PMD (since pre-HVO is not prepared to split PMDs). This should be true for most 'gigantic' pages, it is for 1G pages on x86, where both of these alignment requirements are 128M. This will only have an effect if hugetlb_bootmem_alloc was called early in boot. If not, it won't do anything, and HVO for bootmem hugetlb pages works as before. Link: https://lkml.kernel.org/r/20250228182928.2645936-20-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 2 + mm/hugetlb.c | 17 ++++- mm/hugetlb_vmemmap.c | 143 ++++++++++++++++++++++++++++++++++++++++ mm/hugetlb_vmemmap.h | 14 ++++ mm/sparse-vmemmap.c | 4 ++ 5 files changed, 177 insertions(+), 3 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index bbccc3e6b9dd..f6b82b0524ed 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -687,6 +687,8 @@ struct huge_bootmem_page { #define HUGE_BOOTMEM_HVO 0x0001 #define HUGE_BOOTMEM_ZONES_VALID 0x0002 +bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m); + int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); void wait_for_freed_hugetlb_folios(void); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index db0d35bc9b9b..d1134e915927 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3223,7 +3223,18 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) */ memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), huge_page_size(h) - PAGE_SIZE); - /* Put them into a private list first because mem_map is not up yet */ + + /* + * Put them into a private list first because mem_map is not up yet. + * + * For pre-HVO to work correctly, pages need to be on the list for + * the node they were actually allocated from. That node may be + * different in the case of fallback by memblock_alloc_try_nid_raw. + * So, extract the actual node first. 
+ */ + if (nid == NUMA_NO_NODE) + node = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); + INIT_LIST_HEAD(&m->list); list_add(&m->list, &huge_boot_pages[node]); m->hstate = h; @@ -3318,8 +3329,8 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h, } } -static bool __init hugetlb_bootmem_page_zones_valid(int nid, - struct huge_bootmem_page *m) +bool __init hugetlb_bootmem_page_zones_valid(int nid, + struct huge_bootmem_page *m) { unsigned long start_pfn; bool valid; diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index be6b33ecbc8e..9a99dfa3c495 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -743,6 +743,149 @@ void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head __hugetlb_vmemmap_optimize_folios(h, folio_list, true); } +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT + +/* Return true of a bootmem allocated HugeTLB page should be pre-HVO-ed */ +static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m) +{ + unsigned long section_size, psize, pmd_vmemmap_size; + phys_addr_t paddr; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return false; + + if (!hugetlb_vmemmap_optimizable(m->hstate)) + return false; + + psize = huge_page_size(m->hstate); + paddr = virt_to_phys(m); + + /* + * Pre-HVO only works if the bootmem huge page + * is aligned to the section size. + */ + section_size = (1UL << PA_SECTION_SHIFT); + if (!IS_ALIGNED(paddr, section_size) || + !IS_ALIGNED(psize, section_size)) + return false; + + /* + * The pre-HVO code does not deal with splitting PMDS, + * so the bootmem page must be aligned to the number + * of base pages that can be mapped with one vmemmap PMD. + */ + pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT; + if (!IS_ALIGNED(paddr, pmd_vmemmap_size) || + !IS_ALIGNED(psize, pmd_vmemmap_size)) + return false; + + return true; +} + +/* + * Initialize memmap section for a gigantic page, HVO-style. + */ +void __init hugetlb_vmemmap_init_early(int nid) +{ + unsigned long psize, paddr, section_size; + unsigned long ns, i, pnum, pfn, nr_pages; + unsigned long start, end; + struct huge_bootmem_page *m = NULL; + void *map; + + /* + * Noting to do if bootmem pages were not allocated + * early in boot, or if HVO wasn't enabled in the + * first place. 
+ */ + if (!hugetlb_bootmem_allocated()) + return; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return; + + section_size = (1UL << PA_SECTION_SHIFT); + + list_for_each_entry(m, &huge_boot_pages[nid], list) { + if (!vmemmap_should_optimize_bootmem_page(m)) + continue; + + nr_pages = pages_per_huge_page(m->hstate); + psize = nr_pages << PAGE_SHIFT; + paddr = virt_to_phys(m); + pfn = PHYS_PFN(paddr); + map = pfn_to_page(pfn); + start = (unsigned long)map; + end = start + nr_pages * sizeof(struct page); + + if (vmemmap_populate_hvo(start, end, nid, + HUGETLB_VMEMMAP_RESERVE_SIZE) < 0) + continue; + + memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE); + + pnum = pfn_to_section_nr(pfn); + ns = psize / section_size; + + for (i = 0; i < ns; i++) { + sparse_init_early_section(nid, map, pnum, + SECTION_IS_VMEMMAP_PREINIT); + map += section_map_size(); + pnum++; + } + + m->flags |= HUGE_BOOTMEM_HVO; + } +} + +void __init hugetlb_vmemmap_init_late(int nid) +{ + struct huge_bootmem_page *m, *tm; + unsigned long phys, nr_pages, start, end; + unsigned long pfn, nr_mmap; + struct hstate *h; + void *map; + + if (!hugetlb_bootmem_allocated()) + return; + + if (!READ_ONCE(vmemmap_optimize_enabled)) + return; + + list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { + if (!(m->flags & HUGE_BOOTMEM_HVO)) + continue; + + phys = virt_to_phys(m); + h = m->hstate; + pfn = PHYS_PFN(phys); + nr_pages = pages_per_huge_page(h); + + if (!hugetlb_bootmem_page_zones_valid(nid, m)) { + /* + * Oops, the hugetlb page spans multiple zones. + * Remove it from the list, and undo HVO. + */ + list_del(&m->list); + + map = pfn_to_page(pfn); + + start = (unsigned long)map; + end = start + nr_pages * sizeof(struct page); + + vmemmap_undo_hvo(start, end, nid, + HUGETLB_VMEMMAP_RESERVE_SIZE); + nr_mmap = end - start - HUGETLB_VMEMMAP_RESERVE_SIZE; + memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE)); + + memblock_phys_free(phys, huge_page_size(h)); + continue; + } else + m->flags |= HUGE_BOOTMEM_ZONES_VALID; + } +} +#endif + static const struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 62d3d645a793..18b490825215 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -9,6 +9,8 @@ #ifndef _LINUX_HUGETLB_VMEMMAP_H #define _LINUX_HUGETLB_VMEMMAP_H #include +#include +#include /* * Reserve one vmemmap page, all vmemmap addresses are mapped to it. 
See @@ -25,6 +27,10 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h, void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio); void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list); void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list); +#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT +void hugetlb_vmemmap_init_early(int nid); +void hugetlb_vmemmap_init_late(int nid); +#endif static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h) @@ -71,6 +77,14 @@ static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, { } +static inline void hugetlb_vmemmap_init_early(int nid) +{ +} + +static inline void hugetlb_vmemmap_init_late(int nid) +{ +} + static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h) { return 0; diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 8cc848c4b17c..fd2ab5118e13 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -32,6 +32,8 @@ #include #include +#include "hugetlb_vmemmap.h" + /* * Flags for vmemmap_populate_range and friends. */ @@ -594,6 +596,7 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn, */ void __init sparse_vmemmap_init_nid_early(int nid) { + hugetlb_vmemmap_init_early(nid); } /* @@ -604,5 +607,6 @@ void __init sparse_vmemmap_init_nid_early(int nid) */ void __init sparse_vmemmap_init_nid_late(int nid) { + hugetlb_vmemmap_init_late(nid); } #endif From 665eaf313314432b5bbb5f71ec71e275ff2358e0 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:21 +0000 Subject: [PATCH 153/431] x86/setup: call hugetlb_bootmem_alloc early Call hugetlb_bootmem_allloc in an earlier spot in setup, after hugelb_cma_reserve. This will make vmemmap preinit of the sections covered by the allocated hugetlb pages possible. Link: https://lkml.kernel.org/r/20250228182928.2645936-21-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Alexander Gordeev Cc: Arnd Bergmann Cc: Dan Carpenter Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/x86/kernel/setup.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cebee310e200..ff8604007b08 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1108,8 +1108,10 @@ void __init setup_arch(char **cmdline_p) initmem_init(); dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); - if (boot_cpu_has(X86_FEATURE_GBPAGES)) + if (boot_cpu_has(X86_FEATURE_GBPAGES)) { hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); + hugetlb_bootmem_alloc(); + } /* * Reserve memory for crash kernel after SRAT is parsed so that it From 08efe293503098b539cb18f55528176b6b559348 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:22 +0000 Subject: [PATCH 154/431] x86/mm: set ARCH_WANT_HUGETLB_VMEMMAP_PREINIT Now that hugetlb bootmem pages are allocated earlier, and available for section preinit (HVO-style), set ARCH_WANT_HUGETLB_VMEMMAP_PREINIT for x86_64, so that is can be done. This enables pre-HVO on x86_64. 
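As a back-of-the-envelope illustration of what this enables (my arithmetic, not taken from the patch): with 4 KiB base pages and a 64-byte struct page, the vmemmap costs 64/4096, roughly 1.6% of memory. A single 1 GiB gigantic page covers 262144 base pages and so carries 16 MiB of vmemmap, of which HVO keeps only HUGETLB_VMEMMAP_RESERVE_SIZE (one base page) worth. With vmemmap preinit, that 16 MiB per gigantic page is never allocated at boot in the first place, instead of being allocated and then freed again.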
Link: https://lkml.kernel.org/r/20250228182928.2645936-22-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Johannes Weiner Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0e27ebd7e36a..cf49c130d1d0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -146,6 +146,7 @@ config X86 select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 + select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64 select ARCH_WANTS_THP_SWAP if X86_64 select ARCH_HAS_PARANOID_L1D_FLUSH select BUILDTIME_TABLE_SORT From b51d3db91d4d358f9787e3e75e0c79eb858e8197 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:23 +0000 Subject: [PATCH 155/431] mm/cma: simplify zone intersection check cma_activate_area walks all pages in the area, checking their zone individually to see if the area resides in more than one zone. Make this a little more efficient by using the recently introduced pfn_range_intersects_zones() function. Store the NUMA node id (if any) in the cma structure to facilitate this. Link: https://lkml.kernel.org/r/20250228182928.2645936-23-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/cma.c | 13 ++++++------- mm/cma.h | 2 ++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 8dc46bfa3819..61ad4fd2f62d 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -103,7 +103,6 @@ static void __init cma_activate_area(struct cma *cma) { unsigned long pfn, base_pfn; int allocrange, r; - struct zone *zone; struct cma_memrange *cmr; for (allocrange = 0; allocrange < cma->nranges; allocrange++) { @@ -124,12 +123,8 @@ static void __init cma_activate_area(struct cma *cma) * CMA resv range to be in the same zone. 
*/ WARN_ON_ONCE(!pfn_valid(base_pfn)); - zone = page_zone(pfn_to_page(base_pfn)); - for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) { - WARN_ON_ONCE(!pfn_valid(pfn)); - if (page_zone(pfn_to_page(pfn)) != zone) - goto cleanup; - } + if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) + goto cleanup; for (pfn = base_pfn; pfn < base_pfn + cmr->count; pfn += pageblock_nr_pages) @@ -261,6 +256,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, cma->ranges[0].base_pfn = PFN_DOWN(base); cma->ranges[0].count = cma->count; cma->nranges = 1; + cma->nid = NUMA_NO_NODE; *res_cma = cma; @@ -497,6 +493,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size, } cma->nranges = nr; + cma->nid = nid; *res_cma = cma; out: @@ -684,6 +681,8 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base, if (ret) memblock_phys_free(base, size); + (*res_cma)->nid = nid; + return ret; } diff --git a/mm/cma.h b/mm/cma.h index 5f39dd1aac91..ff79dba5508c 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -50,6 +50,8 @@ struct cma { struct cma_kobject *cma_kobj; #endif bool reserve_pages_on_error; + /* NUMA node (NUMA_NO_NODE if unspecified) */ + int nid; }; extern struct cma cma_areas[MAX_CMA_AREAS]; From 9320fa2717810a7d3451dd1a18c31f986a1d4068 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:24 +0000 Subject: [PATCH 156/431] mm/cma: introduce a cma validate function Define a function to check if a CMA area is valid, which means: do its ranges not cross any zone boundaries. Store the result in the newly created flags for each CMA area, so that multiple calls are dealt with. This allows for checking the validity of a CMA area early, which is needed later in order to be able to allocate hugetlb bootmem pages from it with pre-HVO. Link: https://lkml.kernel.org/r/20250228182928.2645936-24-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/cma.h | 5 ++++ mm/cma.c | 60 ++++++++++++++++++++++++++++++++++++--------- mm/cma.h | 8 +++++- 3 files changed, 60 insertions(+), 13 deletions(-) diff --git a/include/linux/cma.h b/include/linux/cma.h index 03d85c100dcc..62d9c1cf6326 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -60,6 +60,7 @@ extern void cma_reserve_pages_on_error(struct cma *cma); #ifdef CONFIG_CMA struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); bool cma_free_folio(struct cma *cma, const struct folio *folio); +bool cma_validate_zones(struct cma *cma); #else static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) { @@ -70,6 +71,10 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio) { return false; } +static inline bool cma_validate_zones(struct cma *cma) +{ + return false; +} #endif #endif diff --git a/mm/cma.c b/mm/cma.c index 61ad4fd2f62d..5e1d169e24fa 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -99,6 +99,49 @@ static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr, spin_unlock_irqrestore(&cma->lock, flags); } +/* + * Check if a CMA area contains no ranges that intersect with + * multiple zones. 
Store the result in the flags in case + * this gets called more than once. + */ +bool cma_validate_zones(struct cma *cma) +{ + int r; + unsigned long base_pfn; + struct cma_memrange *cmr; + bool valid_bit_set; + + /* + * If already validated, return result of previous check. + * Either the valid or invalid bit will be set if this + * check has already been done. If neither is set, the + * check has not been performed yet. + */ + valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags); + if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags)) + return valid_bit_set; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + base_pfn = cmr->base_pfn; + + /* + * alloc_contig_range() requires the pfn range specified + * to be in the same zone. Simplify by forcing the entire + * CMA resv range to be in the same zone. + */ + WARN_ON_ONCE(!pfn_valid(base_pfn)); + if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) { + set_bit(CMA_ZONES_INVALID, &cma->flags); + return false; + } + } + + set_bit(CMA_ZONES_VALID, &cma->flags); + + return true; +} + static void __init cma_activate_area(struct cma *cma) { unsigned long pfn, base_pfn; @@ -113,19 +156,12 @@ static void __init cma_activate_area(struct cma *cma) goto cleanup; } + if (!cma_validate_zones(cma)) + goto cleanup; + for (r = 0; r < cma->nranges; r++) { cmr = &cma->ranges[r]; base_pfn = cmr->base_pfn; - - /* - * alloc_contig_range() requires the pfn range specified - * to be in the same zone. Simplify by forcing the entire - * CMA resv range to be in the same zone. - */ - WARN_ON_ONCE(!pfn_valid(base_pfn)); - if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) - goto cleanup; - for (pfn = base_pfn; pfn < base_pfn + cmr->count; pfn += pageblock_nr_pages) init_cma_reserved_pageblock(pfn_to_page(pfn)); @@ -145,7 +181,7 @@ static void __init cma_activate_area(struct cma *cma) bitmap_free(cma->ranges[r].bitmap); /* Expose all pages to the buddy, they are useless for CMA. */ - if (!cma->reserve_pages_on_error) { + if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { for (r = 0; r < allocrange; r++) { cmr = &cma->ranges[r]; for (pfn = cmr->base_pfn; @@ -172,7 +208,7 @@ core_initcall(cma_init_reserved_areas); void __init cma_reserve_pages_on_error(struct cma *cma) { - cma->reserve_pages_on_error = true; + set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags); } static int __init cma_new_area(const char *name, phys_addr_t size, diff --git a/mm/cma.h b/mm/cma.h index ff79dba5508c..bddc84b3cd96 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -49,11 +49,17 @@ struct cma { /* kobject requires dynamic object */ struct cma_kobject *cma_kobj; #endif - bool reserve_pages_on_error; + unsigned long flags; /* NUMA node (NUMA_NO_NODE if unspecified) */ int nid; }; +enum cma_flags { + CMA_RESERVE_PAGES_ON_ERROR, + CMA_ZONES_VALID, + CMA_ZONES_INVALID, +}; + extern struct cma cma_areas[MAX_CMA_AREAS]; extern unsigned int cma_area_count; From 85abcd023640067fcbb6e23c45e8a0014dbba11d Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:25 +0000 Subject: [PATCH 157/431] mm/cma: introduce interface for early reservations It can be desirable to reserve memory in a CMA area before it is activated, early in boot. Such reservations would effectively be memblock allocations, but they can be returned to the CMA area later. This functionality can be used to allow hugetlb bootmem allocations from a hugetlb CMA area. A new interface, cma_reserve_early is introduced. This allows for pageblock-aligned reservations. 
These reservations are skipped during the initial handoff of pages in a CMA area to the buddy allocator. The caller is responsible for making sure that the page structures are set up, and that the migrate type is set correctly, as with other memblock allocations that stick around. If the CMA area fails to activate (because it intersects with multiple zones), the reserved memory is not given to the buddy allocator, the caller needs to take care of that. Link: https://lkml.kernel.org/r/20250228182928.2645936-25-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/cma.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++----- mm/cma.h | 8 +++++ mm/internal.h | 16 ++++++++++ mm/mm_init.c | 9 ++++++ 4 files changed, 109 insertions(+), 7 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 5e1d169e24fa..09322b8284bd 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -144,9 +144,10 @@ bool cma_validate_zones(struct cma *cma) static void __init cma_activate_area(struct cma *cma) { - unsigned long pfn, base_pfn; + unsigned long pfn, end_pfn; int allocrange, r; struct cma_memrange *cmr; + unsigned long bitmap_count, count; for (allocrange = 0; allocrange < cma->nranges; allocrange++) { cmr = &cma->ranges[allocrange]; @@ -161,8 +162,13 @@ static void __init cma_activate_area(struct cma *cma) for (r = 0; r < cma->nranges; r++) { cmr = &cma->ranges[r]; - base_pfn = cmr->base_pfn; - for (pfn = base_pfn; pfn < base_pfn + cmr->count; + if (cmr->early_pfn != cmr->base_pfn) { + count = cmr->early_pfn - cmr->base_pfn; + bitmap_count = cma_bitmap_pages_to_bits(cma, count); + bitmap_set(cmr->bitmap, 0, bitmap_count); + } + + for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count; pfn += pageblock_nr_pages) init_cma_reserved_pageblock(pfn_to_page(pfn)); } @@ -173,6 +179,7 @@ static void __init cma_activate_area(struct cma *cma) INIT_HLIST_HEAD(&cma->mem_head); spin_lock_init(&cma->mem_head_lock); #endif + set_bit(CMA_ACTIVATED, &cma->flags); return; @@ -184,9 +191,8 @@ static void __init cma_activate_area(struct cma *cma) if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) { for (r = 0; r < allocrange; r++) { cmr = &cma->ranges[r]; - for (pfn = cmr->base_pfn; - pfn < cmr->base_pfn + cmr->count; - pfn++) + end_pfn = cmr->base_pfn + cmr->count; + for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++) free_reserved_page(pfn_to_page(pfn)); } } @@ -290,6 +296,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, return ret; cma->ranges[0].base_pfn = PFN_DOWN(base); + cma->ranges[0].early_pfn = PFN_DOWN(base); cma->ranges[0].count = cma->count; cma->nranges = 1; cma->nid = NUMA_NO_NODE; @@ -509,6 +516,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size, nr, (u64)mlp->base, (u64)mlp->base + size); cmrp = &cma->ranges[nr++]; cmrp->base_pfn = PHYS_PFN(mlp->base); + cmrp->early_pfn = cmrp->base_pfn; cmrp->count = size >> PAGE_SHIFT; sizeleft -= size; @@ -540,7 +548,6 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size, pr_info("Reserved %lu MiB in %d range%s\n", (unsigned long)total_size / SZ_1M, nr, nr > 1 ? 
"s" : ""); - return ret; } @@ -1034,3 +1041,65 @@ bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end) return false; } + +/* + * Very basic function to reserve memory from a CMA area that has not + * yet been activated. This is expected to be called early, when the + * system is single-threaded, so there is no locking. The alignment + * checking is restrictive - only pageblock-aligned areas + * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function. + * This keeps things simple, and is enough for the current use case. + * + * The CMA bitmaps have not yet been allocated, so just start + * reserving from the bottom up, using a PFN to keep track + * of what has been reserved. Unreserving is not possible. + * + * The caller is responsible for initializing the page structures + * in the area properly, since this just points to memblock-allocated + * memory. The caller should subsequently use init_cma_pageblock to + * set the migrate type and CMA stats the pageblocks that were reserved. + * + * If the CMA area fails to activate later, memory obtained through + * this interface is not handed to the page allocator, this is + * the responsibility of the caller (e.g. like normal memblock-allocated + * memory). + */ +void __init *cma_reserve_early(struct cma *cma, unsigned long size) +{ + int r; + struct cma_memrange *cmr; + unsigned long available; + void *ret = NULL; + + if (!cma || !cma->count) + return NULL; + /* + * Can only be called early in init. + */ + if (test_bit(CMA_ACTIVATED, &cma->flags)) + return NULL; + + if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES)) + return NULL; + + if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit))) + return NULL; + + size >>= PAGE_SHIFT; + + if (size > cma->available_count) + return NULL; + + for (r = 0; r < cma->nranges; r++) { + cmr = &cma->ranges[r]; + available = cmr->count - (cmr->early_pfn - cmr->base_pfn); + if (size <= available) { + ret = phys_to_virt(PFN_PHYS(cmr->early_pfn)); + cmr->early_pfn += size; + cma->available_count -= size; + return ret; + } + } + + return ret; +} diff --git a/mm/cma.h b/mm/cma.h index bddc84b3cd96..df7fc623b7a6 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -16,9 +16,16 @@ struct cma_kobject { * and the total amount of memory requested, while smaller than the total * amount of memory available, is large enough that it doesn't fit in a * single physical memory range because of memory holes. + * + * Fields: + * @base_pfn: physical address of range + * @early_pfn: first PFN not reserved through cma_reserve_early + * @count: size of range + * @bitmap: bitmap of allocated (1 << order_per_bit)-sized chunks. 
*/ struct cma_memrange { unsigned long base_pfn; + unsigned long early_pfn; unsigned long count; unsigned long *bitmap; #ifdef CONFIG_CMA_DEBUGFS @@ -58,6 +65,7 @@ enum cma_flags { CMA_RESERVE_PAGES_ON_ERROR, CMA_ZONES_VALID, CMA_ZONES_INVALID, + CMA_ACTIVATED, }; extern struct cma cma_areas[MAX_CMA_AREAS]; diff --git a/mm/internal.h b/mm/internal.h index 8233c207d3f3..31c626130883 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -848,6 +848,22 @@ void init_cma_reserved_pageblock(struct page *page); #endif /* CONFIG_COMPACTION || CONFIG_CMA */ +struct cma; + +#ifdef CONFIG_CMA +void *cma_reserve_early(struct cma *cma, unsigned long size); +void init_cma_pageblock(struct page *page); +#else +static inline void *cma_reserve_early(struct cma *cma, unsigned long size) +{ + return NULL; +} +static inline void init_cma_pageblock(struct page *page) +{ +} +#endif + + int find_suitable_fallback(struct free_area *area, unsigned int order, int migratetype, bool only_stealable, bool *can_steal); diff --git a/mm/mm_init.c b/mm/mm_init.c index 3eec528afe43..b5047c5ef7d6 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2263,6 +2263,15 @@ void __init init_cma_reserved_pageblock(struct page *page) adjust_managed_page_count(page, pageblock_nr_pages); page_zone(page)->cma_pages += pageblock_nr_pages; } +/* + * Similar to above, but only set the migrate type and stats. + */ +void __init init_cma_pageblock(struct page *page) +{ + set_pageblock_migratetype(page, MIGRATE_CMA); + adjust_managed_page_count(page, pageblock_nr_pages); + page_zone(page)->cma_pages += pageblock_nr_pages; +} #endif void set_zone_contiguous(struct zone *zone) From f866cfcec20cdc16955bca255424be255764d2e7 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:26 +0000 Subject: [PATCH 158/431] mm/hugetlb: add hugetlb_cma_only cmdline option Add an option to force hugetlb gigantic pages to be allocated using CMA only (if hugetlb_cma is enabled). This avoids a fallback to allocation from the rest of system memory if the CMA allocation fails. This makes the size of hugetlb_cma a hard upper boundary for gigantic hugetlb page allocations. This is useful because, with a large CMA area, the kernel's unmovable allocations will have less room to work with and it is undesirable for new hugetlb gigantic page allocations to be done from that remaining area. It will eat in to the space available for unmovable allocations, leading to unwanted system behavior (OOMs because the kernel fails to do unmovable allocations). So, with this enabled, an administrator can force a hard upper bound for runtime gigantic page allocations, and have more predictable system behavior. 
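As a usage sketch (the sizes here are illustrative, not taken from the patch): on an x86_64 system with 1 GiB gigantic pages, booting with

    hugetlb_cma=8G hugetlb_cma_only=1

makes the 8 GiB CMA reservation a hard ceiling for runtime gigantic page allocations. Raising nr_hugepages for the 1 GiB hstate then only succeeds while the CMA area can satisfy the request; once it is exhausted, further allocations fail instead of falling back to the rest of system memory. Without hugetlb_cma_only=1, behavior is unchanged from before this patch.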
Link: https://lkml.kernel.org/r/20250228182928.2645936-26-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- Documentation/admin-guide/kernel-parameters.txt | 7 +++++++ mm/hugetlb.c | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index ae21d911d1c7..491628ac071a 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1892,6 +1892,13 @@ hugepages using the CMA allocator. If enabled, the boot-time allocation of gigantic hugepages is skipped. + hugetlb_cma_only= + [HW,CMA,EARLY] When allocating new HugeTLB pages, only + try to allocate from the CMA areas. + + This option does nothing if hugetlb_cma= is not also + specified. + hugetlb_free_vmemmap= [KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP enabled. diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d1134e915927..80d401593669 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -59,6 +59,7 @@ struct hstate hstates[HUGE_MAX_HSTATE]; static struct cma *hugetlb_cma[MAX_NUMNODES]; static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; #endif +static bool hugetlb_cma_only; static unsigned long hugetlb_cma_size __initdata; __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; @@ -1510,6 +1511,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, } #endif if (!folio) { + if (hugetlb_cma_only) + return NULL; + folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); if (!folio) return NULL; @@ -4750,6 +4754,9 @@ static __init void hugetlb_parse_params(void) hcp->setup(hcp->val); } + + if (!hugetlb_cma_size) + hugetlb_cma_only = false; } /* @@ -7862,6 +7869,13 @@ static int __init cmdline_parse_hugetlb_cma(char *p) early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); +static int __init cmdline_parse_hugetlb_cma_only(char *p) +{ + return kstrtobool(p, &hugetlb_cma_only); +} + +early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only); + void __init hugetlb_cma_reserve(int order) { unsigned long size, reserved, per_node; From d2d78671408061b5332d9ad357686812bbec5070 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:27 +0000 Subject: [PATCH 159/431] mm/hugetlb: enable bootmem allocation from CMA areas If hugetlb_cma_only is enabled, we know that hugetlb pages can only be allocated from CMA. Now that there is an interface to do early reservations from a CMA area (returning memblock memory), it can be used to allocate hugetlb pages from CMA. This also allows for doing pre-HVO on these pages (if enabled). Make sure to initialize the page structures and associated data correctly. Create a flag to signal that a hugetlb page has been allocated from CMA to make things a little easier. Some configurations of powerpc have a special hugetlb bootmem allocator, so introduce a boolean arch_specific_huge_bootmem_alloc that returns true if such an allocator is present. In that case, CMA bootmem allocations can't be used, so check that function before trying. 
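In the code this hook is arch_has_huge_bootmem_alloc(), a static inline that defaults to false; the powerpc override is in the diff below. Purely for illustration, a hypothetical architecture with its own hugetlb bootmem allocator would opt out along these lines (a sketch, not part of the patch):

    /* arch/foo/include/asm/hugetlb.h -- hypothetical example */
    #define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc

    static inline bool arch_has_huge_bootmem_alloc(void)
    {
            /* this architecture allocates hugetlb bootmem pages itself */
            return true;
    }

With such an override in place, hugetlb_early_cma() returns false and the early CMA reservation path is skipped on that architecture.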
Link: https://lkml.kernel.org/r/20250228182928.2645936-27-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/powerpc/include/asm/book3s/64/hugetlb.h | 6 + include/linux/hugetlb.h | 17 ++ mm/hugetlb.c | 168 ++++++++++++++----- 3 files changed, 152 insertions(+), 39 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index f0bba9c5f9c3..bb786694dd26 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -94,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift) return mmu_psize; } +#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc + +static inline bool arch_has_huge_bootmem_alloc(void) +{ + return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled()); +} #endif diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f6b82b0524ed..8f3ac832ee7f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -591,6 +591,7 @@ enum hugetlb_page_flags { HPG_freed, HPG_vmemmap_optimized, HPG_raw_hwp_unreliable, + HPG_cma, __NR_HPAGEFLAGS, }; @@ -650,6 +651,7 @@ HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) +HPAGEFLAG(Cma, cma) #ifdef CONFIG_HUGETLB_PAGE @@ -678,14 +680,18 @@ struct hstate { char name[HSTATE_NAME_LEN]; }; +struct cma; + struct huge_bootmem_page { struct list_head list; struct hstate *hstate; unsigned long flags; + struct cma *cma; }; #define HUGE_BOOTMEM_HVO 0x0001 #define HUGE_BOOTMEM_ZONES_VALID 0x0002 +#define HUGE_BOOTMEM_CMA 0x0004 bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m); @@ -824,6 +830,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, } #endif +#ifndef arch_has_huge_bootmem_alloc +/* + * Some architectures do their own bootmem allocation, so they can't use + * early CMA allocation. 
+ */ +static inline bool arch_has_huge_bootmem_alloc(void) +{ + return false; +} +#endif + static inline struct hstate *folio_hstate(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 80d401593669..8f753695438c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -131,8 +131,10 @@ static void hugetlb_free_folio(struct folio *folio) #ifdef CONFIG_CMA int nid = folio_nid(folio); - if (cma_free_folio(hugetlb_cma[nid], folio)) + if (folio_test_hugetlb_cma(folio)) { + WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio)); return; + } #endif folio_put(folio); } @@ -1508,6 +1510,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, break; } } + + if (folio) + folio_set_hugetlb_cma(folio); } #endif if (!folio) { @@ -3186,6 +3191,86 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, return ERR_PTR(-ENOSPC); } +static bool __init hugetlb_early_cma(struct hstate *h) +{ + if (arch_has_huge_bootmem_alloc()) + return false; + + return (hstate_is_gigantic(h) && hugetlb_cma_only); +} + +static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) +{ + struct huge_bootmem_page *m; + unsigned long flags; + struct cma *cma; + int listnode = nid; + +#ifdef CONFIG_CMA + if (hugetlb_early_cma(h)) { + flags = HUGE_BOOTMEM_CMA; + cma = hugetlb_cma[nid]; + m = cma_reserve_early(cma, huge_page_size(h)); + if (!m) { + int node; + + if (node_exact) + return NULL; + for_each_online_node(node) { + cma = hugetlb_cma[node]; + if (!cma || node == nid) + continue; + m = cma_reserve_early(cma, huge_page_size(h)); + if (m) { + listnode = node; + break; + } + } + } + } else +#endif + { + flags = 0; + cma = NULL; + if (node_exact) + m = memblock_alloc_exact_nid_raw(huge_page_size(h), + huge_page_size(h), 0, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + else { + m = memblock_alloc_try_nid_raw(huge_page_size(h), + huge_page_size(h), 0, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + /* + * For pre-HVO to work correctly, pages need to be on + * the list for the node they were actually allocated + * from. That node may be different in the case of + * fallback by memblock_alloc_try_nid_raw. So, + * extract the actual node first. + */ + if (m) + listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); + } + } + + if (m) { + /* + * Use the beginning of the huge page to store the + * huge_bootmem_page struct (until gather_bootmem + * puts them into the mem_map). + * + * Put them into a private list first because mem_map + * is not up yet. 
+ */ + INIT_LIST_HEAD(&m->list); + list_add(&m->list, &huge_boot_pages[listnode]); + m->hstate = h; + m->flags = flags; + m->cma = cma; + } + + return m; +} + int alloc_bootmem_huge_page(struct hstate *h, int nid) __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); int __alloc_bootmem_huge_page(struct hstate *h, int nid) @@ -3195,22 +3280,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) /* do node specific alloc */ if (nid != NUMA_NO_NODE) { - m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), - 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); + m = alloc_bootmem(h, node, true); if (!m) return 0; goto found; } + /* allocate from next node when distributing huge pages */ for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) { - m = memblock_alloc_try_nid_raw( - huge_page_size(h), huge_page_size(h), - 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); - /* - * Use the beginning of the huge page to store the - * huge_bootmem_page struct (until gather_bootmem - * puts them into the mem_map). - */ + m = alloc_bootmem(h, node, false); if (!m) return 0; goto found; @@ -3228,21 +3306,6 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), huge_page_size(h) - PAGE_SIZE); - /* - * Put them into a private list first because mem_map is not up yet. - * - * For pre-HVO to work correctly, pages need to be on the list for - * the node they were actually allocated from. That node may be - * different in the case of fallback by memblock_alloc_try_nid_raw. - * So, extract the actual node first. - */ - if (nid == NUMA_NO_NODE) - node = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); - - INIT_LIST_HEAD(&m->list); - list_add(&m->list, &huge_boot_pages[node]); - m->hstate = h; - m->flags = 0; return 1; } @@ -3283,13 +3346,25 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio, prep_compound_head((struct page *)folio, huge_page_order(h)); } +static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) +{ + return m->flags & HUGE_BOOTMEM_HVO; +} + +static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m) +{ + return m->flags & HUGE_BOOTMEM_CMA; +} + /* * memblock-allocated pageblocks might not have the migrate type set * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE) - * here. + * here, or MIGRATE_CMA if this was a page allocated through an early CMA + * reservation. * - * Note that this will not write the page struct, it is ok (and necessary) - * to do this on vmemmap optimized folios. + * In case of vmemmap optimized folios, the tail vmemmap pages are mapped + * read-only, but that's ok - for sparse vmemmap this does not write to + * the page structure. 
*/ static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, struct hstate *h) @@ -3298,9 +3373,13 @@ static void __init hugetlb_bootmem_init_migratetype(struct folio *folio, WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio))); - for (i = 0; i < nr_pages; i += pageblock_nr_pages) - set_pageblock_migratetype(folio_page(folio, i), + for (i = 0; i < nr_pages; i += pageblock_nr_pages) { + if (folio_test_hugetlb_cma(folio)) + init_cma_pageblock(folio_page(folio, i)); + else + set_pageblock_migratetype(folio_page(folio, i), MIGRATE_MOVABLE); + } } static void __init prep_and_add_bootmem_folios(struct hstate *h, @@ -3346,10 +3425,16 @@ bool __init hugetlb_bootmem_page_zones_valid(int nid, return true; } + if (hugetlb_bootmem_page_earlycma(m)) { + valid = cma_validate_zones(m->cma); + goto out; + } + start_pfn = virt_to_phys(m) >> PAGE_SHIFT; valid = !pfn_range_intersects_zones(nid, start_pfn, pages_per_huge_page(m->hstate)); +out: if (!valid) hstate_boot_nrinvalid[hstate_index(m->hstate)]++; @@ -3378,11 +3463,6 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, } } -static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m) -{ - return (m->flags & HUGE_BOOTMEM_HVO); -} - /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages. @@ -3432,14 +3512,21 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid) */ folio_set_hugetlb_vmemmap_optimized(folio); + if (hugetlb_bootmem_page_earlycma(m)) + folio_set_hugetlb_cma(folio); + list_add(&folio->lru, &folio_list); /* * We need to restore the 'stolen' pages to totalram_pages * in order to fix confusing memory reports from free(1) and * other side-effects, like CommitLimit going negative. + * + * For CMA pages, this is done in init_cma_pageblock + * (via hugetlb_bootmem_init_migratetype), so skip it here. */ - adjust_managed_page_count(page, pages_per_huge_page(h)); + if (!folio_test_hugetlb_cma(folio)) + adjust_managed_page_count(page, pages_per_huge_page(h)); cond_resched(); } @@ -3624,8 +3711,11 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long allocated; - /* skip gigantic hugepages allocation if hugetlb_cma enabled */ - if (hstate_is_gigantic(h) && hugetlb_cma_size) { + /* + * Skip gigantic hugepages allocation if early CMA + * reservations are not available. + */ + if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); return; } From 474fe91f213a400334d41397de1a447560be76a6 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Fri, 28 Feb 2025 18:29:28 +0000 Subject: [PATCH 160/431] mm/hugetlb: move hugetlb CMA code in to its own file hugetlb.c contained a number of CONFIG_CMA ifdefs, and the code inside them was large enough to merit being in its own file, so move it, cleaning up things a bit. Hide some direct variable access behind functions to accommodate the move. No functional change intended. 
Link: https://lkml.kernel.org/r/20250228182928.2645936-28-fvdl@google.com Signed-off-by: Frank van der Linden Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: Heiko Carstens Cc: Joao Martins Cc: Johannes Weiner Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Muchun Song Cc: Oscar Salvador Cc: Peter Zijlstra Cc: Roman Gushchin (Cruise) Cc: Usama Arif Cc: Vasily Gorbik Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton --- MAINTAINERS | 2 + mm/Makefile | 3 + mm/hugetlb.c | 269 +++------------------------------------------ mm/hugetlb_cma.c | 275 +++++++++++++++++++++++++++++++++++++++++++++++ mm/hugetlb_cma.h | 57 ++++++++++ 5 files changed, 354 insertions(+), 252 deletions(-) create mode 100644 mm/hugetlb_cma.c create mode 100644 mm/hugetlb_cma.h diff --git a/MAINTAINERS b/MAINTAINERS index 60421d3e48a8..40f233b0fa9c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10708,6 +10708,8 @@ F: fs/hugetlbfs/ F: include/linux/hugetlb.h F: include/trace/events/hugetlbfs.h F: mm/hugetlb.c +F: mm/hugetlb_cma.c +F: mm/hugetlb_cma.h F: mm/hugetlb_vmemmap.c F: mm/hugetlb_vmemmap.h F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c diff --git a/mm/Makefile b/mm/Makefile index 53392d2af3a5..2600e94abd3c 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -79,6 +79,9 @@ obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o +ifdef CONFIG_CMA +obj-$(CONFIG_HUGETLBFS) += hugetlb_cma.o +endif obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8f753695438c..7a96c6edeaef 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -49,19 +49,13 @@ #include #include "internal.h" #include "hugetlb_vmemmap.h" +#include "hugetlb_cma.h" #include int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; -#ifdef CONFIG_CMA -static struct cma *hugetlb_cma[MAX_NUMNODES]; -static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; -#endif -static bool hugetlb_cma_only; -static unsigned long hugetlb_cma_size __initdata; - __initdata struct list_head huge_boot_pages[MAX_NUMNODES]; static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata; @@ -128,14 +122,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma); static void hugetlb_free_folio(struct folio *folio) { -#ifdef CONFIG_CMA - int nid = folio_nid(folio); - if (folio_test_hugetlb_cma(folio)) { - WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio)); + hugetlb_cma_free_folio(folio); return; } -#endif + folio_put(folio); } @@ -1492,31 +1483,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, if (nid == NUMA_NO_NODE) nid = numa_mem_id(); retry: - folio = NULL; -#ifdef CONFIG_CMA - { - int node; - - if (hugetlb_cma[nid]) - folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); - - if (!folio && !(gfp_mask & __GFP_THISNODE)) { - for_each_node_mask(node, *nodemask) { - if (node == nid || !hugetlb_cma[node]) - continue; - - folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); - if (folio) - break; - } - } - - if (folio) - folio_set_hugetlb_cma(folio); - } -#endif + folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask); if (!folio) { - if (hugetlb_cma_only) + if (hugetlb_cma_exclusive_alloc()) return NULL; folio = 
folio_alloc_gigantic(order, gfp_mask, nid, nodemask); @@ -3191,47 +3160,14 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, return ERR_PTR(-ENOSPC); } -static bool __init hugetlb_early_cma(struct hstate *h) -{ - if (arch_has_huge_bootmem_alloc()) - return false; - - return (hstate_is_gigantic(h) && hugetlb_cma_only); -} - static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) { struct huge_bootmem_page *m; - unsigned long flags; - struct cma *cma; int listnode = nid; -#ifdef CONFIG_CMA - if (hugetlb_early_cma(h)) { - flags = HUGE_BOOTMEM_CMA; - cma = hugetlb_cma[nid]; - m = cma_reserve_early(cma, huge_page_size(h)); - if (!m) { - int node; - - if (node_exact) - return NULL; - for_each_online_node(node) { - cma = hugetlb_cma[node]; - if (!cma || node == nid) - continue; - m = cma_reserve_early(cma, huge_page_size(h)); - if (m) { - listnode = node; - break; - } - } - } - } else -#endif - { - flags = 0; - cma = NULL; + if (hugetlb_early_cma(h)) + m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact); + else { if (node_exact) m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h), 0, @@ -3250,6 +3186,11 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) if (m) listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); } + + if (m) { + m->flags = 0; + m->cma = NULL; + } } if (m) { @@ -3264,8 +3205,6 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact) INIT_LIST_HEAD(&m->list); list_add(&m->list, &huge_boot_pages[listnode]); m->hstate = h; - m->flags = flags; - m->cma = cma; } return m; @@ -3715,7 +3654,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) * Skip gigantic hugepages allocation if early CMA * reservations are not available. */ - if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) { + if (hstate_is_gigantic(h) && hugetlb_cma_total_size() && + !hugetlb_early_cma(h)) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); return; } @@ -3752,7 +3692,7 @@ static void __init hugetlb_init_hstates(void) */ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) continue; - if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) + if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER) continue; for_each_hstate(h2) { if (h2 == h) @@ -4654,14 +4594,6 @@ static void hugetlb_register_all_nodes(void) { } #endif -#ifdef CONFIG_CMA -static void __init hugetlb_cma_check(void); -#else -static inline __init void hugetlb_cma_check(void) -{ -} -#endif - static void __init hugetlb_sysfs_init(void) { struct hstate *h; @@ -4845,8 +4777,7 @@ static __init void hugetlb_parse_params(void) hcp->setup(hcp->val); } - if (!hugetlb_cma_size) - hugetlb_cma_only = false; + hugetlb_cma_validate_params(); } /* @@ -7916,169 +7847,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), ALIGN_DOWN(vma->vm_end, PUD_SIZE)); } - -#ifdef CONFIG_CMA -static bool cma_reserve_called __initdata; - -static int __init cmdline_parse_hugetlb_cma(char *p) -{ - int nid, count = 0; - unsigned long tmp; - char *s = p; - - while (*s) { - if (sscanf(s, "%lu%n", &tmp, &count) != 1) - break; - - if (s[count] == ':') { - if (tmp >= MAX_NUMNODES) - break; - nid = array_index_nospec(tmp, MAX_NUMNODES); - - s += count + 1; - tmp = memparse(s, &s); - hugetlb_cma_size_in_node[nid] = tmp; - hugetlb_cma_size += tmp; - - /* - * Skip the separator if have one, otherwise - * break the parsing. 
- */ - if (*s == ',') - s++; - else - break; - } else { - hugetlb_cma_size = memparse(p, &p); - break; - } - } - - return 0; -} - -early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); - -static int __init cmdline_parse_hugetlb_cma_only(char *p) -{ - return kstrtobool(p, &hugetlb_cma_only); -} - -early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only); - -void __init hugetlb_cma_reserve(int order) -{ - unsigned long size, reserved, per_node; - bool node_specific_cma_alloc = false; - int nid; - - /* - * HugeTLB CMA reservation is required for gigantic - * huge pages which could not be allocated via the - * page allocator. Just warn if there is any change - * breaking this assumption. - */ - VM_WARN_ON(order <= MAX_PAGE_ORDER); - cma_reserve_called = true; - - if (!hugetlb_cma_size) - return; - - for (nid = 0; nid < MAX_NUMNODES; nid++) { - if (hugetlb_cma_size_in_node[nid] == 0) - continue; - - if (!node_online(nid)) { - pr_warn("hugetlb_cma: invalid node %d specified\n", nid); - hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; - hugetlb_cma_size_in_node[nid] = 0; - continue; - } - - if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { - pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", - nid, (PAGE_SIZE << order) / SZ_1M); - hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; - hugetlb_cma_size_in_node[nid] = 0; - } else { - node_specific_cma_alloc = true; - } - } - - /* Validate the CMA size again in case some invalid nodes specified. */ - if (!hugetlb_cma_size) - return; - - if (hugetlb_cma_size < (PAGE_SIZE << order)) { - pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", - (PAGE_SIZE << order) / SZ_1M); - hugetlb_cma_size = 0; - return; - } - - if (!node_specific_cma_alloc) { - /* - * If 3 GB area is requested on a machine with 4 numa nodes, - * let's allocate 1 GB on first three nodes and ignore the last one. - */ - per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); - pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", - hugetlb_cma_size / SZ_1M, per_node / SZ_1M); - } - - reserved = 0; - for_each_online_node(nid) { - int res; - char name[CMA_MAX_NAME]; - - if (node_specific_cma_alloc) { - if (hugetlb_cma_size_in_node[nid] == 0) - continue; - - size = hugetlb_cma_size_in_node[nid]; - } else { - size = min(per_node, hugetlb_cma_size - reserved); - } - - size = round_up(size, PAGE_SIZE << order); - - snprintf(name, sizeof(name), "hugetlb%d", nid); - /* - * Note that 'order per bit' is based on smallest size that - * may be returned to CMA allocator in the case of - * huge page demotion. - */ - res = cma_declare_contiguous_multi(size, PAGE_SIZE << order, - HUGETLB_PAGE_ORDER, name, - &hugetlb_cma[nid], nid); - if (res) { - pr_warn("hugetlb_cma: reservation failed: err %d, node %d", - res, nid); - continue; - } - - reserved += size; - pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", - size / SZ_1M, nid); - - if (reserved >= hugetlb_cma_size) - break; - } - - if (!reserved) - /* - * hugetlb_cma_size is used to determine if allocations from - * cma are possible. Set to zero if no cma regions are set up. 
- */ - hugetlb_cma_size = 0; -} - -static void __init hugetlb_cma_check(void) -{ - if (!hugetlb_cma_size || cma_reserve_called) - return; - - pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); -} - -#endif /* CONFIG_CMA */ diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c new file mode 100644 index 000000000000..e0f2d5c3a84c --- /dev/null +++ b/mm/hugetlb_cma.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include +#include + +#include +#include "internal.h" +#include "hugetlb_cma.h" + + +static struct cma *hugetlb_cma[MAX_NUMNODES]; +static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; +static bool hugetlb_cma_only; +static unsigned long hugetlb_cma_size __initdata; + +void hugetlb_cma_free_folio(struct folio *folio) +{ + int nid = folio_nid(folio); + + WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio)); +} + + +struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask, + int nid, nodemask_t *nodemask) +{ + int node; + int order = huge_page_order(h); + struct folio *folio = NULL; + + if (hugetlb_cma[nid]) + folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask); + + if (!folio && !(gfp_mask & __GFP_THISNODE)) { + for_each_node_mask(node, *nodemask) { + if (node == nid || !hugetlb_cma[node]) + continue; + + folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask); + if (folio) + break; + } + } + + if (folio) + folio_set_hugetlb_cma(folio); + + return folio; +} + +struct huge_bootmem_page * __init +hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact) +{ + struct cma *cma; + struct huge_bootmem_page *m; + int node = *nid; + + cma = hugetlb_cma[*nid]; + m = cma_reserve_early(cma, huge_page_size(h)); + if (!m) { + if (node_exact) + return NULL; + + for_each_online_node(node) { + cma = hugetlb_cma[node]; + if (!cma || node == *nid) + continue; + m = cma_reserve_early(cma, huge_page_size(h)); + if (m) { + *nid = node; + break; + } + } + } + + if (m) { + m->flags = HUGE_BOOTMEM_CMA; + m->cma = cma; + } + + return m; +} + + +static bool cma_reserve_called __initdata; + +static int __init cmdline_parse_hugetlb_cma(char *p) +{ + int nid, count = 0; + unsigned long tmp; + char *s = p; + + while (*s) { + if (sscanf(s, "%lu%n", &tmp, &count) != 1) + break; + + if (s[count] == ':') { + if (tmp >= MAX_NUMNODES) + break; + nid = array_index_nospec(tmp, MAX_NUMNODES); + + s += count + 1; + tmp = memparse(s, &s); + hugetlb_cma_size_in_node[nid] = tmp; + hugetlb_cma_size += tmp; + + /* + * Skip the separator if have one, otherwise + * break the parsing. + */ + if (*s == ',') + s++; + else + break; + } else { + hugetlb_cma_size = memparse(p, &p); + break; + } + } + + return 0; +} + +early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); + +static int __init cmdline_parse_hugetlb_cma_only(char *p) +{ + return kstrtobool(p, &hugetlb_cma_only); +} + +early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only); + +void __init hugetlb_cma_reserve(int order) +{ + unsigned long size, reserved, per_node; + bool node_specific_cma_alloc = false; + int nid; + + /* + * HugeTLB CMA reservation is required for gigantic + * huge pages which could not be allocated via the + * page allocator. Just warn if there is any change + * breaking this assumption. 
+ */ + VM_WARN_ON(order <= MAX_PAGE_ORDER); + cma_reserve_called = true; + + if (!hugetlb_cma_size) + return; + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + if (hugetlb_cma_size_in_node[nid] == 0) + continue; + + if (!node_online(nid)) { + pr_warn("hugetlb_cma: invalid node %d specified\n", nid); + hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; + hugetlb_cma_size_in_node[nid] = 0; + continue; + } + + if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { + pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", + nid, (PAGE_SIZE << order) / SZ_1M); + hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; + hugetlb_cma_size_in_node[nid] = 0; + } else { + node_specific_cma_alloc = true; + } + } + + /* Validate the CMA size again in case some invalid nodes specified. */ + if (!hugetlb_cma_size) + return; + + if (hugetlb_cma_size < (PAGE_SIZE << order)) { + pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", + (PAGE_SIZE << order) / SZ_1M); + hugetlb_cma_size = 0; + return; + } + + if (!node_specific_cma_alloc) { + /* + * If 3 GB area is requested on a machine with 4 numa nodes, + * let's allocate 1 GB on first three nodes and ignore the last one. + */ + per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); + pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", + hugetlb_cma_size / SZ_1M, per_node / SZ_1M); + } + + reserved = 0; + for_each_online_node(nid) { + int res; + char name[CMA_MAX_NAME]; + + if (node_specific_cma_alloc) { + if (hugetlb_cma_size_in_node[nid] == 0) + continue; + + size = hugetlb_cma_size_in_node[nid]; + } else { + size = min(per_node, hugetlb_cma_size - reserved); + } + + size = round_up(size, PAGE_SIZE << order); + + snprintf(name, sizeof(name), "hugetlb%d", nid); + /* + * Note that 'order per bit' is based on smallest size that + * may be returned to CMA allocator in the case of + * huge page demotion. + */ + res = cma_declare_contiguous_multi(size, PAGE_SIZE << order, + HUGETLB_PAGE_ORDER, name, + &hugetlb_cma[nid], nid); + if (res) { + pr_warn("hugetlb_cma: reservation failed: err %d, node %d", + res, nid); + continue; + } + + reserved += size; + pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", + size / SZ_1M, nid); + + if (reserved >= hugetlb_cma_size) + break; + } + + if (!reserved) + /* + * hugetlb_cma_size is used to determine if allocations from + * cma are possible. Set to zero if no cma regions are set up. 
+ */ + hugetlb_cma_size = 0; +} + +void __init hugetlb_cma_check(void) +{ + if (!hugetlb_cma_size || cma_reserve_called) + return; + + pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); +} + +bool hugetlb_cma_exclusive_alloc(void) +{ + return hugetlb_cma_only; +} + +unsigned long __init hugetlb_cma_total_size(void) +{ + return hugetlb_cma_size; +} + +void __init hugetlb_cma_validate_params(void) +{ + if (!hugetlb_cma_size) + hugetlb_cma_only = false; +} + +bool __init hugetlb_early_cma(struct hstate *h) +{ + if (arch_has_huge_bootmem_alloc()) + return false; + + return hstate_is_gigantic(h) && hugetlb_cma_only; +} diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h new file mode 100644 index 000000000000..f7d7fb9880a2 --- /dev/null +++ b/mm/hugetlb_cma.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HUGETLB_CMA_H +#define _LINUX_HUGETLB_CMA_H + +#ifdef CONFIG_CMA +void hugetlb_cma_free_folio(struct folio *folio); +struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask, + int nid, nodemask_t *nodemask); +struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, + bool node_exact); +void hugetlb_cma_check(void); +bool hugetlb_cma_exclusive_alloc(void); +unsigned long hugetlb_cma_total_size(void); +void hugetlb_cma_validate_params(void); +bool hugetlb_early_cma(struct hstate *h); +#else +static inline void hugetlb_cma_free_folio(struct folio *folio) +{ +} + +static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h, + gfp_t gfp_mask, int nid, nodemask_t *nodemask) +{ + return NULL; +} + +static inline +struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, + bool node_exact) +{ + return NULL; +} + +static inline void hugetlb_cma_check(void) +{ +} + +static inline bool hugetlb_cma_exclusive_alloc(void) +{ + return false; +} + +static inline unsigned long hugetlb_cma_total_size(void) +{ + return 0; +} + +static inline void hugetlb_cma_validate_params(void) +{ +} + +static inline bool hugetlb_early_cma(struct hstate *h) +{ + return false; +} +#endif +#endif From 2560c8c3f41d7a53e1554d1e4f207bf966e7527f Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 19 Feb 2025 12:24:02 +0100 Subject: [PATCH 161/431] arm/pgtable: remove duplicate included header file The header file asm-generic/pgtable-nopud.h is included whether CONFIG_MMU is defined or not. Include it only once before the #ifndef/#else/#endif preprocessor directives and remove the following make includecheck warning: asm-generic/pgtable-nopud.h is included more than once Link: https://lkml.kernel.org/r/20250219112403.3959-2-thorsten.blum@linux.dev Signed-off-by: Thorsten Blum Reviewed-by: Mike Rapoport (Microsoft) Signed-off-by: Andrew Morton --- arch/arm/include/asm/pgtable.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index be91e376df79..6b986ef6042f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -19,14 +19,13 @@ extern struct page *empty_zero_page; #define ZERO_PAGE(vaddr) (empty_zero_page) #endif -#ifndef CONFIG_MMU - #include + +#ifndef CONFIG_MMU #include #else -#include #include #include From f809b9f3046f702bc1ae40695acb27c1b0abc346 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 19 Feb 2025 14:01:45 -0800 Subject: [PATCH 162/431] mm/damon: implement a new DAMOS filter type for unmapped pages Patch series "mm/damon: introduce DAMOS filter type for unmapped pages". 
User decides whether their memory will be mapped or unmapped. It implies that the two types of memory can have different characteristics and management requirements. Provide the DAMON-observaibility DAMOS-operation capability for the different types by introducing a new DAMOS filter type for unmapped pages. This patch (of 2): Implement yet another DAMOS filter type for unmapped pages on DAMON kernel API, and add support of it from the physical address space DAMON operations set (paddr). Since it is for only unmapped pages, support from the virtual address spaces DAMON operations set (vaddr) is not required. Link: https://lkml.kernel.org/r/20250219220146.133650-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250219220146.133650-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 2 ++ mm/damon/paddr.c | 3 +++ mm/damon/sysfs-schemes.c | 1 + 3 files changed, 6 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 5e7ae7bca5dc..242910b190c9 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -337,6 +337,7 @@ struct damos_stat { * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages. * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage. + * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages. * @DAMOS_FILTER_TYPE_ADDR: Address range. * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. * @NR_DAMOS_FILTER_TYPES: Number of filter types. @@ -357,6 +358,7 @@ enum damos_filter_type { DAMOS_FILTER_TYPE_MEMCG, DAMOS_FILTER_TYPE_YOUNG, DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, + DAMOS_FILTER_TYPE_UNMAPPED, DAMOS_FILTER_TYPE_ADDR, DAMOS_FILTER_TYPE_TARGET, NR_DAMOS_FILTER_TYPES, diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 1a5974640b93..d5db313ca717 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -236,6 +236,9 @@ static bool damos_pa_filter_match(struct damos_filter *filter, matched = filter->sz_range.min <= folio_sz && folio_sz <= filter->sz_range.max; break; + case DAMOS_FILTER_TYPE_UNMAPPED: + matched = !folio_mapped(folio) || !folio_raw_mapping(folio); + break; default: break; } diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 881d00bb3a34..66a1c46cee84 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -331,6 +331,7 @@ static const char * const damon_sysfs_scheme_filter_type_strs[] = { "memcg", "young", "hugepage_size", + "unmapped", "addr", "target", }; From 375c28a0df0ee8869d0980228d659d91f85455f9 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 19 Feb 2025 14:01:46 -0800 Subject: [PATCH 163/431] Docs/mm/damon/design: document unmapped DAMOS filter type Document availability and meaning of unmapped DAMOS filter type on design document. Since introduction of the type requires no additional user ABI, usage and ABI document need no update. Link: https://lkml.kernel.org/r/20250219220146.133650-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 6a66aa0833fd..5af991551a86 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -617,6 +617,8 @@ Below ``type`` of filters are currently supported. scheme. - hugepage_size - Applied to pages that managed in a given size range. + - unmapped + - Applied to pages that unmapped. 
To know how user-space can set the filters via :ref:`DAMON sysfs interface `, refer to :ref:`filters ` part of the From 9fa26fb554baaf71826814804749f5cff130c4d6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 19 Feb 2025 08:36:07 +0000 Subject: [PATCH 164/431] mm/mincore: improve performance by adding an unlikely hint Adding an unlikely() hint on the masked start comparison error return path improves run-time performance of the mincore system call. Benchmarking on an i9-12900 shows an improvement of 7ns on mincore calls on a 256KB mmap'd region where 50% of the pages we resident. Improvement was from ~970 ns down to 963 ns, so a small ~0.7% improvement. Results based on running 20 tests with turbo disabled (to reduce clock freq turbo changes), with 10 second run per test and comparing the number of mincores calls per second. The % standard deviation of the 20 tests was ~0.10%, so results are reliable. Link: https://lkml.kernel.org/r/20250219083607.5183-1-colin.i.king@gmail.com Signed-off-by: Colin Ian King Cc: Matthew Wilcow Signed-off-by: Andrew Morton --- mm/mincore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/mincore.c b/mm/mincore.c index d6bd19e520fc..832f29f46767 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -239,7 +239,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, start = untagged_addr(start); /* Check the start address: needs to be page-aligned.. */ - if (start & ~PAGE_MASK) + if (unlikely(start & ~PAGE_MASK)) return -EINVAL; /* ..and we need to be passed a valid user-space range */ From 58abac769b05f6e972d34a02a46a04589d6e9853 Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Wed, 12 Feb 2025 10:58:42 +0800 Subject: [PATCH 165/431] mm/folio_queue: delete __folio_order and use folio_order directly __folio_order is the same as folio_order, remove __folio_order and then just include mm.h and use folio_order directly. Link: https://lkml.kernel.org/r/20250212025843.80283-2-liuye@kylinos.cn Signed-off-by: Liu Ye Reviewed-by: Shivank Garg Reviewed-by: Dev Jain Acked-by: David Howells Cc: Christian Brauner Signed-off-by: Andrew Morton --- include/linux/folio_queue.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h index 4d3f8074c137..45ad2408a80c 100644 --- a/include/linux/folio_queue.h +++ b/include/linux/folio_queue.h @@ -15,6 +15,7 @@ #define _LINUX_FOLIO_QUEUE_H #include +#include /* * Segment in a queue of running buffers. 
Each segment can hold a number of @@ -216,13 +217,6 @@ static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot) clear_bit(slot, &folioq->marks3); } -static inline unsigned int __folio_order(struct folio *folio) -{ - if (!folio_test_large(folio)) - return 0; - return folio->_flags_1 & 0xff; -} - /** * folioq_append: Add a folio to a folio queue segment * @folioq: The segment to add to @@ -241,7 +235,7 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli unsigned int slot = folioq->vec.nr++; folioq->vec.folios[slot] = folio; - folioq->orders[slot] = __folio_order(folio); + folioq->orders[slot] = folio_order(folio); return slot; } @@ -263,7 +257,7 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct unsigned int slot = folioq->vec.nr++; folioq->vec.folios[slot] = folio; - folioq->orders[slot] = __folio_order(folio); + folioq->orders[slot] = folio_order(folio); folioq_mark(folioq, slot); return slot; } From bd175a1d84e3ff05da032160ca2399437c23a59f Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:10 +0900 Subject: [PATCH 166/431] zram: sleepable entry locking Patch series "zsmalloc/zram: there be preemption", v10. Currently zram runs compression and decompression in non-preemptible sections, e.g. zcomp_stream_get() // grabs CPU local lock zcomp_compress() or zram_slot_lock() // grabs entry spin-lock zcomp_stream_get() // grabs CPU local lock zs_map_object() // grabs rwlock and CPU local lock zcomp_decompress() Potentially a little troublesome for a number of reasons. For instance, this makes it impossible to use async compression algorithms or/and H/W compression algorithms, which can wait for OP completion or resource availability. This also restricts what compression algorithms can do internally, for example, zstd can allocate internal state memory for C/D dictionaries: do_fsync() do_writepages() zram_bio_write() zram_write_page() // become non-preemptible zcomp_compress() zstd_compress() ZSTD_compress_usingCDict() ZSTD_compressBegin_usingCDict_internal() ZSTD_resetCCtx_usingCDict() ZSTD_resetCCtx_internal() zstd_custom_alloc() // memory allocation Not to mention that the system can be configured to maximize compression ratio at a cost of CPU/HW time (e.g. lz4hc or deflate with very high compression level) so zram can stay in non-preemptible section (even under spin-lock or/and rwlock) for an extended period of time. Aside from compression algorithms, this also restricts what zram can do. One particular example is zram_write_page() zsmalloc handle allocation, which has an optimistic allocation (disallowing direct reclaim) and a pessimistic fallback path, which then forces zram to compress the page one more time. This series changes zram to not directly impose atomicity restrictions on compression algorithms (and on itself), which makes zram write() fully preemptible; zram read(), sadly, is not always preemptible yet. There are still indirect atomicity restrictions imposed by zsmalloc(). One notable example is object mapping API, which returns with: a) local CPU lock held b) zspage rwlock held First, zsmalloc's zspage lock is converted from rwlock to a special type of RW-lookalike look with some extra guarantees/features. Second, a new handle mapping is introduced which doesn't use per-CPU buffers (and hence no local CPU lock), does fewer memcpy() calls, but requires users to provide a pointer to temp buffer for object copy-in (when needed). 
Third, zram is converted to the new zsmalloc mapping API and thus zram read() becomes preemptible. This patch (of 19): Concurrent modifications of meta table entries is now handled by per-entry spin-lock. This has a number of shortcomings. First, this imposes atomic requirements on compression backends. zram can call both zcomp_compress() and zcomp_decompress() under entry spin-lock, which implies that we can use only compression algorithms that don't schedule/sleep/wait during compression and decompression. This, for instance, makes it impossible to use some of the ASYNC compression algorithms (H/W compression, etc.) implementations. Second, this can potentially trigger watchdogs. For example, entry re-compression with secondary algorithms is performed under entry spin-lock. Given that we chain secondary compression algorithms and that some of them can be configured for best compression ratio (and worst compression speed) zram can stay under spin-lock for quite some time. Having a per-entry mutex (or, for instance, a rw-semaphore) significantly increases sizeof() of each entry and hence the meta table. Therefore entry locking returns back to bit locking, as before, however, this time also preempt-rt friendly, because if waits-on-bit instead of spinning-on-bit. Lock owners are also now permitted to schedule, which is a first step on the path of making zram non-atomic. Link: https://lkml.kernel.org/r/20250303022425.285971-1-senozhatsky@chromium.org Link: https://lkml.kernel.org/r/20250303022425.285971-2-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 54 ++++++++++++++++++++++++++++------- drivers/block/zram/zram_drv.h | 15 ++++++---- 2 files changed, 52 insertions(+), 17 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 9f5020b077c5..70599d41b828 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -58,19 +58,56 @@ static void zram_free_page(struct zram *zram, size_t index); static int zram_read_from_zspool(struct zram *zram, struct page *page, u32 index); -static int zram_slot_trylock(struct zram *zram, u32 index) +#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map) + +static void zram_slot_lock_init(struct zram *zram, u32 index) { - return spin_trylock(&zram->table[index].lock); + static struct lock_class_key __key; + + lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock", + &__key, 0); +} + +/* + * entry locking rules: + * + * 1) Lock is exclusive + * + * 2) lock() function can sleep waiting for the lock + * + * 3) Lock owner can sleep + * + * 4) Use TRY lock variant when in atomic context + * - must check return value and handle locking failers + */ +static __must_check bool zram_slot_trylock(struct zram *zram, u32 index) +{ + unsigned long *lock = &zram->table[index].flags; + + if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) { + mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_); + lock_acquired(slot_dep_map(zram, index), _RET_IP_); + return true; + } + + return false; } static void zram_slot_lock(struct zram *zram, u32 index) { - spin_lock(&zram->table[index].lock); + unsigned long *lock = &zram->table[index].flags; + + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_); + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE); + lock_acquired(slot_dep_map(zram, index), _RET_IP_); } static 
void zram_slot_unlock(struct zram *zram, u32 index) { - spin_unlock(&zram->table[index].lock); + unsigned long *lock = &zram->table[index].flags; + + mutex_release(slot_dep_map(zram, index), _RET_IP_); + clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock); } static inline bool init_done(struct zram *zram) @@ -93,7 +130,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) zram->table[index].handle = handle; } -/* flag operations require table entry bit_spin_lock() being held */ static bool zram_test_flag(struct zram *zram, u32 index, enum zram_pageflags flag) { @@ -1473,15 +1509,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) huge_class_size = zs_huge_class_size(zram->mem_pool); for (index = 0; index < num_pages; index++) - spin_lock_init(&zram->table[index].lock); + zram_slot_lock_init(zram, index); + return true; } -/* - * To protect concurrent access to the same index entry, - * caller should hold this table index entry's bit_spinlock to - * indicate this index entry is accessing. - */ static void zram_free_page(struct zram *zram, size_t index) { unsigned long handle; diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index db78d7c01b9a..c804f78a7fa8 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -28,7 +28,6 @@ #define ZRAM_SECTOR_PER_LOGICAL_BLOCK \ (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT)) - /* * ZRAM is mainly used for memory efficiency so we want to keep memory * footprint small and thus squeeze size and zram pageflags into a flags @@ -46,6 +45,7 @@ /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */ + ZRAM_ENTRY_LOCK, /* entry access lock bit */ ZRAM_WB, /* page is stored on backing_device */ ZRAM_PP_SLOT, /* Selected for post-processing */ ZRAM_HUGE, /* Incompressible page */ @@ -58,16 +58,19 @@ enum zram_pageflags { __NR_ZRAM_PAGEFLAGS, }; -/*-- Data structures */ - -/* Allocated for each disk page */ +/* + * Allocated for each disk page. We use bit-lock (ZRAM_ENTRY_LOCK bit + * of flags) to save memory. There can be plenty of entries and standard + * locking primitives (e.g. mutex) will significantly increase sizeof() + * of each entry and hence of the meta table. + */ struct zram_table_entry { unsigned long handle; - unsigned int flags; - spinlock_t lock; + unsigned long flags; #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME ktime_t ac_time; #endif + struct lockdep_map dep_map; }; struct zram_stats { From 2efa9e9eb4db2725c9f419f241cb0e09fc3d8574 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:11 +0900 Subject: [PATCH 167/431] zram: permit preemption with active compression stream Currently, per-CPU stream access is done from a non-preemptible (atomic) section, which imposes the same atomicity requirements on compression backends as entry spin-lock, and makes it impossible to use algorithms that can schedule/wait/sleep during compression and decompression. Switch to preemptible per-CPU model, similar to the one used in zswap. Instead of a per-CPU local lock, each stream carries a mutex which is locked throughout entire time zram uses it for compression or decompression, so that cpu-dead event waits for zram to stop using a particular per-CPU stream and release it. 
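From a caller's point of view the contract is simply that the get may sleep and returns a locked stream, and the put unlocks it. A minimal usage sketch, assuming only the zcomp API names introduced by this series (error handling trimmed):

    struct zcomp_strm *zstrm;
    void *mem;

    zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
    /* zstrm->lock (a mutex) is held, so everything below may sleep */
    mem = kmap_local_page(page);
    ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, mem, &comp_len);
    kunmap_local(mem);
    zcomp_stream_put(zstrm);    /* releases zstrm->lock */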
Link: https://lkml.kernel.org/r/20250303022425.285971-3-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Suggested-by: Yosry Ahmed Reviewed-by: Yosry Ahmed Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Signed-off-by: Andrew Morton --- drivers/block/zram/zcomp.c | 41 +++++++++++++++++++++++++---------- drivers/block/zram/zcomp.h | 6 ++--- drivers/block/zram/zram_drv.c | 20 ++++++++--------- 3 files changed, 42 insertions(+), 25 deletions(-) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index bb514403e305..53e4c37441be 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include @@ -109,13 +109,29 @@ ssize_t zcomp_available_show(const char *comp, char *buf) struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { - local_lock(&comp->stream->lock); - return this_cpu_ptr(comp->stream); + for (;;) { + struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream); + + /* + * Inspired by zswap + * + * stream is returned with ->mutex locked which prevents + * cpu_dead() from releasing this stream under us, however + * there is still a race window between raw_cpu_ptr() and + * mutex_lock(), during which we could have been migrated + * from a CPU that has already destroyed its stream. If + * so then unlock and re-try on the current CPU. + */ + mutex_lock(&zstrm->lock); + if (likely(zstrm->buffer)) + return zstrm; + mutex_unlock(&zstrm->lock); + } } -void zcomp_stream_put(struct zcomp *comp) +void zcomp_stream_put(struct zcomp_strm *zstrm) { - local_unlock(&comp->stream->lock); + mutex_unlock(&zstrm->lock); } int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, @@ -151,12 +167,9 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm, int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) { struct zcomp *comp = hlist_entry(node, struct zcomp, node); - struct zcomp_strm *zstrm; + struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu); int ret; - zstrm = per_cpu_ptr(comp->stream, cpu); - local_lock_init(&zstrm->lock); - ret = zcomp_strm_init(comp, zstrm); if (ret) pr_err("Can't allocate a compression stream\n"); @@ -166,16 +179,17 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node) { struct zcomp *comp = hlist_entry(node, struct zcomp, node); - struct zcomp_strm *zstrm; + struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu); - zstrm = per_cpu_ptr(comp->stream, cpu); + mutex_lock(&zstrm->lock); zcomp_strm_free(comp, zstrm); + mutex_unlock(&zstrm->lock); return 0; } static int zcomp_init(struct zcomp *comp, struct zcomp_params *params) { - int ret; + int ret, cpu; comp->stream = alloc_percpu(struct zcomp_strm); if (!comp->stream) @@ -186,6 +200,9 @@ static int zcomp_init(struct zcomp *comp, struct zcomp_params *params) if (ret) goto cleanup; + for_each_possible_cpu(cpu) + mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock); + ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node); if (ret < 0) goto cleanup; diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index ad5762813842..23b8236b9090 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -3,7 +3,7 @@ #ifndef _ZCOMP_H_ #define _ZCOMP_H_ -#include +#include #define ZCOMP_PARAM_NO_LEVEL INT_MIN @@ -31,7 +31,7 @@ struct zcomp_ctx { }; struct zcomp_strm { - local_lock_t lock; + struct mutex lock; /* compression buffer */ void *buffer; 
struct zcomp_ctx ctx; @@ -77,7 +77,7 @@ struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params); void zcomp_destroy(struct zcomp *comp); struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); -void zcomp_stream_put(struct zcomp *comp); +void zcomp_stream_put(struct zcomp_strm *zstrm); int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, const void *src, unsigned int *dst_len); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 70599d41b828..dd669d48ae6f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1607,7 +1607,7 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index) ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); kunmap_local(dst); zs_unmap_object(zram->mem_pool, handle); - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); return ret; } @@ -1768,14 +1768,14 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) kunmap_local(mem); if (unlikely(ret)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); pr_err("Compression failed! err=%d\n", ret); zs_free(zram->mem_pool, handle); return ret; } if (comp_len >= huge_class_size) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); return write_incompressible_page(zram, page, index); } @@ -1799,7 +1799,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); atomic64_inc(&zram->stats.writestall); handle = zs_malloc(zram->mem_pool, comp_len, GFP_NOIO | __GFP_HIGHMEM | @@ -1811,7 +1811,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) } if (!zram_can_store_page(zram)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); zs_free(zram->mem_pool, handle); return -ENOMEM; } @@ -1819,7 +1819,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); memcpy(dst, zstrm->buffer, comp_len); - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); zs_unmap_object(zram->mem_pool, handle); zram_slot_lock(zram, index); @@ -1978,7 +1978,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, kunmap_local(src); if (ret) { - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); return ret; } @@ -1988,7 +1988,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, /* Continue until we make progress */ if (class_index_new >= class_index_old || (threshold && comp_len_new >= threshold)) { - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); continue; } @@ -2046,13 +2046,13 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle_new)) { - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); return PTR_ERR((void *)handle_new); } dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); memcpy(dst, zstrm->buffer, comp_len_new); - zcomp_stream_put(zram->comps[prio]); + zcomp_stream_put(zstrm); zs_unmap_object(zram->mem_pool, handle_new); From be656187b8a9c07bd19e5268fa626ecb674876c6 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:12 +0900 Subject: [PATCH 168/431] zram: remove unused crypto include We stopped using crypto API (for 
the time being), so remove its include and replace CRYPTO_MAX_ALG_NAME with a local define. Link: https://lkml.kernel.org/r/20250303022425.285971-4-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zcomp.c | 1 - drivers/block/zram/zram_drv.c | 4 +++- drivers/block/zram/zram_drv.h | 1 - 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 53e4c37441be..cfdde2e0748a 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include "zcomp.h" diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index dd669d48ae6f..248dab7cc7f4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex); static int zram_major; static const char *default_compressor = CONFIG_ZRAM_DEF_COMP; +#define ZRAM_MAX_ALGO_NAME_SZ 128 + /* Module params (documentation at end) */ static unsigned int num_devices = 1; /* @@ -1148,7 +1150,7 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) size_t sz; sz = strlen(buf); - if (sz >= CRYPTO_MAX_ALG_NAME) + if (sz >= ZRAM_MAX_ALGO_NAME_SZ) return -E2BIG; compressor = kstrdup(buf, GFP_KERNEL); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index c804f78a7fa8..7c11f9dab335 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -17,7 +17,6 @@ #include #include -#include #include "zcomp.h" From 4127e13c9302f6892f73793f133ea4b4fffb2964 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:13 +0900 Subject: [PATCH 169/431] zram: remove max_comp_streams device attr max_comp_streams device attribute has been defunct since May 2016 when zram switched to per-CPU compression streams, remove it. Link: https://lkml.kernel.org/r/20250303022425.285971-5-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-block-zram | 8 ----- Documentation/admin-guide/blockdev/zram.rst | 36 ++++++--------------- drivers/block/zram/zram_drv.c | 23 ------------- 3 files changed, 10 insertions(+), 57 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index 1ef69e0271f9..36c57de0a10a 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -22,14 +22,6 @@ Description: device. The reset operation frees all the memory associated with this device. -What: /sys/block/zram/max_comp_streams -Date: February 2014 -Contact: Sergey Senozhatsky -Description: - The max_comp_streams file is read-write and specifies the - number of backend's zcomp_strm compression streams (number of - concurrent compress operations). 
- What: /sys/block/zram/comp_algorithm Date: February 2014 Contact: Sergey Senozhatsky diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst index 1576fb93f06c..9bdb30901a93 100644 --- a/Documentation/admin-guide/blockdev/zram.rst +++ b/Documentation/admin-guide/blockdev/zram.rst @@ -54,7 +54,7 @@ The list of possible return codes: If you use 'echo', the returned value is set by the 'echo' utility, and, in general case, something like:: - echo 3 > /sys/block/zram0/max_comp_streams + echo foo > /sys/block/zram0/comp_algorithm if [ $? -ne 0 ]; then handle_error fi @@ -73,21 +73,7 @@ This creates 4 devices: /dev/zram{0,1,2,3} num_devices parameter is optional and tells zram how many devices should be pre-created. Default: 1. -2) Set max number of compression streams -======================================== - -Regardless of the value passed to this attribute, ZRAM will always -allocate multiple compression streams - one per online CPU - thus -allowing several concurrent compression operations. The number of -allocated compression streams goes down when some of the CPUs -become offline. There is no single-compression-stream mode anymore, -unless you are running a UP system or have only 1 CPU online. - -To find out how many streams are currently available:: - - cat /sys/block/zram0/max_comp_streams - -3) Select compression algorithm +2) Select compression algorithm =============================== Using comp_algorithm device attribute one can see available and @@ -107,7 +93,7 @@ Examples:: For the time being, the `comp_algorithm` content shows only compression algorithms that are supported by zram. -4) Set compression algorithm parameters: Optional +3) Set compression algorithm parameters: Optional ================================================= Compression algorithms may support specific parameters which can be @@ -138,7 +124,7 @@ better the compression ratio, it even can take negatives values for some algorithms), for other algorithms `level` is acceleration level (the higher the value the lower the compression ratio). -5) Set Disksize +4) Set Disksize =============== Set disk size by writing the value to sysfs node 'disksize'. @@ -158,7 +144,7 @@ There is little point creating a zram of greater than twice the size of memory since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. -6) Set memory limit: Optional +5) Set memory limit: Optional ============================= Set memory limit by writing the value to sysfs node 'mem_limit'. 
@@ -177,7 +163,7 @@ Examples:: # To disable memory limit echo 0 > /sys/block/zram0/mem_limit -7) Activate +6) Activate =========== :: @@ -188,7 +174,7 @@ Examples:: mkfs.ext4 /dev/zram1 mount /dev/zram1 /tmp -8) Add/remove zram devices +7) Add/remove zram devices ========================== zram provides a control interface, which enables dynamic (on-demand) device @@ -208,7 +194,7 @@ execute:: echo X > /sys/class/zram-control/hot_remove -9) Stats +8) Stats ======== Per-device statistics are exported as various nodes under /sys/block/zram/ @@ -228,8 +214,6 @@ mem_limit WO specifies the maximum amount of memory ZRAM can writeback_limit WO specifies the maximum amount of write IO zram can write out to backing device as 4KB unit writeback_limit_enable RW show and set writeback_limit feature -max_comp_streams RW the number of possible concurrent compress - operations comp_algorithm RW show and change the compression algorithm algorithm_params WO setup compression algorithm parameters compact WO trigger memory compaction @@ -310,7 +294,7 @@ a single line of text and contains the following stats separated by whitespace: Unit: 4K bytes ============== ============================================================= -10) Deactivate +9) Deactivate ============== :: @@ -318,7 +302,7 @@ a single line of text and contains the following stats separated by whitespace: swapoff /dev/zram0 umount /dev/zram1 -11) Reset +10) Reset ========= Write any positive value to 'reset' sysfs node:: diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 248dab7cc7f4..93cedc60ac16 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1103,27 +1103,6 @@ static void zram_debugfs_register(struct zram *zram) {}; static void zram_debugfs_unregister(struct zram *zram) {}; #endif -/* - * We switched to per-cpu streams and this attr is not needed anymore. - * However, we will keep it around for some time, because: - * a) we may revert per-cpu streams in the future - * b) it's visible to user space and we need to follow our 2 years - * retirement rule; but we already have a number of 'soon to be - * altered' attrs, so max_comp_streams need to wait for the next - * layoff cycle. 
- */ -static ssize_t max_comp_streams_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus()); -} - -static ssize_t max_comp_streams_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - return len; -} - static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) { /* Do not free statically defined compression algorithms */ @@ -2540,7 +2519,6 @@ static DEVICE_ATTR_WO(reset); static DEVICE_ATTR_WO(mem_limit); static DEVICE_ATTR_WO(mem_used_max); static DEVICE_ATTR_WO(idle); -static DEVICE_ATTR_RW(max_comp_streams); static DEVICE_ATTR_RW(comp_algorithm); #ifdef CONFIG_ZRAM_WRITEBACK static DEVICE_ATTR_RW(backing_dev); @@ -2562,7 +2540,6 @@ static struct attribute *zram_disk_attrs[] = { &dev_attr_mem_limit.attr, &dev_attr_mem_used_max.attr, &dev_attr_idle.attr, - &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, #ifdef CONFIG_ZRAM_WRITEBACK &dev_attr_backing_dev.attr, From 80af56cb29332b78792ca1bc785157e404b10004 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:14 +0900 Subject: [PATCH 170/431] zram: remove second stage of handle allocation Previously zram write() was atomic which required us to pass __GFP_KSWAPD_RECLAIM to zsmalloc handle allocation on a fast path and attempt a slow path allocation (with recompression) if the fast path failed. Since we are not in atomic context anymore we can permit direct reclaim during handle allocation, and hence can have a single allocation path. There is no slow path anymore so we don't unlock per-CPU stream (and don't lose compressed data) which means that there is no need to do recompression now (which should reduce CPU and battery usage). Link: https://lkml.kernel.org/r/20250303022425.285971-6-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 39 +++++++---------------------------- 1 file changed, 7 insertions(+), 32 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 93cedc60ac16..f043f35b17a4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1723,11 +1723,11 @@ static int write_incompressible_page(struct zram *zram, struct page *page, static int zram_write_page(struct zram *zram, struct page *page, u32 index) { int ret = 0; - unsigned long handle = -ENOMEM; - unsigned int comp_len = 0; + unsigned long handle; + unsigned int comp_len; void *dst, *mem; struct zcomp_strm *zstrm; - unsigned long element = 0; + unsigned long element; bool same_filled; /* First, free memory allocated to this slot (if any) */ @@ -1741,7 +1741,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) if (same_filled) return write_same_filled_page(zram, element, index); -compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); mem = kmap_local_page(page); ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, @@ -1751,7 +1750,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) if (unlikely(ret)) { zcomp_stream_put(zstrm); pr_err("Compression failed! 
err=%d\n", ret); - zs_free(zram->mem_pool, handle); return ret; } @@ -1760,35 +1758,12 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) return write_incompressible_page(zram, page, index); } - /* - * handle allocation has 2 paths: - * a) fast path is executed with preemption disabled (for - * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, - * since we can't sleep; - * b) slow path enables preemption and attempts to allocate - * the page with __GFP_DIRECT_RECLAIM bit set. we have to - * put per-cpu compression stream and, thus, to re-do - * the compression once handle is allocated. - * - * if we have a 'non-null' handle here then we are coming - * from the slow path and handle has already been allocated. - */ - if (IS_ERR_VALUE(handle)) - handle = zs_malloc(zram->mem_pool, comp_len, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + handle = zs_malloc(zram->mem_pool, comp_len, + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) { zcomp_stream_put(zstrm); - atomic64_inc(&zram->stats.writestall); - handle = zs_malloc(zram->mem_pool, comp_len, - GFP_NOIO | __GFP_HIGHMEM | - __GFP_MOVABLE); - if (IS_ERR_VALUE(handle)) - return PTR_ERR((void *)handle); - - goto compress_again; + return PTR_ERR((void *)handle); } if (!zram_can_store_page(zram)) { From 9c7ccc8d99ad4afbfb7f6328dff0a88bed63bc27 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:15 +0900 Subject: [PATCH 171/431] zram: add GFP_NOWARN to incompressible zsmalloc handle allocation We normally use __GFP_NOWARN for zsmalloc handle allocations, add it to write_incompressible_page() allocation too. Link: https://lkml.kernel.org/r/20250303022425.285971-7-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f043f35b17a4..249a936b6aac 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1691,7 +1691,8 @@ static int write_incompressible_page(struct zram *zram, struct page *page, * like we do for compressible pages. */ handle = zs_malloc(zram->mem_pool, PAGE_SIZE, - GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE); + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); From f3b0c6c8996a8b4e717f71e13ebb7501841f0eb7 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:16 +0900 Subject: [PATCH 172/431] zram: remove writestall zram_stats member There is no zsmalloc handle allocation slow path now and writestall is not possible any longer. Remove it from zram_stats. 
Link: https://lkml.kernel.org/r/20250303022425.285971-8-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 3 +-- drivers/block/zram/zram_drv.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 249a936b6aac..fc9321af3ef4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1437,9 +1437,8 @@ static ssize_t debug_stat_show(struct device *dev, down_read(&zram->init_lock); ret = scnprintf(buf, PAGE_SIZE, - "version: %d\n%8llu %8llu\n", + "version: %d\n0 %8llu\n", version, - (u64)atomic64_read(&zram->stats.writestall), (u64)atomic64_read(&zram->stats.miss_free)); up_read(&zram->init_lock); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 7c11f9dab335..6cee93f9c0d0 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -82,7 +82,6 @@ struct zram_stats { atomic64_t huge_pages_since; /* no. of huge pages since zram set up */ atomic64_t pages_stored; /* no. of pages currently stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */ - atomic64_t writestall; /* no. of write slow paths */ atomic64_t miss_free; /* no. of missed free */ #ifdef CONFIG_ZRAM_WRITEBACK atomic64_t bd_count; /* no. of pages in backing device */ From d7fdc5a620aeecf96074cf8c1b852539b12cb067 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:17 +0900 Subject: [PATCH 173/431] zram: limit max recompress prio to num_active_comps Use the actual number of algorithms zram was configured with instead of the theoretical limit of ZRAM_MAX_COMPS. Also make sure that min prio is not above max prio.
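For example, on a device configured with one primary and one secondary algorithm (num_active_comps == 2), the only valid recompression priority is ZRAM_SECONDARY_COMP and prio_max gets clamped to 2 rather than ZRAM_MAX_COMPS. A condensed sketch of the added bounds check, using the names from the hunk below:

    prio_max = min(prio_max, (u32)zram->num_active_comps);
    if (prio >= prio_max) {
            ret = -EINVAL;          /* no configured algorithm left to try */
            goto release_init_lock;
    }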
Link: https://lkml.kernel.org/r/20250303022425.285971-9-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fc9321af3ef4..776c31606eec 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2027,16 +2027,19 @@ static ssize_t recompress_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; struct zram *zram = dev_to_zram(dev); char *args, *param, *val, *algo = NULL; u64 num_recomp_pages = ULLONG_MAX; struct zram_pp_ctl *ctl = NULL; struct zram_pp_slot *pps; u32 mode = 0, threshold = 0; u32 prio, prio_max; struct page *page; ssize_t ret; + prio = ZRAM_SECONDARY_COMP; + prio_max = zram->num_active_comps; + args = skip_spaces(buf); while (*args) { args = next_arg(args, &param, &val); @@ -2089,7 +2092,7 @@ static ssize_t recompress_store(struct device *dev, if (prio == ZRAM_PRIMARY_COMP) prio = ZRAM_SECONDARY_COMP; - prio_max = min(prio + 1, ZRAM_MAX_COMPS); + prio_max = prio + 1; continue; } } @@ -2117,7 +2120,7 @@ static ssize_t recompress_store(struct device *dev, continue; if (!strcmp(zram->comp_algs[prio], algo)) { - prio_max = min(prio + 1, ZRAM_MAX_COMPS); + prio_max = prio + 1; found = true; break; } @@ -2129,6 +2132,12 @@ static ssize_t recompress_store(struct device *dev, } } + prio_max = min(prio_max, (u32)zram->num_active_comps); + if (prio >= prio_max) { + ret = -EINVAL; + goto release_init_lock; + } + page = alloc_page(GFP_KERNEL); if (!page) { ret = -ENOMEM; From 9724bef96df47ba0dec907416cfa0b5e1cd7202e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:18 +0900 Subject: [PATCH 174/431] zram: filter out recomp targets based on priority Do not select for post-processing slots that are already compressed with the same or a higher priority compression algorithm. This should save some memory, as previously we would still put those entries into corresponding post-processing buckets and filter them out later in recompress_slot().
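The scan-time filter amounts to a single extra comparison per slot (a condensed sketch of the check added below); skipping here means no zram_pp_slot is allocated for an entry that recompress_slot() would reject anyway:

    /* already compressed with the same or a higher priority algorithm */
    if (zram_get_priority(zram, index) + 1 >= prio_max)
            goto next;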
Link: https://lkml.kernel.org/r/20250303022425.285971-10-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 776c31606eec..6dee885bef9b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1823,7 +1823,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, #define RECOMPRESS_IDLE (1 << 0) #define RECOMPRESS_HUGE (1 << 1) -static int scan_slots_for_recompress(struct zram *zram, u32 mode, +static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max, struct zram_pp_ctl *ctl) { unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; @@ -1855,6 +1855,10 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; + /* Already compressed with same of higher priority */ + if (zram_get_priority(zram, index) + 1 >= prio_max) + goto next; + pps->index = index; place_pp_slot(zram, ctl, pps); pps = NULL; @@ -1911,6 +1915,16 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, zram_clear_flag(zram, index, ZRAM_IDLE); class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old); + + prio = max(prio, zram_get_priority(zram, index) + 1); + /* + * Recompression slots scan should not select slots that are + * already compressed with a higher priority algorithm, but + * just in case + */ + if (prio >= prio_max) + return 0; + /* * Iterate the secondary comp algorithms list (in order of priority) * and try to recompress the page. @@ -1919,13 +1933,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (!zram->comps[prio]) continue; - /* - * Skip if the object is already re-compressed with a higher - * priority algorithm (or same algorithm). - */ - if (prio <= zram_get_priority(zram, index)) - continue; - num_recomps++; zstrm = zcomp_stream_get(zram->comps[prio]); src = kmap_local_page(page); @@ -2150,7 +2157,7 @@ static ssize_t recompress_store(struct device *dev, goto release_init_lock; } - scan_slots_for_recompress(zram, mode, ctl); + scan_slots_for_recompress(zram, mode, prio_max, ctl); ret = len; while ((pps = select_pp_slot(ctl))) { From b0624f0b223420047625eaff66af9e9d6d3df85f Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:19 +0900 Subject: [PATCH 175/431] zram: rework recompression loop This reworks recompression loop handling: - set a rule that stream-put NULLs the stream pointer If the loop returns with a non-NULL stream then it's a successful recompression, otherwise the stream should always be NULL. - do not count the number of recompressions Mark object as incompressible as soon as the algorithm with the highest priority failed to compress that object. - count compression errors as resource usage Even if compression has failed, we still need to bump num_recomp_pages counter. 
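A condensed sketch of the resulting loop contract; the class-index and threshold comparisons are folded into a single made-up helper, keeps_enough_savings(), purely for brevity:

    for (; prio < prio_max; prio++) {
            if (!zram->comps[prio])
                    continue;

            zstrm = zcomp_stream_get(zram->comps[prio]);
            src = kmap_local_page(page);
            ret = zcomp_compress(zram->comps[prio], zstrm, src, &comp_len_new);
            kunmap_local(src);

            if (ret) {
                    /* compression error: stop and report ret to the caller */
                    zcomp_stream_put(zstrm);
                    zstrm = NULL;
                    break;
            }

            if (!keeps_enough_savings(comp_len_new)) {
                    /* no gain: release the stream, try the next priority */
                    zcomp_stream_put(zstrm);
                    zstrm = NULL;
                    continue;
            }

            /* success: leave the loop with zstrm still held (non-NULL) */
            break;
    }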
Link: https://lkml.kernel.org/r/20250303022425.285971-11-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 54 +++++++++++++---------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 6dee885bef9b..bb88b63d193b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1888,9 +1888,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, unsigned int comp_len_new; unsigned int class_index_old; unsigned int class_index_new; - u32 num_recomps = 0; void *src, *dst; - int ret; + int ret = 0; handle_old = zram_get_handle(zram, index); if (!handle_old) @@ -1933,7 +1932,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (!zram->comps[prio]) continue; - num_recomps++; zstrm = zcomp_stream_get(zram->comps[prio]); src = kmap_local_page(page); ret = zcomp_compress(zram->comps[prio], zstrm, @@ -1942,7 +1940,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (ret) { zcomp_stream_put(zstrm); - return ret; + zstrm = NULL; + break; } class_index_new = zs_lookup_class_index(zram->mem_pool, @@ -1952,6 +1951,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (class_index_new >= class_index_old || (threshold && comp_len_new >= threshold)) { zcomp_stream_put(zstrm); + zstrm = NULL; continue; } @@ -1959,14 +1959,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, break; } - /* - * We did not try to recompress, e.g. when we have only one - * secondary algorithm and the page is already recompressed - * using that algorithm - */ - if (!zstrm) - return 0; - /* * Decrement the limit (if set) on pages we can recompress, even * when current recompression was unsuccessful or did not compress @@ -1976,38 +1968,32 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, if (*num_recomp_pages) *num_recomp_pages -= 1; - if (class_index_new >= class_index_old) { + /* Compression error */ + if (ret) + return ret; + + if (!zstrm) { /* * Secondary algorithms failed to re-compress the page - * in a way that would save memory, mark the object as - * incompressible so that we will not try to compress - * it again. + * in a way that would save memory. * - * We need to make sure that all secondary algorithms have - * failed, so we test if the number of recompressions matches - * the number of active secondary algorithms. + * Mark the object incompressible if the max-priority + * algorithm couldn't re-compress it. */ - if (num_recomps == zram->num_active_comps - 1) - zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); + if (prio < zram->num_active_comps) + return 0; + zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE); return 0; } - /* Successful recompression but above threshold */ - if (threshold && comp_len_new >= threshold) - return 0; - /* - * No direct reclaim (slow path) for handle allocation and no - * re-compression attempt (unlike in zram_write_bvec()) since - * we already have stored that object in zsmalloc. If we cannot - * alloc memory for recompressed object then we bail out and - * simply keep the old (existing) object in zsmalloc. + * We are holding per-CPU stream mutex and entry lock so better + * avoid direct reclaim. 
Allocation error is not fatal since + * we still have the old object in the mem_pool. */ handle_new = zs_malloc(zram->mem_pool, comp_len_new, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle_new)) { zcomp_stream_put(zstrm); return PTR_ERR((void *)handle_new); From 7e1b0212d4d59281a80c836841539a91b724bf24 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:20 +0900 Subject: [PATCH 176/431] zram: move post-processing target allocation Allocate post-processing target in place_pp_slot(). This simplifies scan_slots_for_writeback() and scan_slots_for_recompress() loops because we don't need to track pps pointer state anymore. Previously we have to explicitly NULL the point if it has been added to a post-processing bucket or re-use previously allocated pointer otherwise and make sure we don't leak the memory in the end. We are also fine doing GFP_NOIO allocation, as post-processing can be called under memory pressure so we better pick as many slots as we can as soon as we can and start post-processing them, possibly saving the memory. Allocation failure there is not fatal, we will post-process whatever we put into the buckets on previous iterations. Link: https://lkml.kernel.org/r/20250303022425.285971-12-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 50 +++++++++++++++-------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index bb88b63d193b..f6e887f94b71 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -295,15 +295,24 @@ static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl) kfree(ctl); } -static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, - struct zram_pp_slot *pps) +static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl, + u32 index) { - u32 idx; + struct zram_pp_slot *pps; + u32 bid; - idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; - list_add(&pps->entry, &ctl->pp_buckets[idx]); + pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN); + if (!pps) + return false; + + INIT_LIST_HEAD(&pps->entry); + pps->index = index; + + bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE; + list_add(&pps->entry, &ctl->pp_buckets[bid]); zram_set_flag(zram, pps->index, ZRAM_PP_SLOT); + return true; } static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl) @@ -737,15 +746,8 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode, unsigned long index, struct zram_pp_ctl *ctl) { - struct zram_pp_slot *pps = NULL; - for (; nr_pages != 0; index++, nr_pages--) { - if (!pps) - pps = kmalloc(sizeof(*pps), GFP_KERNEL); - if (!pps) - return -ENOMEM; - - INIT_LIST_HEAD(&pps->entry); + bool ok = true; zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -765,14 +767,13 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode, !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; - pps->index = index; - place_pp_slot(zram, ctl, pps); - pps = NULL; + ok = place_pp_slot(zram, ctl, index); next: zram_slot_unlock(zram, index); + if (!ok) + break; } - kfree(pps); return 0; } @@ -1827,16 +1828,10 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 
prio_max, struct zram_pp_ctl *ctl) { unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct zram_pp_slot *pps = NULL; unsigned long index; for (index = 0; index < nr_pages; index++) { - if (!pps) - pps = kmalloc(sizeof(*pps), GFP_KERNEL); - if (!pps) - return -ENOMEM; - - INIT_LIST_HEAD(&pps->entry); + bool ok = true; zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -1859,14 +1854,13 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max, if (zram_get_priority(zram, index) + 1 >= prio_max) goto next; - pps->index = index; - place_pp_slot(zram, ctl, pps); - pps = NULL; + ok = place_pp_slot(zram, ctl, index); next: zram_slot_unlock(zram, index); + if (!ok) + break; } - kfree(pps); return 0; } From 0d6fa44e4e25f57cca8f9a638369d5bf58412cb6 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:21 +0900 Subject: [PATCH 177/431] zsmalloc: rename pool lock The old name comes from the times when the pool did not have compaction (defragmentation). Rename it to ->lock because these days it synchronizes not only migration. Link: https://lkml.kernel.org/r/20250303022425.285971-13-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Reviewed-by: Yosry Ahmed Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 6d0e47f7ae33..2e338cde0d21 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -18,7 +18,7 @@ /* * lock ordering: * page_lock - * pool->migrate_lock + * pool->lock * class->lock * zspage->lock */ @@ -223,8 +223,8 @@ struct zs_pool { #ifdef CONFIG_COMPACTION struct work_struct free_work; #endif - /* protect page/zspage migration */ - rwlock_t migrate_lock; + /* protect zspage migration/compaction */ + rwlock_t lock; atomic_t compaction_in_progress; }; @@ -1206,7 +1206,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, BUG_ON(in_interrupt()); /* It guarantees it can get zspage from handle safely */ - read_lock(&pool->migrate_lock); + read_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_location(obj, &zpdesc, &obj_idx); zspage = get_zspage(zpdesc); @@ -1218,7 +1218,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, * which is smaller granularity. */ migrate_read_lock(zspage); - read_unlock(&pool->migrate_lock); + read_unlock(&pool->lock); class = zspage_class(pool, zspage); off = offset_in_page(class->size * obj_idx); @@ -1450,16 +1450,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle) return; /* - * The pool->migrate_lock protects the race with zpage's migration + * The pool->lock protects the race with zpage's migration * so it's safe to get the page from handle. */ - read_lock(&pool->migrate_lock); + read_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_zpdesc(obj, &f_zpdesc); zspage = get_zspage(f_zpdesc); class = zspage_class(pool, zspage); spin_lock(&class->lock); - read_unlock(&pool->migrate_lock); + read_unlock(&pool->lock); class_stat_sub(class, ZS_OBJS_INUSE, 1); obj_free(class->size, obj); @@ -1796,7 +1796,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * The pool migrate_lock protects the race between zpage migration * and zs_free. 
*/ - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); class = zspage_class(pool, zspage); /* @@ -1833,7 +1833,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * Since we complete the data copy and set up new zspage structure, * it's okay to release migration_lock. */ - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); spin_unlock(&class->lock); migrate_write_unlock(zspage); @@ -1956,7 +1956,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, * protect the race between zpage migration and zs_free * as well as zpage allocation/free */ - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); spin_lock(&class->lock); while (zs_can_compact(class)) { int fg; @@ -1983,14 +1983,14 @@ static unsigned long __zs_compact(struct zs_pool *pool, src_zspage = NULL; if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 - || rwlock_is_contended(&pool->migrate_lock)) { + || rwlock_is_contended(&pool->lock)) { putback_zspage(class, dst_zspage); dst_zspage = NULL; spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); cond_resched(); - write_lock(&pool->migrate_lock); + write_lock(&pool->lock); spin_lock(&class->lock); } } @@ -2002,7 +2002,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(class, dst_zspage); spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + write_unlock(&pool->lock); return pages_freed; } @@ -2014,10 +2014,10 @@ unsigned long zs_compact(struct zs_pool *pool) unsigned long pages_freed = 0; /* - * Pool compaction is performed under pool->migrate_lock so it is basically + * Pool compaction is performed under pool->lock so it is basically * single-threaded. Having more than one thread in __zs_compact() - * will increase pool->migrate_lock contention, which will impact other - * zsmalloc operations that need pool->migrate_lock. + * will increase pool->lock contention, which will impact other + * zsmalloc operations that need pool->lock. */ if (atomic_xchg(&pool->compaction_in_progress, 1)) return 0; @@ -2139,7 +2139,7 @@ struct zs_pool *zs_create_pool(const char *name) return NULL; init_deferred_free(pool); - rwlock_init(&pool->migrate_lock); + rwlock_init(&pool->lock); atomic_set(&pool->compaction_in_progress, 0); pool->name = kstrdup(name, GFP_KERNEL); From e27af3f9360ee130b7ab0b274088f92146a0855b Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:22 +0900 Subject: [PATCH 178/431] zsmalloc: sleepable zspage reader-lock In order to implement preemptible object mapping we need a zspage lock that satisfies several preconditions: - it should be a reader-writer type of lock - it should be possible to hold it from any context, but also be preemptible if the context allows it - we never sleep while acquiring, but can sleep while holding in read mode An rwsemaphore doesn't suffice due to the atomicity requirements, and an rwlock doesn't satisfy the reader-preemptibility requirement. It's also worth mentioning that a per-zspage rwsem is a little too memory heavy (we can easily have double-digit megabytes used only on rwsemaphores). Switch over from rwlock_t to an atomic_t-based implementation of a reader-writer semaphore that satisfies all of the preconditions. The spin-lock based zspage_lock was suggested by Hillf Danton.
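Stripped of the lockdep annotations, the construct is a counter guarded by a spinlock: readers bump the counter and may then sleep, while writers only ever trylock and keep the spinlock held for the duration of the (atomic) write section. A minimal sketch of the idea, with shortened names; the zspage_lock added by the patch layers a lockdep map on top of this:

    struct zspage_lock {
            spinlock_t lock;
            int cnt;        /* 0: unlocked, >0: readers, -1: write-locked */
    };

    static void zsl_read_lock(struct zspage_lock *zsl)
    {
            spin_lock(&zsl->lock);  /* writers are atomic, spinning is fine */
            zsl->cnt++;
            spin_unlock(&zsl->lock);
    }

    static void zsl_read_unlock(struct zspage_lock *zsl)
    {
            spin_lock(&zsl->lock);
            zsl->cnt--;
            spin_unlock(&zsl->lock);
    }

    static bool zsl_write_trylock(struct zspage_lock *zsl)
    {
            spin_lock(&zsl->lock);
            if (zsl->cnt == 0) {
                    zsl->cnt = -1;  /* stay under the spinlock while writing */
                    return true;
            }
            spin_unlock(&zsl->lock);
            return false;
    }

    static void zsl_write_unlock(struct zspage_lock *zsl)
    {
            zsl->cnt = 0;
            spin_unlock(&zsl->lock);
    }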
Link: https://lkml.kernel.org/r/20250303022425.285971-14-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Suggested-by: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 166 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 114 insertions(+), 52 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2e338cde0d21..818bf381a517 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -257,6 +257,15 @@ static inline void free_zpdesc(struct zpdesc *zpdesc) __free_page(page); } +#define ZS_PAGE_UNLOCKED 0 +#define ZS_PAGE_WRLOCKED -1 + +struct zspage_lock { + spinlock_t lock; + int cnt; + struct lockdep_map dep_map; +}; + struct zspage { struct { unsigned int huge:HUGE_BITS; @@ -269,7 +278,7 @@ struct zspage { struct zpdesc *first_zpdesc; struct list_head list; /* fullness list */ struct zs_pool *pool; - rwlock_t lock; + struct zspage_lock zsl; }; struct mapping_area { @@ -279,6 +288,84 @@ struct mapping_area { enum zs_mapmode vm_mm; /* mapping mode */ }; +static void zspage_lock_init(struct zspage *zspage) +{ + static struct lock_class_key __key; + struct zspage_lock *zsl = &zspage->zsl; + + lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0); + spin_lock_init(&zsl->lock); + zsl->cnt = ZS_PAGE_UNLOCKED; +} + +/* + * The zspage lock can be held from atomic contexts, but it needs to remain + * preemptible when held for reading because it remains held outside of those + * atomic contexts, otherwise we unnecessarily lose preemptibility. + * + * To achieve this, the following rules are enforced on readers and writers: + * + * - Writers are blocked by both writers and readers, while readers are only + * blocked by writers (i.e. normal rwlock semantics). + * + * - Writers are always atomic (to allow readers to spin waiting for them). + * + * - Writers always use trylock (as the lock may be held be sleeping readers). + * + * - Readers may spin on the lock (as they can only wait for atomic writers). + * + * - Readers may sleep while holding the lock (as writes only use trylock). 
+ */ +static void zspage_read_lock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt++; + spin_unlock(&zsl->lock); + + lock_acquired(&zsl->dep_map, _RET_IP_); +} + +static void zspage_read_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + spin_lock(&zsl->lock); + zsl->cnt--; + spin_unlock(&zsl->lock); +} + +static __must_check bool zspage_write_trylock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + spin_lock(&zsl->lock); + if (zsl->cnt == ZS_PAGE_UNLOCKED) { + zsl->cnt = ZS_PAGE_WRLOCKED; + rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_); + lock_acquired(&zsl->dep_map, _RET_IP_); + return true; + } + + spin_unlock(&zsl->lock); + return false; +} + +static void zspage_write_unlock(struct zspage *zspage) +{ + struct zspage_lock *zsl = &zspage->zsl; + + rwsem_release(&zsl->dep_map, _RET_IP_); + + zsl->cnt = ZS_PAGE_UNLOCKED; + spin_unlock(&zsl->lock); +} + /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ static void SetZsHugePage(struct zspage *zspage) { @@ -290,12 +377,6 @@ static bool ZsHugePage(struct zspage *zspage) return zspage->huge; } -static void migrate_lock_init(struct zspage *zspage); -static void migrate_read_lock(struct zspage *zspage); -static void migrate_read_unlock(struct zspage *zspage); -static void migrate_write_lock(struct zspage *zspage); -static void migrate_write_unlock(struct zspage *zspage); - #ifdef CONFIG_COMPACTION static void kick_deferred_free(struct zs_pool *pool); static void init_deferred_free(struct zs_pool *pool); @@ -992,7 +1073,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, return NULL; zspage->magic = ZSPAGE_MAGIC; - migrate_lock_init(zspage); + zspage->pool = pool; + zspage->class = class->index; + zspage_lock_init(zspage); for (i = 0; i < class->pages_per_zspage; i++) { struct zpdesc *zpdesc; @@ -1015,8 +1098,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, create_page_chain(class, zspage, zpdescs); init_zspage(class, zspage); - zspage->pool = pool; - zspage->class = class->index; return zspage; } @@ -1217,7 +1298,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, * zs_unmap_object API so delegate the locking from class to zspage * which is smaller granularity. */ - migrate_read_lock(zspage); + zspage_read_lock(zspage); read_unlock(&pool->lock); class = zspage_class(pool, zspage); @@ -1277,7 +1358,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) } local_unlock(&zs_map_area.lock); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); } EXPORT_SYMBOL_GPL(zs_unmap_object); @@ -1671,18 +1752,18 @@ static void lock_zspage(struct zspage *zspage) /* * Pages we haven't locked yet can be migrated off the list while we're * trying to lock them, so we need to be careful and only attempt to - * lock each page under migrate_read_lock(). Otherwise, the page we lock + * lock each page under zspage_read_lock(). Otherwise, the page we lock * may no longer belong to the zspage. This means that we may wait for * the wrong page to unlock, so we must take a reference to the page - * prior to waiting for it to unlock outside migrate_read_lock(). + * prior to waiting for it to unlock outside zspage_read_lock(). 
*/ while (1) { - migrate_read_lock(zspage); + zspage_read_lock(zspage); zpdesc = get_first_zpdesc(zspage); if (zpdesc_trylock(zpdesc)) break; zpdesc_get(zpdesc); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); zpdesc_wait_locked(zpdesc); zpdesc_put(zpdesc); } @@ -1693,41 +1774,16 @@ static void lock_zspage(struct zspage *zspage) curr_zpdesc = zpdesc; } else { zpdesc_get(zpdesc); - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); zpdesc_wait_locked(zpdesc); zpdesc_put(zpdesc); - migrate_read_lock(zspage); + zspage_read_lock(zspage); } } - migrate_read_unlock(zspage); + zspage_read_unlock(zspage); } #endif /* CONFIG_COMPACTION */ -static void migrate_lock_init(struct zspage *zspage) -{ - rwlock_init(&zspage->lock); -} - -static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock) -{ - read_lock(&zspage->lock); -} - -static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock) -{ - read_unlock(&zspage->lock); -} - -static void migrate_write_lock(struct zspage *zspage) -{ - write_lock(&zspage->lock); -} - -static void migrate_write_unlock(struct zspage *zspage) -{ - write_unlock(&zspage->lock); -} - #ifdef CONFIG_COMPACTION static const struct movable_operations zsmalloc_mops; @@ -1785,9 +1841,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page, VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc)); - /* We're committed, tell the world that this is a Zsmalloc page. */ - __zpdesc_set_zsmalloc(newzpdesc); - /* The page is locked, so this pointer must remain valid */ zspage = get_zspage(zpdesc); pool = zspage->pool; @@ -1803,8 +1856,15 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * the class lock protects zpage alloc/free in the zspage. */ spin_lock(&class->lock); - /* the migrate_write_lock protects zpage access via zs_map_object */ - migrate_write_lock(zspage); + /* the zspage write_lock protects zpage access via zs_map_object */ + if (!zspage_write_trylock(zspage)) { + spin_unlock(&class->lock); + write_unlock(&pool->lock); + return -EINVAL; + } + + /* We're committed, tell the world that this is a Zsmalloc page. */ + __zpdesc_set_zsmalloc(newzpdesc); offset = get_first_obj_offset(zpdesc); s_addr = kmap_local_zpdesc(zpdesc); @@ -1835,7 +1895,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, */ write_unlock(&pool->lock); spin_unlock(&class->lock); - migrate_write_unlock(zspage); + zspage_write_unlock(zspage); zpdesc_get(newzpdesc); if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) { @@ -1971,9 +2031,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, if (!src_zspage) break; - migrate_write_lock(src_zspage); + if (!zspage_write_trylock(src_zspage)) + break; + migrate_zspage(pool, src_zspage, dst_zspage); - migrate_write_unlock(src_zspage); + zspage_write_unlock(src_zspage); fg = putback_zspage(class, src_zspage); if (fg == ZS_INUSE_RATIO_0) { From 44f76413496ec343da0d8292ceecdcabe3e6ec16 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:23 +0900 Subject: [PATCH 179/431] zsmalloc: introduce new object mapping API Current object mapping API is a little cumbersome. First, it's inconsistent, sometimes it returns with page-faults disabled and sometimes with page-faults enabled. Second, and most importantly, it enforces atomicity restrictions on its users. zs_map_object() has to return a liner object address which is not always possible because some objects span multiple physical (non-contiguous) pages. 
For such objects zsmalloc uses a per-CPU buffer to which object's data is copied before a pointer to that per-CPU buffer is returned back to the caller. This leads to another, final, issue - extra memcpy(). Since the caller gets a pointer to per-CPU buffer it can memcpy() data only to that buffer, and during zs_unmap_object() zsmalloc will memcpy() from that per-CPU buffer to physical pages that object in question spans across. New API splits functions by access mode: - zs_obj_read_begin(handle, local_copy) Returns a pointer to handle memory. For objects that span two physical pages a local_copy buffer is used to store object's data before the address is returned to the caller. Otherwise the object's page is kmap_local mapped directly. - zs_obj_read_end(handle, buf) Unmaps the page if it was kmap_local mapped by zs_obj_read_begin(). - zs_obj_write(handle, buf, len) Copies len-bytes from compression buffer to handle memory (takes care of objects that span two pages). This does not need any additional (e.g. per-CPU) buffers and writes the data directly to zsmalloc pool pages. In terms of performance, on a synthetic and completely reproducible test that allocates fixed number of objects of fixed sizes and iterates over those objects, first mapping in RO then in RW mode: OLD API ======= 3 first results out of 10 369,205,778 instructions # 0.80 insn per cycle 40,467,926 branches # 113.732 M/sec 369,002,122 instructions # 0.62 insn per cycle 40,426,145 branches # 189.361 M/sec 369,036,706 instructions # 0.63 insn per cycle 40,430,860 branches # 204.105 M/sec [..] NEW API ======= 3 first results out of 10 265,799,293 instructions # 0.51 insn per cycle 29,834,567 branches # 170.281 M/sec 265,765,970 instructions # 0.55 insn per cycle 29,829,019 branches # 161.602 M/sec 265,764,702 instructions # 0.51 insn per cycle 29,828,015 branches # 189.677 M/sec [..] T-test on all 10 runs ===================== Difference at 95.0% confidence -1.03219e+08 +/- 55308.7 -27.9705% +/- 0.0149878% (Student's t, pooled s = 58864.4) The old API will stay around until the remaining users switch to the new one. After that we'll also remove zsmalloc per-CPU buffer and CPU hotplug handling. The split of map(RO) and map(WO) into read_{begin/end}/write is suggested by Yosry Ahmed. 
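As an illustration only (not part of the patch), a caller-side sketch of the new API using the signatures added below. The names pool, handle, local_copy, dst_buf, src_buf and obj_len are placeholders; local_copy is a caller-provided PAGE_SIZE scratch buffer that the read path uses only when the object spans two physical pages:

  void *mem;

  /* read: returns either a kmap_local'ed pointer into the pool page
   * or a pointer to the caller's local_copy buffer */
  mem = zs_obj_read_begin(pool, handle, local_copy);
  memcpy(dst_buf, mem, obj_len);
  zs_obj_read_end(pool, handle, mem);

  /* write: copies straight into the pool pages, no per-CPU buffer */
  zs_obj_write(pool, handle, src_buf, obj_len);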
Link: https://lkml.kernel.org/r/20250303022425.285971-15-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Suggested-by: Yosry Ahmed Reviewed-by: Yosry Ahmed Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Signed-off-by: Andrew Morton --- include/linux/zsmalloc.h | 8 +++ mm/zsmalloc.c | 125 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 133 insertions(+) diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index a48cd0ffe57d..7d70983cf398 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -58,4 +58,12 @@ unsigned long zs_compact(struct zs_pool *pool); unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size); void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); + +void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, + void *local_copy); +void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, + void *handle_mem); +void zs_obj_write(struct zs_pool *pool, unsigned long handle, + void *handle_mem, size_t mem_len); + #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 818bf381a517..63c99db71dc1 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1362,6 +1362,131 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) } EXPORT_SYMBOL_GPL(zs_unmap_object); +void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, + void *local_copy) +{ + struct zspage *zspage; + struct zpdesc *zpdesc; + unsigned long obj, off; + unsigned int obj_idx; + struct size_class *class; + void *addr; + + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); + obj = handle_to_obj(handle); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); + + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); + + class = zspage_class(pool, zspage); + off = offset_in_page(class->size * obj_idx); + + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ + addr = kmap_local_zpdesc(zpdesc); + addr += off; + } else { + size_t sizes[2]; + + /* this object spans two pages */ + sizes[0] = PAGE_SIZE - off; + sizes[1] = class->size - sizes[0]; + addr = local_copy; + + memcpy_from_page(addr, zpdesc_page(zpdesc), + off, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_from_page(addr + sizes[0], + zpdesc_page(zpdesc), + 0, sizes[1]); + } + + if (!ZsHugePage(zspage)) + addr += ZS_HANDLE_SIZE; + + return addr; +} +EXPORT_SYMBOL_GPL(zs_obj_read_begin); + +void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, + void *handle_mem) +{ + struct zspage *zspage; + struct zpdesc *zpdesc; + unsigned long obj, off; + unsigned int obj_idx; + struct size_class *class; + + obj = handle_to_obj(handle); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); + class = zspage_class(pool, zspage); + off = offset_in_page(class->size * obj_idx); + + if (off + class->size <= PAGE_SIZE) { + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + handle_mem -= off; + kunmap_local(handle_mem); + } + + zspage_read_unlock(zspage); +} +EXPORT_SYMBOL_GPL(zs_obj_read_end); + +void zs_obj_write(struct zs_pool *pool, unsigned long handle, + void *handle_mem, size_t mem_len) +{ + struct zspage *zspage; + struct zpdesc *zpdesc; + unsigned long obj, off; + unsigned int obj_idx; + struct size_class *class; + + /* Guarantee we can get zspage from handle safely */ + read_lock(&pool->lock); + obj = handle_to_obj(handle); + 
obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc); + + /* Make sure migration doesn't move any pages in this zspage */ + zspage_read_lock(zspage); + read_unlock(&pool->lock); + + class = zspage_class(pool, zspage); + off = offset_in_page(class->size * obj_idx); + + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ + void *dst = kmap_local_zpdesc(zpdesc); + + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + memcpy(dst + off, handle_mem, mem_len); + kunmap_local(dst); + } else { + /* this object spans two pages */ + size_t sizes[2]; + + off += ZS_HANDLE_SIZE; + sizes[0] = PAGE_SIZE - off; + sizes[1] = mem_len - sizes[0]; + + memcpy_to_page(zpdesc_page(zpdesc), off, + handle_mem, sizes[0]); + zpdesc = get_next_zpdesc(zpdesc); + memcpy_to_page(zpdesc_page(zpdesc), 0, + handle_mem + sizes[0], sizes[1]); + } + + zspage_read_unlock(zspage); +} +EXPORT_SYMBOL_GPL(zs_obj_write); + /** * zs_huge_class_size() - Returns the size (in bytes) of the first huge * zsmalloc &size_class. From 82f91900c7222c6fae7884da133c5c81dca28d19 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:24 +0900 Subject: [PATCH 180/431] zram: switch to new zsmalloc object mapping API Use new read/write zsmalloc object API. For cases when RO mapped object spans two physical pages (requires temp buffer) compression streams now carry around one extra physical page. Link: https://lkml.kernel.org/r/20250303022425.285971-16-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zcomp.c | 4 +++- drivers/block/zram/zcomp.h | 2 ++ drivers/block/zram/zram_drv.c | 28 ++++++++++------------------ 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index cfdde2e0748a..a1d627054bb1 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -45,6 +45,7 @@ static const struct zcomp_ops *backends[] = { static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) { comp->ops->destroy_ctx(&zstrm->ctx); + vfree(zstrm->local_copy); vfree(zstrm->buffer); zstrm->buffer = NULL; } @@ -57,12 +58,13 @@ static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm) if (ret) return ret; + zstrm->local_copy = vzalloc(PAGE_SIZE); /* * allocate 2 pages. 
1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ zstrm->buffer = vzalloc(2 * PAGE_SIZE); - if (!zstrm->buffer) { + if (!zstrm->buffer || !zstrm->local_copy) { zcomp_strm_free(comp, zstrm); return -ENOMEM; } diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 23b8236b9090..25339ed1e07e 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -34,6 +34,8 @@ struct zcomp_strm { struct mutex lock; /* compression buffer */ void *buffer; + /* local copy of handle memory */ + void *local_copy; struct zcomp_ctx ctx; }; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f6e887f94b71..62aef12417a4 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1561,11 +1561,11 @@ static int read_incompressible_page(struct zram *zram, struct page *page, void *src, *dst; handle = zram_get_handle(zram, index); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + src = zs_obj_read_begin(zram->mem_pool, handle, NULL); dst = kmap_local_page(page); copy_page(dst, src); kunmap_local(dst); - zs_unmap_object(zram->mem_pool, handle); + zs_obj_read_end(zram->mem_pool, handle, src); return 0; } @@ -1583,11 +1583,11 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index) prio = zram_get_priority(zram, index); zstrm = zcomp_stream_get(zram->comps[prio]); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy); dst = kmap_local_page(page); ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); kunmap_local(dst); - zs_unmap_object(zram->mem_pool, handle); + zs_obj_read_end(zram->mem_pool, handle, src); zcomp_stream_put(zstrm); return ret; @@ -1683,7 +1683,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page, u32 index) { unsigned long handle; - void *src, *dst; + void *src; /* * This function is called from preemptible context so we don't need @@ -1701,11 +1701,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page, return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); src = kmap_local_page(page); - memcpy(dst, src, PAGE_SIZE); + zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE); kunmap_local(src); - zs_unmap_object(zram->mem_pool, handle); zram_slot_lock(zram, index); zram_set_flag(zram, index, ZRAM_HUGE); @@ -1726,7 +1724,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) int ret = 0; unsigned long handle; unsigned int comp_len; - void *dst, *mem; + void *mem; struct zcomp_strm *zstrm; unsigned long element; bool same_filled; @@ -1773,11 +1771,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); - - memcpy(dst, zstrm->buffer, comp_len); + zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len); zcomp_stream_put(zstrm); - zs_unmap_object(zram->mem_pool, handle); zram_slot_lock(zram, index); zram_set_handle(zram, index, handle); @@ -1882,7 +1877,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, unsigned int comp_len_new; unsigned int class_index_old; unsigned int class_index_new; - void *src, *dst; + void *src; int ret = 0; handle_old = zram_get_handle(zram, index); @@ -1993,12 +1988,9 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, return PTR_ERR((void *)handle_new); } - dst = 
zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO); - memcpy(dst, zstrm->buffer, comp_len_new); + zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new); zcomp_stream_put(zstrm); - zs_unmap_object(zram->mem_pool, handle_new); - zram_free_page(zram, index); zram_set_handle(zram, index, handle_new); zram_set_obj_size(zram, index, comp_len_new); From f66140eb71053c0a444421c7f04c7aecc4e31716 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:25 +0900 Subject: [PATCH 181/431] zram: permit reclaim in zstd custom allocator When configured with pre-trained compression/decompression dictionary support, zstd requires custom memory allocator, which it calls internally from compression()/decompression() routines. That means allocation from atomic context (either under entry spin-lock, or per-CPU local-lock or both). Now, with non-atomic zram read()/write(), those limitations are relaxed and we can allow direct and indirect reclaim. Link: https://lkml.kernel.org/r/20250303022425.285971-17-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/backend_zstd.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c index 1184c0036f44..22c8067536f3 100644 --- a/drivers/block/zram/backend_zstd.c +++ b/drivers/block/zram/backend_zstd.c @@ -24,19 +24,10 @@ struct zstd_params { /* * For C/D dictionaries we need to provide zstd with zstd_custom_mem, * which zstd uses internally to allocate/free memory when needed. - * - * This means that allocator.customAlloc() can be called from zcomp_compress() - * under local-lock (per-CPU compression stream), in which case we must use - * GFP_ATOMIC. - * - * Another complication here is that we can be configured as a swap device. */ static void *zstd_custom_alloc(void *opaque, size_t size) { - if (!preemptible()) - return kvzalloc(size, GFP_ATOMIC); - - return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN); + return kvzalloc(size, GFP_NOIO | __GFP_NOWARN); } static void zstd_custom_free(void *opaque, void *address) From 5b683d4e987d2a9067d9146d8724b6ae1633519e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:26 +0900 Subject: [PATCH 182/431] zram: do not leak page on recompress_store error path Ensure the page used for local object data is freed on error out path. 
Link: https://lkml.kernel.org/r/20250303022425.285971-18-senozhatsky@chromium.org Fixes: 3f909a60cec1 ("zram: rework recompress target selection strategy") Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 62aef12417a4..e50a5a216974 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2013,7 +2013,7 @@ static ssize_t recompress_store(struct device *dev, struct zram_pp_slot *pps; u32 mode = 0, threshold = 0; u32 prio, prio_max; - struct page *page; + struct page *page = NULL; ssize_t ret; prio = ZRAM_SECONDARY_COMP; @@ -2157,9 +2157,9 @@ static ssize_t recompress_store(struct device *dev, cond_resched(); } - __free_page(page); - release_init_lock: + if (page) + __free_page(page); release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); From a6d2193b3ef53d5c5a2c56d397227f5e891f2df3 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:27 +0900 Subject: [PATCH 183/431] zram: do not leak page on writeback_store error path Ensure the page used for local object data is freed on error out path. Link: https://lkml.kernel.org/r/20250303022425.285971-19-senozhatsky@chromium.org Fixes: 330edc2bc059 (zram: rework writeback target selection strategy) Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index e50a5a216974..fda7d8624889 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -787,7 +787,7 @@ static ssize_t writeback_store(struct device *dev, unsigned long index = 0; struct bio bio; struct bio_vec bio_vec; - struct page *page; + struct page *page = NULL; ssize_t ret = len; int mode, err; unsigned long blk_idx = 0; @@ -929,8 +929,10 @@ static ssize_t writeback_store(struct device *dev, if (blk_idx) free_block_bdev(zram, blk_idx); - __free_page(page); + release_init_lock: + if (page) + __free_page(page); release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); From 2ad951865aa7849f203a771d59ec1a0f1356a34c Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 3 Mar 2025 11:03:28 +0900 Subject: [PATCH 184/431] zram: add might_sleep to zcomp API Explicitly state that zcomp compress/decompress must be called from non-atomic context. 
Link: https://lkml.kernel.org/r/20250303022425.285971-20-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Hillf Danton Cc: Kairui Song Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- drivers/block/zram/zcomp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index a1d627054bb1..d26a58c67e95 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -146,6 +146,7 @@ int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, }; int ret; + might_sleep(); ret = comp->ops->compress(comp->params, &zstrm->ctx, &req); if (!ret) *dst_len = req.dst_len; @@ -162,6 +163,7 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm, .dst_len = PAGE_SIZE, }; + might_sleep(); return comp->ops->decompress(comp->params, &zstrm->ctx, &req); } From 800ddf3cd74bda8061105cab7cc169b9a2667f44 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:12 +0000 Subject: [PATCH 185/431] selftests/mm: report errno when things fail in gup_longterm Patch series "selftests/mm: Some cleanups from trying to run them", v4. I never had much luck running mm selftests so I spent a few hours digging into why. Looks like most of the reason is missing SKIP checks, so this series is just adding a bunch of those that I found. I did not do anything like all of them, just the ones I spotted in gup_longterm, gup_test, mmap, userfaultfd and memfd_secret. It's a bit unfortunate to have to skip those tests when ftruncate() fails, but I don't have time to dig deep enough into it to actually make them pass. I have observed the issue on 9pfs and heard rumours that NFS has a similar problem. I'm now able to run these test groups successfully: - mmap - gup_test - compaction - migration - page_frag - userfaultfd - mlock I've never gone past "Waiting for hugetlb memory to get depleted", in the hugetlb tests. I don't know if they are stuck or if they would eventually work if I was patient enough (testing on a 1G machine). I have not investigated further. I had some issues with mlock tests failing due to -ENOSRCH from mlock2(), I can no longer reproduce that though, things work OK now. Of the remaining tests there may be others that work fine, but there's no convenient way to survey the whole output of run_vmtests.sh so I'm just going test by test here. In my spare moments I am slowly chipping away at a setup to run these tests continuously in a reasonably hermetic QEMU environment via virtme-ng: https://github.com/bjackman/linux/blob/5fad4b9c592290f38e0f8bc73c9abb9c99d8787c/README.md Hopefully that will eventually offer a way to provide a "canned" environment where the tests are known to work, which can be fairly easily reproduced by any developer. This patch (of 12): Just reporting failure doesn't tell you what went wrong. This can fail in different ways so report errno to help the reader get started debugging. 
Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-0-dec210a658f5@google.com Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-1-dec210a658f5@google.com Signed-off-by: Brendan Jackman Reviewed-by: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/gup_longterm.c | 37 +++++++++++++---------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 9423ad439a61..15335820656b 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -96,13 +96,13 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) int ret; if (ftruncate(fd, size)) { - ksft_test_result_fail("ftruncate() failed\n"); + ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno)); return; } if (fallocate(fd, 0, 0, size)) { if (size == pagesize) - ksft_test_result_fail("fallocate() failed\n"); + ksft_test_result_fail("fallocate() failed (%s)\n", strerror(errno)); else ksft_test_result_skip("need more free huge pages\n"); return; @@ -112,7 +112,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) shared ? MAP_SHARED : MAP_PRIVATE, fd, 0); if (mem == MAP_FAILED) { if (size == pagesize || shared) - ksft_test_result_fail("mmap() failed\n"); + ksft_test_result_fail("mmap() failed (%s)\n", strerror(errno)); else ksft_test_result_skip("need more free huge pages\n"); return; @@ -130,7 +130,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) */ ret = mprotect(mem, size, PROT_READ); if (ret) { - ksft_test_result_fail("mprotect() failed\n"); + ksft_test_result_fail("mprotect() failed (%s)\n", strerror(errno)); goto munmap; } /* FALLTHROUGH */ @@ -165,18 +165,20 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) args.flags |= rw ? PIN_LONGTERM_TEST_FLAG_USE_WRITE : 0; ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args); if (ret && errno == EINVAL) { - ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n"); + ksft_test_result_skip("PIN_LONGTERM_TEST_START failed (EINVAL)n"); break; } else if (ret && errno == EFAULT) { ksft_test_result(!should_work, "Should have failed\n"); break; } else if (ret) { - ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n"); + ksft_test_result_fail("PIN_LONGTERM_TEST_START failed (%s)\n", + strerror(errno)); break; } if (ioctl(gup_fd, PIN_LONGTERM_TEST_STOP)) - ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n"); + ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed (%s)\n", + strerror(errno)); /* * TODO: if the kernel ever supports long-term R/W pinning on @@ -202,7 +204,8 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) /* Skip on errors, as we might just lack kernel support. */ ret = io_uring_queue_init(1, &ring, 0); if (ret < 0) { - ksft_test_result_skip("io_uring_queue_init() failed\n"); + ksft_test_result_skip("io_uring_queue_init() failed (%s)\n", + strerror(-ret)); break; } /* @@ -215,13 +218,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) /* Only new kernels return EFAULT. */ if (ret && (errno == ENOSPC || errno == EOPNOTSUPP || errno == EFAULT)) { - ksft_test_result(!should_work, "Should have failed\n"); + ksft_test_result(!should_work, "Should have failed (%s)\n", + strerror(errno)); } else if (ret) { /* * We might just lack support or have insufficient * MEMLOCK limits. 
*/ - ksft_test_result_skip("io_uring_register_buffers() failed\n"); + ksft_test_result_skip("io_uring_register_buffers() failed (%s)\n", + strerror(-ret)); } else { ksft_test_result(should_work, "Should have worked\n"); io_uring_unregister_buffers(&ring); @@ -249,7 +254,7 @@ static void run_with_memfd(test_fn fn, const char *desc) fd = memfd_create("test", 0); if (fd < 0) { - ksft_test_result_fail("memfd_create() failed\n"); + ksft_test_result_fail("memfd_create() failed (%s)\n", strerror(errno)); return; } @@ -266,13 +271,13 @@ static void run_with_tmpfile(test_fn fn, const char *desc) file = tmpfile(); if (!file) { - ksft_test_result_fail("tmpfile() failed\n"); + ksft_test_result_fail("tmpfile() failed (%s)\n", strerror(errno)); return; } fd = fileno(file); if (fd < 0) { - ksft_test_result_fail("fileno() failed\n"); + ksft_test_result_fail("fileno() failed (%s)\n", strerror(errno)); goto close; } @@ -290,12 +295,12 @@ static void run_with_local_tmpfile(test_fn fn, const char *desc) fd = mkstemp(filename); if (fd < 0) { - ksft_test_result_fail("mkstemp() failed\n"); + ksft_test_result_fail("mkstemp() failed (%s)\n", strerror(errno)); return; } if (unlink(filename)) { - ksft_test_result_fail("unlink() failed\n"); + ksft_test_result_fail("unlink() failed (%s)\n", strerror(errno)); goto close; } @@ -317,7 +322,7 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc, fd = memfd_create("test", flags); if (fd < 0) { - ksft_test_result_skip("memfd_create() failed\n"); + ksft_test_result_skip("memfd_create() failed (%s)\n", strerror(errno)); return; } From 0046dbed80e67f57014bdfdcabd7a8ae5e73824a Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:13 +0000 Subject: [PATCH 186/431] selftests/mm: skip uffd-stress if userfaultfd not available It's pretty obvious that the test wouldn't work if you don't have the feature enabled. But, it's still useful to SKIP instead of failing so the reader can immediately tell that this is the reason why. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-2-dec210a658f5@google.com Signed-off-by: Brendan Jackman Reviewed-by: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-stress.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 944d559ade21..91174e9425cd 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -412,8 +412,8 @@ static void parse_test_type_arg(const char *raw_type) * feature. 
*/ - if (uffd_get_features(&features)) - err("failed to get available features"); + if (uffd_get_features(&features) && errno == ENOENT) + ksft_exit_skip("failed to get available features (%d)\n", errno); test_uffdio_wp = test_uffdio_wp && (features & UFFD_FEATURE_PAGEFAULT_FLAG_WP); From f4b3e6c7f14c3e84c4faf228868a62289efed22b Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:14 +0000 Subject: [PATCH 187/431] selftests/mm: skip uffd-wp-mremap if userfaultfd not available It's obvious that this should fail in that case, but still, save the reader the effort of figuring out that they've run into this by just SKIPping Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-3-dec210a658f5@google.com Signed-off-by: Brendan Jackman Reviewed-by: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-wp-mremap.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c index 2c4f984bd73c..c2ba7d46c7b4 100644 --- a/tools/testing/selftests/mm/uffd-wp-mremap.c +++ b/tools/testing/selftests/mm/uffd-wp-mremap.c @@ -182,7 +182,10 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb /* Register range for uffd-wp. */ if (userfaultfd_open(&features)) { - ksft_test_result_fail("userfaultfd_open() failed\n"); + if (errno == ENOENT) + ksft_test_result_skip("userfaultfd not available\n"); + else + ksft_test_result_fail("userfaultfd_open() failed\n"); goto out; } if (uffd_register(uffd, mem, size, false, true, false)) { From f3b5535abce9f05318ee52de7f0a97be58d032a0 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:15 +0000 Subject: [PATCH 188/431] selftests/mm/uffd: rename nr_cpus -> nr_parallel A later commit will bound this variable so it no longer necessarily matches the number of CPUs. Rename it appropriately. 
Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-4-dec210a658f5@google.com Signed-off-by: Brendan Jackman Reviewed-by: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 8 +++--- tools/testing/selftests/mm/uffd-common.h | 2 +- tools/testing/selftests/mm/uffd-stress.c | 28 ++++++++++---------- tools/testing/selftests/mm/uffd-unit-tests.c | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 5457a078690d..a37088a23ffe 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -10,7 +10,7 @@ #define BASE_PMD_ADDR ((void *)(1UL << 30)) volatile bool test_uffdio_copy_eexist = true; -unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; +unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size; char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap; int uffd = -1, uffd_flags, finished, *pipefd, test_type; bool map_shared; @@ -269,7 +269,7 @@ void uffd_test_ctx_clear(void) size_t i; if (pipefd) { - for (i = 0; i < nr_cpus * 2; ++i) { + for (i = 0; i < nr_parallel * 2; ++i) { if (close(pipefd[i])) err("close pipefd"); } @@ -365,10 +365,10 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg) */ uffd_test_ops->release_pages(area_dst); - pipefd = malloc(sizeof(int) * nr_cpus * 2); + pipefd = malloc(sizeof(int) * nr_parallel * 2); if (!pipefd) err("pipefd"); - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; cpu < nr_parallel; cpu++) if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK)) err("pipe"); diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h index a70ae10b5f62..7700cbfa3975 100644 --- a/tools/testing/selftests/mm/uffd-common.h +++ b/tools/testing/selftests/mm/uffd-common.h @@ -98,7 +98,7 @@ struct uffd_test_case_ops { }; typedef struct uffd_test_case_ops uffd_test_case_ops_t; -extern unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; +extern unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size; extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap; extern int uffd, uffd_flags, finished, *pipefd, test_type; extern bool map_shared; diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 91174e9425cd..d6b57e5a2e1d 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -180,12 +180,12 @@ static void *background_thread(void *arg) static int stress(struct uffd_args *args) { unsigned long cpu; - pthread_t locking_threads[nr_cpus]; - pthread_t uffd_threads[nr_cpus]; - pthread_t background_threads[nr_cpus]; + pthread_t locking_threads[nr_parallel]; + pthread_t uffd_threads[nr_parallel]; + pthread_t background_threads[nr_parallel]; finished = 0; - for (cpu = 0; cpu < nr_cpus; cpu++) { + for (cpu = 0; cpu < nr_parallel; cpu++) { if (pthread_create(&locking_threads[cpu], &attr, locking_thread, (void *)cpu)) return 1; @@ -203,7 +203,7 @@ static int stress(struct uffd_args *args) background_thread, (void *)cpu)) return 1; } - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; cpu < nr_parallel; cpu++) if (pthread_join(background_threads[cpu], NULL)) return 1; @@ -219,11 +219,11 @@ static int stress(struct uffd_args *args) uffd_test_ops->release_pages(area_src); finished = 1; - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; 
cpu < nr_parallel; cpu++) if (pthread_join(locking_threads[cpu], NULL)) return 1; - for (cpu = 0; cpu < nr_cpus; cpu++) { + for (cpu = 0; cpu < nr_parallel; cpu++) { char c; if (bounces & BOUNCE_POLL) { if (write(pipefd[cpu*2+1], &c, 1) != 1) @@ -246,11 +246,11 @@ static int userfaultfd_stress(void) { void *area; unsigned long nr; - struct uffd_args args[nr_cpus]; + struct uffd_args args[nr_parallel]; uint64_t mem_size = nr_pages * page_size; int flags = 0; - memset(args, 0, sizeof(struct uffd_args) * nr_cpus); + memset(args, 0, sizeof(struct uffd_args) * nr_parallel); if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON) flags = UFFD_FEATURE_WP_UNPOPULATED; @@ -325,7 +325,7 @@ static int userfaultfd_stress(void) */ uffd_test_ops->release_pages(area_dst); - uffd_stats_reset(args, nr_cpus); + uffd_stats_reset(args, nr_parallel); /* bounce pass */ if (stress(args)) { @@ -359,7 +359,7 @@ static int userfaultfd_stress(void) swap(area_src_alias, area_dst_alias); - uffd_stats_report(args, nr_cpus); + uffd_stats_report(args, nr_parallel); } uffd_test_ctx_clear(); @@ -453,9 +453,9 @@ int main(int argc, char **argv) return KSFT_SKIP; } - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + nr_parallel = sysconf(_SC_NPROCESSORS_ONLN); - nr_pages_per_cpu = bytes / page_size / nr_cpus; + nr_pages_per_cpu = bytes / page_size / nr_parallel; if (!nr_pages_per_cpu) { _err("invalid MiB"); usage(); @@ -466,7 +466,7 @@ int main(int argc, char **argv) _err("invalid bounces"); usage(); } - nr_pages = nr_pages_per_cpu * nr_cpus; + nr_pages = nr_pages_per_cpu * nr_parallel; printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n", nr_pages, nr_pages_per_cpu); diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 74c8bc02b506..24ea82ee2231 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -198,7 +198,7 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test, nr_pages = UFFD_TEST_MEM_SIZE / page_size; /* TODO: remove this global var.. it's so ugly */ - nr_cpus = 1; + nr_parallel = 1; /* Initialize test arguments */ args->mem_type = mem_type; From db0f1c138f18296e9c1c91619a0517c05ee50f1b Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:16 +0000 Subject: [PATCH 189/431] selftests/mm: print some details when uffd-stress gets bad params So this can be debugged more easily. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-5-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-stress.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index d6b57e5a2e1d..4ba5bf13a010 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -457,7 +457,8 @@ int main(int argc, char **argv) nr_pages_per_cpu = bytes / page_size / nr_parallel; if (!nr_pages_per_cpu) { - _err("invalid MiB"); + _err("pages_per_cpu = 0, cannot test (%lu / %lu / %lu)", + bytes, page_size, nr_parallel); usage(); } From bf6d575e24ee91f7ba8a752c0354bb00db1d3bf2 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:17 +0000 Subject: [PATCH 190/431] selftests/mm: don't fail uffd-stress if too many CPUs This calculation divides a fixed parameter by an environment-dependent parameter i.e. 
the number of CPUs. The simple way to avoid machine-specific failures here is to just put a cap on the max value of the latter. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-6-dec210a658f5@google.com Signed-off-by: Brendan Jackman Suggested-by: Mateusz Guzik Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-stress.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 4ba5bf13a010..40af7f67c407 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -435,6 +435,7 @@ static void sigalrm(int sig) int main(int argc, char **argv) { + unsigned long nr_cpus; size_t bytes; if (argc < 4) @@ -453,7 +454,15 @@ int main(int argc, char **argv) return KSFT_SKIP; } - nr_parallel = sysconf(_SC_NPROCESSORS_ONLN); + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (nr_cpus > 32) { + /* Don't let calculation below go to zero. */ + ksft_print_msg("_SC_NPROCESSORS_ONLN (%lu) too large, capping nr_threads to 32\n", + nr_cpus); + nr_parallel = 32; + } else { + nr_parallel = nr_cpus; + } nr_pages_per_cpu = bytes / page_size / nr_parallel; if (!nr_pages_per_cpu) { From 571a4b62ed63cace383619b0b4ef0c7e012237e1 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:18 +0000 Subject: [PATCH 191/431] selftests/mm: skip map_populate on weird filesystems It seems that 9pfs does not allow truncating unlinked files, Mark Brown has noted that NFS may also behave this way. It doesn't seem quite right to call this a "bug" but it's probably a special enough case that it makes sense for the test to just SKIP if it happens. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-7-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/map_populate.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c index 5c8a53869b1b..433e54fb634f 100644 --- a/tools/testing/selftests/mm/map_populate.c +++ b/tools/testing/selftests/mm/map_populate.c @@ -87,6 +87,13 @@ int main(int argc, char **argv) BUG_ON(!ftmp, "tmpfile()"); ret = ftruncate(fileno(ftmp), MMAP_SZ); + if (ret < 0 && errno == ENOENT) { + /* + * This probably means tmpfile() made a file on a filesystem + * that doesn't handle temporary files the way we want. + */ + ksft_exit_skip("ftruncate(fileno(tmpfile())) gave ENOENT, weird filesystem?\n"); + } BUG_ON(ret, "ftruncate()"); smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE, From 32b42970e8614c0b8652fcd441acec937bc2595e Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:19 +0000 Subject: [PATCH 192/431] selftests/mm: skip gup_longterm tests on weird filesystems Some filesystems don't support ftruncate()ing unlinked files. They return ENOENT. In that case, skip the test. 
Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-8-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/gup_longterm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 15335820656b..03271442aae5 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -96,7 +96,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) int ret; if (ftruncate(fd, size)) { - ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno)); + if (errno == ENOENT) { + /* + * This can happen if the file has been unlinked and the + * filesystem doesn't support truncating unlinked files. + */ + ksft_test_result_skip("ftruncate() failed with ENOENT\n"); + } else { + ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno)); + } return; } From e9269b2cc403b7681980e7219cf2dc339fca8d38 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:20 +0000 Subject: [PATCH 193/431] selftests/mm: drop unnecessary sudo usage This script must be run as root anyway (see all the writing to privileged files in /proc etc). Remove the unnecessary use of sudo to avoid breaking on single-user systems that don't have sudo. This also avoids confusing readers. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-9-dec210a658f5@google.com Signed-off-by: Brendan Jackman Reviewed-by: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 4b5e45a10219..31a576d70b57 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -402,7 +402,7 @@ CATEGORY="madv_populate" run_test ./madv_populate if [ -x ./memfd_secret ] then -(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix +(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix CATEGORY="memfd_secret" run_test ./memfd_secret fi From f896c6de833342bda61b8fe39f612023f7daf2a5 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:21 +0000 Subject: [PATCH 194/431] selftests/mm: ensure uffd-wp-mremap gets pages of each size This test allocates a page of every available size and doesn't have any SKIP logic if the allocation fails. So, ensure it's available and skip the test if we can't do so. 
Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-10-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 31a576d70b57..e1c20dcf8486 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -325,9 +325,30 @@ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 3 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16 -CATEGORY="userfaultfd" run_test ./uffd-wp-mremap +# uffd-wp-mremap requires at least one page of each size. +have_all_size_hugepgs=true +declare -A nr_size_hugepgs +for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do + old=$(cat $f) + nr_size_hugepgs["$f"]="$old" + if [ "$old" == 0 ]; then + echo 1 > "$f" + fi + if [ $(cat "$f") == 0 ]; then + have_all_size_hugepgs=false + break + fi +done +if $have_all_size_hugepgs; then + CATEGORY="userfaultfd" run_test ./uffd-wp-mremap +else + echo "# SKIP ./uffd-wp-mremap" +fi #cleanup +for f in "${!nr_size_hugepgs[@]}"; do + echo "${nr_size_hugepgs["$f"]}" > "$f" +done echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages CATEGORY="compaction" run_test ./compaction_test From 5d2146a3354f8eeb1f9f9581ee9a40e0a9d2c714 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:22 +0000 Subject: [PATCH 195/431] selftests/mm: skip mlock tests if nobody user can't read it If running from a directory that can't be read by unprivileged users, executing on-fault-test via the nobody user will fail. The kselftest build does give the file the correct permissions, but after being installed it might be in a directory without global execute permissions. Since the script can't safely fix that, just skip if it happens. Note that the stderr of the `ls` command is unfiltered meaning the user sees a "permission denied" error that can help inform them why the test was skipped. Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-11-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/run_vmtests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index e1c20dcf8486..9aff33b10999 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -353,7 +353,7 @@ echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages CATEGORY="compaction" run_test ./compaction_test -if command -v sudo &> /dev/null; +if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null; then CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit else From 1ddae9d67ee11886f9a35b78ad837eb26559e9ab Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 11 Mar 2025 13:18:23 +0000 Subject: [PATCH 196/431] selftests/mm/mlock: print error on failure It's not really possible to start diagnosing this without knowing the actual error. Also update the mlock2 helper to behave like libc would by setting errno and returning -1. 
Link: https://lkml.kernel.org/r/20250311-mm-selftests-v4-12-dec210a658f5@google.com Signed-off-by: Brendan Jackman Cc: Dev Jain Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mlock-random-test.c | 4 ++-- tools/testing/selftests/mm/mlock2.h | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c index 1cd80b0f76c3..b8d7e966f44c 100644 --- a/tools/testing/selftests/mm/mlock-random-test.c +++ b/tools/testing/selftests/mm/mlock-random-test.c @@ -161,9 +161,9 @@ static void test_mlock_within_limit(char *p, int alloc_size) MLOCK_ONFAULT); if (ret) - ksft_exit_fail_msg("%s() failure at |%p(%d)| mlock:|%p(%d)|\n", + ksft_exit_fail_msg("%s() failure (%s) at |%p(%d)| mlock:|%p(%d)|\n", is_mlock ? "mlock" : "mlock2", - p, alloc_size, + strerror(errno), p, alloc_size, p + start_offset, lock_size); } diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h index 4417eaa5cfb7..81e77fa41901 100644 --- a/tools/testing/selftests/mm/mlock2.h +++ b/tools/testing/selftests/mm/mlock2.h @@ -6,7 +6,13 @@ static int mlock2_(void *start, size_t len, int flags) { - return syscall(__NR_mlock2, start, len, flags); + int ret = syscall(__NR_mlock2, start, len, flags); + + if (ret) { + errno = ret; + return -1; + } + return 0; } static FILE *seek_to_smaps_entry(unsigned long addr) From 43e9bbc3bb19893377364379e224c35db0256b88 Mon Sep 17 00:00:00 2001 From: Kemeng Shi Date: Sun, 23 Feb 2025 00:08:48 +0800 Subject: [PATCH 197/431] mm, swap: remove setting SWAP_MAP_BAD for discard cluster Before alloc from a cluster, we will aqcuire cluster's lock and make sure it is usable by cluster_is_usable(), so there is no need to set SWAP_MAP_BAD for cluster to be discarded. Link: https://lkml.kernel.org/r/20250222160850.505274-5-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi Reviewed-by: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index cab68e57f4cc..80e4ad24fe53 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -479,15 +479,6 @@ static void move_cluster(struct swap_info_struct *si, static void swap_cluster_schedule_discard(struct swap_info_struct *si, struct swap_cluster_info *ci) { - unsigned int idx = cluster_index(si, ci); - /* - * If scan_swap_map_slots() can't find a free cluster, it will check - * si->swap_map directly. To make sure the discarding cluster isn't - * taken by scan_swap_map_slots(), mark the swap entries bad (occupied). - * It will be cleared after discard - */ - memset(si->swap_map + idx * SWAPFILE_CLUSTER, - SWAP_MAP_BAD, SWAPFILE_CLUSTER); VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); schedule_work(&si->discard_work); @@ -571,8 +562,6 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si) * return the cluster to allocation list. 
*/ ci->flags = CLUSTER_FLAG_NONE; - memset(si->swap_map + idx * SWAPFILE_CLUSTER, - 0, SWAPFILE_CLUSTER); __free_cluster(si, ci); spin_unlock(&ci->lock); ret = true; From 2310f0894225024397dfa193ccfc69b74366072e Mon Sep 17 00:00:00 2001 From: Kemeng Shi Date: Sun, 23 Feb 2025 00:08:49 +0800 Subject: [PATCH 198/431] mm, swap: correct comment in swap_usage_sub() We will add si back to plist in swap_usage_sub(), just correct the wrong comment which says we will remove si from plist in swap_usage_sub(). Link: https://lkml.kernel.org/r/20250222160850.505274-6-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 80e4ad24fe53..dc9f93b66f69 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1098,7 +1098,7 @@ static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) /* * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, - * remove it from the plist. + * add it to the plist. */ if (unlikely(val & SWAP_USAGE_OFFLIST_BIT)) add_to_avail_list(si, false); From 0a8a5b6c4129e61070eb9a45979c395fb6ab31c4 Mon Sep 17 00:00:00 2001 From: Kemeng Shi Date: Sun, 23 Feb 2025 00:08:50 +0800 Subject: [PATCH 199/431] mm: swap: remove stale comment of swap_reclaim_full_clusters() swap_reclaim_full_clusters() has no return value now, just remove the stale comment which says swap_reclaim_full_clusters() wil return a bool value. Link: https://lkml.kernel.org/r/20250222160850.505274-7-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swapfile.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index dc9f93b66f69..a7f60006c52c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -820,7 +820,6 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, return found; } -/* Return true if reclaimed a whole cluster */ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) { long to_scan = 1; From 8e2f2aeb8b48aceef6e6c07b2d9bede4eaa50c06 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 21 Feb 2025 12:05:22 +0000 Subject: [PATCH 200/431] fs/proc/task_mmu: add guard region bit to pagemap Patch series "fs/proc/task_mmu: add guard region bit to pagemap". Currently there is no means of determining whether a given page in a mapping range is designated a guard region (as installed via madvise() using the MADV_GUARD_INSTALL flag). This is generally not an issue, but in some instances users may wish to determine whether this is the case. This series adds this ability via /proc/$pid/pagemap, updates the documentation and adds a self test to assert that this functions correctly. This patch (of 2): Currently there is no means by which users can determine whether a given page in memory is in fact a guard region, that is having had the MADV_GUARD_INSTALL madvise() flag applied to it. This is intentional, as to provide this information in VMA metadata would contradict the intent of the feature (providing a means to change fault behaviour at a page table level rather than a VMA level), and would require VMA metadata operations to scan page tables, which is unacceptable. In many cases, users have no need to reflect and determine what regions have been designated guard regions, as it is the user who has established them in the first place. 
But in some instances, such as monitoring software, or software that relies upon being able to ascertain the nature of mappings within a remote process, it becomes useful to be able to determine which pages have the guard region marker applied. This patch makes use of an unused pagemap bit (58) to provide this information. This patch updates the documentation at the same time as making the change such that the implementation of the feature and the documentation of it are tied together. Link: https://lkml.kernel.org/r/cover.1740139449.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/521d99c08b975fb06a1e7201e971cc24d68196d1.1740139449.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Cc: Jann Horn Cc: Jonathan Corbet Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcow (Oracle) Cc: "Paul E . McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/pagemap.rst | 3 ++- fs/proc/task_mmu.c | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst index caba0f52dd36..a297e824f990 100644 --- a/Documentation/admin-guide/mm/pagemap.rst +++ b/Documentation/admin-guide/mm/pagemap.rst @@ -21,7 +21,8 @@ There are four components to pagemap: * Bit 56 page exclusively mapped (since 4.2) * Bit 57 pte is uffd-wp write-protected (since 5.13) (see Documentation/admin-guide/mm/userfaultfd.rst) - * Bits 58-60 zero + * Bit 58 pte is a guard region (since 6.15) (see madvise (2) man page) + * Bits 59-60 zero + * Bit 61 page is file-page or shared-anon (since 3.5) * Bit 62 page swapped * Bit 63 page present diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f02cd362309a..c17615e21a5d 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1632,6 +1632,7 @@ struct pagemapread { #define PM_SOFT_DIRTY BIT_ULL(55) #define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_UFFD_WP BIT_ULL(57) +#define PM_GUARD_REGION BIT_ULL(58) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) @@ -1732,6 +1733,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, page = pfn_swap_entry_to_page(entry); if (pte_marker_entry_uffd_wp(entry)) flags |= PM_UFFD_WP; + if (is_guard_swp_entry(entry)) + flags |= PM_GUARD_REGION; } if (page) { @@ -1931,7 +1934,8 @@ static const struct mm_walk_ops pagemap_ops = { * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) * Bit 56 page exclusively mapped * Bit 57 pte is uffd-wp write-protected - * Bits 58-60 zero + * Bit 58 pte is a guard region + * Bits 59-60 zero * Bit 61 page is file-page or shared-anon * Bit 62 page swapped * Bit 63 page present From f3b92176f4f7100f7e150975f0378f31ea5ce040 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 21 Feb 2025 12:05:23 +0000 Subject: [PATCH 201/431] tools/selftests: add guard region test for /proc/$pid/pagemap Add a test to the guard region self tests to assert that the /proc/$pid/pagemap information now made available to the user correctly identifies and reports guard regions. As a part of this change, update vm_util.h to add the new bit (note there is no header file in the kernel where this is exposed; the user is expected to provide their own mask) and utilise the helper functions there for pagemap functionality.
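To illustrate how the new bit can be consumed, here is a minimal userspace sketch (not part of this series; the helper name and error handling are purely illustrative) that tests bit 58 for an arbitrary address. Each pagemap entry is a 64-bit word indexed by virtual page number, so a single pread() of /proc/self/pagemap is enough:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_GUARD_REGION (1ULL << 58)	/* bit 58, as documented in the pagemap.rst change */

/* Return 1 if the page backing @addr is a guard region, 0 if not, -1 on error. */
static int page_is_guard_region(int pagemap_fd, const void *addr)
{
	long page_size = sysconf(_SC_PAGESIZE);
	off_t offset = (off_t)((uintptr_t)addr / page_size) * sizeof(uint64_t);
	uint64_t entry;

	if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return -1;
	return !!(entry & PM_GUARD_REGION);
}

int main(void)
{
	static char buf[4096];
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Without MADV_GUARD_INSTALL applied to buf, this should print 0. */
	printf("guard region: %d\n", page_is_guard_region(fd, buf));
	close(fd);
	return 0;
}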
[lorenzo.stoakes@oracle.com: fixup define name] Link: https://lkml.kernel.org/r/32e83941-e6f5-42ee-9292-a44c16463cf1@lucifer.local Link: https://lkml.kernel.org/r/164feb0a43ae72650e6b20c3910213f469566311.1740139449.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Jann Horn Cc: Jonathan Corbet Cc: Kalesh Singh Cc: Liam Howlett Cc: Matthew Wilcow (Oracle) Cc: "Paul E . McKenney" Cc: Shuah Khan Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-regions.c | 47 ++++++++++++++++++++++ tools/testing/selftests/mm/vm_util.h | 1 + 2 files changed, 48 insertions(+) diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c index ea9b5815e828..280d1831bf73 100644 --- a/tools/testing/selftests/mm/guard-regions.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -19,6 +19,7 @@ #include #include #include +#include "vm_util.h" /* * Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1: @@ -2032,4 +2033,50 @@ TEST_F(guard_regions, anon_zeropage) ASSERT_EQ(munmap(ptr, 10 * page_size), 0); } +/* + * Assert that /proc/$pid/pagemap correctly identifies guard region ranges. + */ +TEST_F(guard_regions, pagemap) +{ + const unsigned long page_size = self->page_size; + int proc_fd; + char *ptr; + int i; + + proc_fd = open("/proc/self/pagemap", O_RDONLY); + ASSERT_NE(proc_fd, -1); + + ptr = mmap_(self, variant, NULL, 10 * page_size, + PROT_READ | PROT_WRITE, 0, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Read from pagemap, and assert no guard regions are detected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + unsigned long entry = pagemap_get_entry(proc_fd, ptr_p); + unsigned long masked = entry & PM_GUARD_REGION; + + ASSERT_EQ(masked, 0); + } + + /* Install a guard region in every other page. */ + for (i = 0; i < 10; i += 2) { + char *ptr_p = &ptr[i * page_size]; + + ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0); + } + + /* Re-read from pagemap, and assert guard regions are detected. */ + for (i = 0; i < 10; i++) { + char *ptr_p = &ptr[i * page_size]; + unsigned long entry = pagemap_get_entry(proc_fd, ptr_p); + unsigned long masked = entry & PM_GUARD_REGION; + + ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0); + } + + ASSERT_EQ(close(proc_fd), 0); + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index b60ac68a9dc8..0e629586556b 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -10,6 +10,7 @@ #define PM_SOFT_DIRTY BIT_ULL(55) #define PM_MMAP_EXCLUSIVE BIT_ULL(56) #define PM_UFFD_WP BIT_ULL(57) +#define PM_GUARD_REGION BIT_ULL(58) #define PM_FILE BIT_ULL(61) #define PM_SWAP BIT_ULL(62) #define PM_PRESENT BIT_ULL(63) From c2f6ea38fc1b640aa7a2e155cc1c0410ff91afa2 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 24 Feb 2025 19:08:24 -0500 Subject: [PATCH 202/431] mm: page_alloc: don't steal single pages from biggest buddy The fallback code searches for the biggest buddy first in an attempt to steal the whole block and encourage type grouping down the line. The approach used to be this: - Non-movable requests will split the largest buddy and steal the remainder. This splits up contiguity, but it allows subsequent requests of this type to fall back into adjacent space. - Movable requests go and look for the smallest buddy instead. 
The thinking is that movable requests can be compacted, so grouping is less important than retaining contiguity. c0cd6f557b90 ("mm: page_alloc: fix freelist movement during block conversion") enforces freelist type hygiene, which restricts stealing to either claiming the whole block or just taking the requested chunk; no additional pages or buddy remainders can be stolen any more. The patch mishandled when to switch to finding the smallest buddy in that new reality. As a result, it may steal the exact request size, but from the biggest buddy. This causes fracturing for no good reason. Fix this by committing to the new behavior: either steal the whole block, or fall back to the smallest buddy. Remove single-page stealing from steal_suitable_fallback(). Rename it to try_to_steal_block() to make the intentions clear. If this fails, always fall back to the smallest buddy. The following is from 4 runs of mmtest's thpchallenge. "Pollute" is single page fallback, "steal" is conversion of a partially used block. The numbers for free block conversions (omitted) are comparable. vanilla patched @pollute[unmovable from reclaimable]: 27 106 @pollute[unmovable from movable]: 82 46 @pollute[reclaimable from unmovable]: 256 83 @pollute[reclaimable from movable]: 46 8 @pollute[movable from unmovable]: 4841 868 @pollute[movable from reclaimable]: 5278 12568 @steal[unmovable from reclaimable]: 11 12 @steal[unmovable from movable]: 113 49 @steal[reclaimable from unmovable]: 19 34 @steal[reclaimable from movable]: 47 21 @steal[movable from unmovable]: 250 183 @steal[movable from reclaimable]: 81 93 The allocator appears to do a better job at keeping stealing and polluting to the first fallback preference. As a result, the numbers for "from movable" - the least preferred fallback option, and most detrimental to compactability - are down across the board. Link: https://lkml.kernel.org/r/20250225001023.1494422-2-hannes@cmpxchg.org Fixes: c0cd6f557b90 ("mm: page_alloc: fix freelist movement during block conversion") Signed-off-by: Johannes Weiner Suggested-by: Vlastimil Babka Reviewed-by: Brendan Jackman Reviewed-by: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 80 +++++++++++++++++++++---------------------------- 1 file changed, 34 insertions(+), 46 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d875f055aa53..462f0e5342e5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1986,13 +1986,12 @@ static inline bool boost_watermark(struct zone *zone) * can claim the whole pageblock for the requested migratetype. If not, we check * the pageblock for constituent pages; if at least half of the pages are free * or compatible, we can still claim the whole block, so pages freed in the - * future will be put on the correct free list. Otherwise, we isolate exactly - * the order we need from the fallback block and leave its migratetype alone. + * future will be put on the correct free list. */ static struct page * -steal_suitable_fallback(struct zone *zone, struct page *page, - int current_order, int order, int start_type, - unsigned int alloc_flags, bool whole_block) +try_to_steal_block(struct zone *zone, struct page *page, + int current_order, int order, int start_type, + unsigned int alloc_flags) { int free_pages, movable_pages, alike_pages; unsigned long start_pfn; @@ -2005,7 +2004,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page, * highatomic accounting. 
*/ if (is_migrate_highatomic(block_type)) - goto single_page; + return NULL; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { @@ -2026,14 +2025,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page, if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); - /* We are not allowed to try stealing from the whole block */ - if (!whole_block) - goto single_page; - /* moving whole block can fail due to zone boundary conditions */ if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, &movable_pages)) - goto single_page; + return NULL; /* * Determine how many pages are compatible with our allocation. @@ -2066,9 +2061,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page, return __rmqueue_smallest(zone, order, start_type); } -single_page: - page_del_and_expand(zone, page, order, current_order, block_type); - return page; + return NULL; } /* @@ -2250,14 +2243,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, } /* - * Try finding a free buddy page on the fallback list and put it on the free - * list of requested migratetype, possibly along with other pages from the same - * block, depending on fragmentation avoidance heuristics. Returns true if - * fallback was found so that __rmqueue_smallest() can grab it. + * Try finding a free buddy page on the fallback list. + * + * This will attempt to steal a whole pageblock for the requested type + * to ensure grouping of such requests in the future. + * + * If a whole block cannot be stolen, regress to __rmqueue_smallest() + * logic to at least break up as little contiguity as possible. * * The use of signed ints for order and current_order is a deliberate * deviation from the rest of this file, to make the for loop * condition simpler. + * + * Return the stolen page, or NULL if none can be found. */ static __always_inline struct page * __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, @@ -2291,45 +2289,35 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, if (fallback_mt == -1) continue; - /* - * We cannot steal all free pages from the pageblock and the - * requested migratetype is movable. In that case it's better to - * steal and split the smallest available page instead of the - * largest available page, because even if the next movable - * allocation falls back into a different pageblock than this - * one, it won't cause permanent fragmentation. - */ - if (!can_steal && start_migratetype == MIGRATE_MOVABLE - && current_order > order) - goto find_smallest; + if (!can_steal) + break; - goto do_steal; + page = get_page_from_free_area(area, fallback_mt); + page = try_to_steal_block(zone, page, current_order, order, + start_migratetype, alloc_flags); + if (page) + goto got_one; } - return NULL; + if (alloc_flags & ALLOC_NOFRAGMENT) + return NULL; -find_smallest: + /* No luck stealing blocks. 
Find the smallest fallback page */ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, start_migratetype, false, &can_steal); - if (fallback_mt != -1) - break; + if (fallback_mt == -1) + continue; + + page = get_page_from_free_area(area, fallback_mt); + page_del_and_expand(zone, page, order, current_order, fallback_mt); + goto got_one; } - /* - * This should not happen - we already found a suitable fallback - * when looking for the largest page. - */ - VM_BUG_ON(current_order > MAX_PAGE_ORDER); - -do_steal: - page = get_page_from_free_area(area, fallback_mt); - - /* take off list, maybe claim block, expand remainder */ - page = steal_suitable_fallback(zone, page, current_order, order, - start_migratetype, alloc_flags, can_steal); + return NULL; +got_one: trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); From 020396a581dc69be2d30939fabde6c029d847034 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 24 Feb 2025 19:08:25 -0500 Subject: [PATCH 203/431] mm: page_alloc: remove remnants of unlocked migratetype updates The freelist hygiene patches made migratetype accesses fully protected under the zone->lock. Remove remnants of handling the race conditions that existed before from the MIGRATE_HIGHATOMIC code. Link: https://lkml.kernel.org/r/20250225001023.1494422-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Brendan Jackman Reviewed-by: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 50 ++++++++++++++++--------------------------------- 1 file changed, 16 insertions(+), 34 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 462f0e5342e5..9e6f0db6c79f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1991,20 +1991,10 @@ static inline bool boost_watermark(struct zone *zone) static struct page * try_to_steal_block(struct zone *zone, struct page *page, int current_order, int order, int start_type, - unsigned int alloc_flags) + int block_type, unsigned int alloc_flags) { int free_pages, movable_pages, alike_pages; unsigned long start_pfn; - int block_type; - - block_type = get_pageblock_migratetype(page); - - /* - * This can happen due to races and we want to prevent broken - * highatomic accounting. - */ - if (is_migrate_highatomic(block_type)) - return NULL; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { @@ -2179,33 +2169,22 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &(zone->free_area[order]); - int mt; + unsigned long size; page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); if (!page) continue; - mt = get_pageblock_migratetype(page); /* - * In page freeing path, migratetype change is racy so - * we can counter several free pages in a pageblock - * in this loop although we changed the pageblock type - * from highatomic to ac->migratetype. So we should - * adjust the count once. + * It should never happen but changes to + * locking could inadvertently allow a per-cpu + * drain to add pages to MIGRATE_HIGHATOMIC + * while unreserving so be safe and watch for + * underflows. 
*/ - if (is_migrate_highatomic(mt)) { - unsigned long size; - /* - * It should never happen but changes to - * locking could inadvertently allow a per-cpu - * drain to add pages to MIGRATE_HIGHATOMIC - * while unreserving so be safe and watch for - * underflows. - */ - size = max(pageblock_nr_pages, 1UL << order); - size = min(size, zone->nr_reserved_highatomic); - zone->nr_reserved_highatomic -= size; - } + size = max(pageblock_nr_pages, 1UL << order); + size = min(size, zone->nr_reserved_highatomic); + zone->nr_reserved_highatomic -= size; /* * Convert to ac->migratetype and avoid the normal @@ -2217,10 +2196,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * may increase. */ if (order < pageblock_order) - ret = move_freepages_block(zone, page, mt, + ret = move_freepages_block(zone, page, + MIGRATE_HIGHATOMIC, ac->migratetype); else { - move_to_free_list(page, zone, order, mt, + move_to_free_list(page, zone, order, + MIGRATE_HIGHATOMIC, ac->migratetype); change_pageblock_range(page, order, ac->migratetype); @@ -2294,7 +2275,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, page = get_page_from_free_area(area, fallback_mt); page = try_to_steal_block(zone, page, current_order, order, - start_migratetype, alloc_flags); + start_migratetype, fallback_mt, + alloc_flags); if (page) goto got_one; } From a4138a2702a4428317ecdb115934554df4b788b4 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 24 Feb 2025 19:08:26 -0500 Subject: [PATCH 204/431] mm: page_alloc: group fallback functions together The way the fallback rules are spread out makes them hard to follow. Move the functions next to each other at least. Link: https://lkml.kernel.org/r/20250225001023.1494422-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Brendan Jackman Reviewed-by: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 394 ++++++++++++++++++++++++------------------------ 1 file changed, 197 insertions(+), 197 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9e6f0db6c79f..945437d7ac44 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1903,6 +1903,43 @@ static void change_pageblock_range(struct page *pageblock_page, } } +static inline bool boost_watermark(struct zone *zone) +{ + unsigned long max_boost; + + if (!watermark_boost_factor) + return false; + /* + * Don't bother in zones that are unlikely to produce results. + * On small machines, including kdump capture kernels running + * in a small area, boosting the watermark can cause an out of + * memory situation immediately. + */ + if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) + return false; + + max_boost = mult_frac(zone->_watermark[WMARK_HIGH], + watermark_boost_factor, 10000); + + /* + * high watermark may be uninitialised if fragmentation occurs + * very early in boot so do not boost. We do not fall + * through and boost by pageblock_nr_pages as failing + * allocations that early means that reclaim is not going + * to help and it may even be impossible to reclaim the + * boosted watermark resulting in a hang. 
+ */ + if (!max_boost) + return false; + + max_boost = max(pageblock_nr_pages, max_boost); + + zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, + max_boost); + + return true; +} + /* * When we are falling back to another migratetype during allocation, try to * steal extra free pages from the same pageblocks to satisfy further @@ -1944,41 +1981,38 @@ static bool can_steal_fallback(unsigned int order, int start_mt) return false; } -static inline bool boost_watermark(struct zone *zone) +/* + * Check whether there is a suitable fallback freepage with requested order. + * If only_stealable is true, this function returns fallback_mt only if + * we can steal other freepages all together. This would help to reduce + * fragmentation due to mixed migratetype pages in one pageblock. + */ +int find_suitable_fallback(struct free_area *area, unsigned int order, + int migratetype, bool only_stealable, bool *can_steal) { - unsigned long max_boost; + int i; + int fallback_mt; - if (!watermark_boost_factor) - return false; - /* - * Don't bother in zones that are unlikely to produce results. - * On small machines, including kdump capture kernels running - * in a small area, boosting the watermark can cause an out of - * memory situation immediately. - */ - if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) - return false; + if (area->nr_free == 0) + return -1; - max_boost = mult_frac(zone->_watermark[WMARK_HIGH], - watermark_boost_factor, 10000); + *can_steal = false; + for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { + fallback_mt = fallbacks[migratetype][i]; + if (free_area_empty(area, fallback_mt)) + continue; - /* - * high watermark may be uninitialised if fragmentation occurs - * very early in boot so do not boost. We do not fall - * through and boost by pageblock_nr_pages as failing - * allocations that early means that reclaim is not going - * to help and it may even be impossible to reclaim the - * boosted watermark resulting in a hang. - */ - if (!max_boost) - return false; + if (can_steal_fallback(order, migratetype)) + *can_steal = true; - max_boost = max(pageblock_nr_pages, max_boost); + if (!only_stealable) + return fallback_mt; - zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, - max_boost); + if (*can_steal) + return fallback_mt; + } - return true; + return -1; } /* @@ -2054,175 +2088,6 @@ try_to_steal_block(struct zone *zone, struct page *page, return NULL; } -/* - * Check whether there is a suitable fallback freepage with requested order. - * If only_stealable is true, this function returns fallback_mt only if - * we can steal other freepages all together. This would help to reduce - * fragmentation due to mixed migratetype pages in one pageblock. 
- */ -int find_suitable_fallback(struct free_area *area, unsigned int order, - int migratetype, bool only_stealable, bool *can_steal) -{ - int i; - int fallback_mt; - - if (area->nr_free == 0) - return -1; - - *can_steal = false; - for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { - fallback_mt = fallbacks[migratetype][i]; - if (free_area_empty(area, fallback_mt)) - continue; - - if (can_steal_fallback(order, migratetype)) - *can_steal = true; - - if (!only_stealable) - return fallback_mt; - - if (*can_steal) - return fallback_mt; - } - - return -1; -} - -/* - * Reserve the pageblock(s) surrounding an allocation request for - * exclusive use of high-order atomic allocations if there are no - * empty page blocks that contain a page with a suitable order - */ -static void reserve_highatomic_pageblock(struct page *page, int order, - struct zone *zone) -{ - int mt; - unsigned long max_managed, flags; - - /* - * The number reserved as: minimum is 1 pageblock, maximum is - * roughly 1% of a zone. But if 1% of a zone falls below a - * pageblock size, then don't reserve any pageblocks. - * Check is race-prone but harmless. - */ - if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) - return; - max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); - if (zone->nr_reserved_highatomic >= max_managed) - return; - - spin_lock_irqsave(&zone->lock, flags); - - /* Recheck the nr_reserved_highatomic limit under the lock */ - if (zone->nr_reserved_highatomic >= max_managed) - goto out_unlock; - - /* Yoink! */ - mt = get_pageblock_migratetype(page); - /* Only reserve normal pageblocks (i.e., they can merge with others) */ - if (!migratetype_is_mergeable(mt)) - goto out_unlock; - - if (order < pageblock_order) { - if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) - goto out_unlock; - zone->nr_reserved_highatomic += pageblock_nr_pages; - } else { - change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); - zone->nr_reserved_highatomic += 1 << order; - } - -out_unlock: - spin_unlock_irqrestore(&zone->lock, flags); -} - -/* - * Used when an allocation is about to fail under memory pressure. This - * potentially hurts the reliability of high-order allocations when under - * intense memory pressure but failed atomic allocations should be easier - * to recover from than an OOM. - * - * If @force is true, try to unreserve pageblocks even though highatomic - * pageblock is exhausted. - */ -static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, - bool force) -{ - struct zonelist *zonelist = ac->zonelist; - unsigned long flags; - struct zoneref *z; - struct zone *zone; - struct page *page; - int order; - int ret; - - for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, - ac->nodemask) { - /* - * Preserve at least one pageblock unless memory pressure - * is really high. - */ - if (!force && zone->nr_reserved_highatomic <= - pageblock_nr_pages) - continue; - - spin_lock_irqsave(&zone->lock, flags); - for (order = 0; order < NR_PAGE_ORDERS; order++) { - struct free_area *area = &(zone->free_area[order]); - unsigned long size; - - page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); - if (!page) - continue; - - /* - * It should never happen but changes to - * locking could inadvertently allow a per-cpu - * drain to add pages to MIGRATE_HIGHATOMIC - * while unreserving so be safe and watch for - * underflows. 
- */ - size = max(pageblock_nr_pages, 1UL << order); - size = min(size, zone->nr_reserved_highatomic); - zone->nr_reserved_highatomic -= size; - - /* - * Convert to ac->migratetype and avoid the normal - * pageblock stealing heuristics. Minimally, the caller - * is doing the work and needs the pages. More - * importantly, if the block was always converted to - * MIGRATE_UNMOVABLE or another type then the number - * of pageblocks that cannot be completely freed - * may increase. - */ - if (order < pageblock_order) - ret = move_freepages_block(zone, page, - MIGRATE_HIGHATOMIC, - ac->migratetype); - else { - move_to_free_list(page, zone, order, - MIGRATE_HIGHATOMIC, - ac->migratetype); - change_pageblock_range(page, order, - ac->migratetype); - ret = 1; - } - /* - * Reserving the block(s) already succeeded, - * so this should not fail on zone boundaries. - */ - WARN_ON_ONCE(ret == -1); - if (ret > 0) { - spin_unlock_irqrestore(&zone->lock, flags); - return ret; - } - } - spin_unlock_irqrestore(&zone->lock, flags); - } - - return false; -} - /* * Try finding a free buddy page on the fallback list. * @@ -3143,6 +3008,141 @@ struct page *rmqueue(struct zone *preferred_zone, return page; } +/* + * Reserve the pageblock(s) surrounding an allocation request for + * exclusive use of high-order atomic allocations if there are no + * empty page blocks that contain a page with a suitable order + */ +static void reserve_highatomic_pageblock(struct page *page, int order, + struct zone *zone) +{ + int mt; + unsigned long max_managed, flags; + + /* + * The number reserved as: minimum is 1 pageblock, maximum is + * roughly 1% of a zone. But if 1% of a zone falls below a + * pageblock size, then don't reserve any pageblocks. + * Check is race-prone but harmless. + */ + if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) + return; + max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); + if (zone->nr_reserved_highatomic >= max_managed) + return; + + spin_lock_irqsave(&zone->lock, flags); + + /* Recheck the nr_reserved_highatomic limit under the lock */ + if (zone->nr_reserved_highatomic >= max_managed) + goto out_unlock; + + /* Yoink! */ + mt = get_pageblock_migratetype(page); + /* Only reserve normal pageblocks (i.e., they can merge with others) */ + if (!migratetype_is_mergeable(mt)) + goto out_unlock; + + if (order < pageblock_order) { + if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) + goto out_unlock; + zone->nr_reserved_highatomic += pageblock_nr_pages; + } else { + change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); + zone->nr_reserved_highatomic += 1 << order; + } + +out_unlock: + spin_unlock_irqrestore(&zone->lock, flags); +} + +/* + * Used when an allocation is about to fail under memory pressure. This + * potentially hurts the reliability of high-order allocations when under + * intense memory pressure but failed atomic allocations should be easier + * to recover from than an OOM. + * + * If @force is true, try to unreserve pageblocks even though highatomic + * pageblock is exhausted. + */ +static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, + bool force) +{ + struct zonelist *zonelist = ac->zonelist; + unsigned long flags; + struct zoneref *z; + struct zone *zone; + struct page *page; + int order; + int ret; + + for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, + ac->nodemask) { + /* + * Preserve at least one pageblock unless memory pressure + * is really high. 
+ */ + if (!force && zone->nr_reserved_highatomic <= + pageblock_nr_pages) + continue; + + spin_lock_irqsave(&zone->lock, flags); + for (order = 0; order < NR_PAGE_ORDERS; order++) { + struct free_area *area = &(zone->free_area[order]); + unsigned long size; + + page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); + if (!page) + continue; + + /* + * It should never happen but changes to + * locking could inadvertently allow a per-cpu + * drain to add pages to MIGRATE_HIGHATOMIC + * while unreserving so be safe and watch for + * underflows. + */ + size = max(pageblock_nr_pages, 1UL << order); + size = min(size, zone->nr_reserved_highatomic); + zone->nr_reserved_highatomic -= size; + + /* + * Convert to ac->migratetype and avoid the normal + * pageblock stealing heuristics. Minimally, the caller + * is doing the work and needs the pages. More + * importantly, if the block was always converted to + * MIGRATE_UNMOVABLE or another type then the number + * of pageblocks that cannot be completely freed + * may increase. + */ + if (order < pageblock_order) + ret = move_freepages_block(zone, page, + MIGRATE_HIGHATOMIC, + ac->migratetype); + else { + move_to_free_list(page, zone, order, + MIGRATE_HIGHATOMIC, + ac->migratetype); + change_pageblock_range(page, order, + ac->migratetype); + ret = 1; + } + /* + * Reserving the block(s) already succeeded, + * so this should not fail on zone boundaries. + */ + WARN_ON_ONCE(ret == -1); + if (ret > 0) { + spin_unlock_irqrestore(&zone->lock, flags); + return ret; + } + } + spin_unlock_irqrestore(&zone->lock, flags); + } + + return false; +} + static inline long __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags) { From 442b1eca223b4860cc85ef970ae602d125aec5a4 Mon Sep 17 00:00:00 2001 From: Jane Chu Date: Mon, 24 Feb 2025 14:14:45 -0700 Subject: [PATCH 205/431] mm: make page_mapped_in_vma() hugetlb walk aware When a process consumes a UE in a page, the memory failure handler attempts to collect information for a potential SIGBUS. If the page is an anonymous page, page_mapped_in_vma(page, vma) is invoked in order to 1. retrieve the vaddr from the process' address space, 2. verify that the vaddr is indeed mapped to the poisoned page, where 'page' is the precise small page with UE. It's been observed that when injecting poison to a non-head subpage of an anonymous hugetlb page, no SIGBUS shows up, while injecting to the head page produces a SIGBUS. The cause is that, though hugetlb_walk() returns a valid pmd entry (on x86), check_pte() detects a mismatch between the head page per the pmd and the input subpage. Thus the vaddr is considered not mapped to the subpage and the process is not collected for SIGBUS purposes. This is the calling stack: collect_procs_anon page_mapped_in_vma page_vma_mapped_walk hugetlb_walk huge_pte_lock check_pte The check_pte() header says that it "check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is mapped at the @pvmw->pte", but in practice it works only if pvmw->pfn is the head page pfn at pvmw->pte. In hindsight, some pvmw->pte could point to a hugepage of some sort, so it makes sense to make check_pte() work for hugepages. Link: https://lkml.kernel.org/r/20250224211445.2663312-1-jane.chu@oracle.com Signed-off-by: Jane Chu Cc: Hugh Dickins Cc: Kirill A.
Shuemov Cc: linmiaohe Cc: Matthew Wilcow (Oracle) Cc: Peter Xu Cc: Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 32679be22d30..e463c3be934a 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -84,6 +84,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp, * mapped at the @pvmw->pte * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range * for checking + * @pte_nr: the number of small pages described by @pvmw->pte. * * page_vma_mapped_walk() found a place where pfn range is *potentially* * mapped. check_pte() has to validate this. @@ -100,7 +101,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp, * Otherwise, return false. * */ -static bool check_pte(struct page_vma_mapped_walk *pvmw) +static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr) { unsigned long pfn; pte_t ptent = ptep_get(pvmw->pte); @@ -132,7 +133,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw) pfn = pte_pfn(ptent); } - return (pfn - pvmw->pfn) < pvmw->nr_pages; + if ((pfn + pte_nr - 1) < pvmw->pfn) + return false; + if (pfn > (pvmw->pfn + pvmw->nr_pages - 1)) + return false; + return true; } /* Returns true if the two ranges overlap. Careful to not overflow. */ @@ -207,7 +212,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) return false; pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); - if (!check_pte(pvmw)) + if (!check_pte(pvmw, pages_per_huge_page(hstate))) return not_found(pvmw); return true; } @@ -290,7 +295,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) goto next_pte; } this_pte: - if (check_pte(pvmw)) + if (check_pte(pvmw, 1)) return true; next_pte: do { From fae85955053130be0b8b3d10e19cadcc173c7e4b Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:29 +0800 Subject: [PATCH 206/431] mm, swap: avoid reclaiming irrelevant swap cache Patch series "mm, swap: remove swap slot cache", v3. Slot cache was initially introduced by commit 67afa38e012e ("mm/swap: add cache for swap slots allocation") to reduce the lock contention of si->lock. Previous series "mm, swap: rework of swap allocator locks" [1] removed swap slot cache for the freeing path as the freeing path no longer touches si->lock in most cases. The allocation path also has little to no contention on si->lock since that series, but slot cache still helps to reduce other overheads, like counters and the plist. This series removes the slot cache from the allocation path too, by using the cluster as the allocation fast path, and also reduces other overheads. Now the slot cache is completely gone and the code is much simplified, without obvious feature or performance change; related workarounds are cleaned up as well. This should also avoid other potential issues, e.g. the long pinning of swap slots: swap slot cache pins swap slots with HAS_CACHE, causing reclaim or allocation to fail to use these slots when scanning. The only behavior change is the swap device allocation rotation mechanism, as explained in the patch "mm, swap: use percpu cluster as allocation fast path".
Test results are looking good after deleting the swap slot cache: - vm-scalability with: `usemem --init-time -O -y -x -R -31 1G`, 12G memory cgroup using simulated pmem as SWAP (32G pmem, 32 CPUs), 16 test runs for each case, measuring the total throughput: Before (KB/s) (stdev) After (KB/s) (stdev) Random (4K): 424907.60 (24410.78) 414745.92 (34554.78) Random (64K): 163308.82 (11635.72) 167314.50 (18434.99) Sequential (4K, !-R): 6150056.79 (103205.90) 6321469.06 (115878.16) - Build linux kernel with make -j96, using 4K folio with 1.5G memory cgroup limit and 64K folio with 2G memory cgroup limit, on top of tmpfs, 12 test runs, measuring the system time: Before (s) (stdev) After (s) (stdev) make -j96 (4K): 6445.69 (61.95) 6408.80 (69.46) make -j96 (64K): 6841.71 (409.04) 6437.99 (435.55) The performance is unchanged, slightly better in some cases. [1] https://lore.kernel.org/linux-mm/20250113175732.48099-1-ryncsn@gmail.com/ This patch (of 7): The swap allocator will do swap cache reclaim to recycle HAS_CACHE slots for allocation. It initiates the reclaim from the offset to be reclaimed and looks up the corresponding folio. The lookup process is lockless, so it's possible the folio will be removed from the swap cache and given a different swap entry before the reclaim locks the folio. If that happens, the reclaim will end up reclaiming an irrelevant folio and returning a wrong return value. This shouldn't cause any problem with correctness or stability, but it is indeed confusing and unexpected, and will increase fragmentation and decrease performance. Fix this by checking whether the folio is still pointing to the offset the allocator wants to reclaim before reclaiming it. Link: https://lkml.kernel.org/r/20250313165935.63303-1-ryncsn@gmail.com Link: https://lkml.kernel.org/r/20250313165935.63303-2-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index a7f60006c52c..5618cd1c4b03 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -210,6 +210,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, int ret, nr_pages; bool need_reclaim; +again: folio = filemap_get_folio(address_space, swap_cache_index(entry)); if (IS_ERR(folio)) return 0; @@ -227,8 +228,16 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, if (!folio_trylock(folio)) goto out; - /* offset could point to the middle of a large folio */ + /* + * Offset could point to the middle of a large folio, or folio + * may no longer point to the expected offset before it's locked. + */ entry = folio->swap; + if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) { + folio_unlock(folio); + folio_put(folio); + goto again; + } offset = swp_offset(entry); need_reclaim = ((flags & TTRS_ANYWAY) || From 3123fb0a18d6c76b536a315b88553211cd3c2b1d Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:30 +0800 Subject: [PATCH 207/431] mm, swap: drop the flag TTRS_DIRECT This flag exists temporarily to allow the allocator to bypass the slot cache during freeing, so reclaiming one slot will free the slot immediately. But we have already removed slot cache usage on the freeing path, so this flag no longer has any effect.
Link: https://lkml.kernel.org/r/20250313165935.63303-3-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 5618cd1c4b03..6f2de59c6355 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -158,8 +158,6 @@ static long swap_usage_in_pages(struct swap_info_struct *si) #define TTRS_UNMAPPED 0x2 /* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 -/* Reclaim directly, bypass the slot cache and don't touch device lock */ -#define TTRS_DIRECT 0x8 static bool swap_only_has_cache(struct swap_info_struct *si, unsigned long offset, int nr_pages) @@ -257,23 +255,8 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, if (!need_reclaim) goto out_unlock; - if (!(flags & TTRS_DIRECT)) { - /* Free through slot cache */ - delete_from_swap_cache(folio); - folio_set_dirty(folio); - ret = nr_pages; - goto out_unlock; - } - - xa_lock_irq(&address_space->i_pages); - __delete_from_swap_cache(folio, entry, NULL); - xa_unlock_irq(&address_space->i_pages); - folio_ref_sub(folio, nr_pages); + delete_from_swap_cache(folio); folio_set_dirty(folio); - - ci = lock_cluster(si, offset); - swap_entry_range_free(si, ci, entry, nr_pages); - unlock_cluster(ci); ret = nr_pages; out_unlock: folio_unlock(folio); @@ -697,7 +680,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si, offset++; break; case SWAP_HAS_CACHE: - nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); + nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); if (nr_reclaim > 0) offset += nr_reclaim; else @@ -849,7 +832,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { spin_unlock(&ci->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, - TTRS_ANYWAY | TTRS_DIRECT); + TTRS_ANYWAY); spin_lock(&ci->lock); if (nr_reclaim) { offset += abs(nr_reclaim); From 78524b05f1a3e16a5d00cc9c6259c41a9d6003ce Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:31 +0800 Subject: [PATCH 208/431] mm, swap: avoid redundant swap device pinning Currently __read_swap_cache_async() has get/put_swap_device() calls to increase/decrease a swap device reference to prevent swapoff. However, some of its callers already hold the swap device reference, e.g. do_swap_page() and shmem_swapin_folio(), from which __read_swap_cache_async() is finally called. Now there are only two callers not holding a swap device reference, so make them hold a reference instead, and drop the get/put_swap_device() calls in __read_swap_cache_async(). This should slightly reduce the swap-in overhead during page faults.
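For context, the calling convention this patch moves to can be summarised by the hedged sketch below (the wrapper name is hypothetical; get_swap_device(), put_swap_device() and __read_swap_cache_async() are the functions touched by the diff that follows): the caller pins the swap device across the whole swap cache lookup so a concurrent swapoff cannot release it underneath.

/*
 * Hypothetical caller-side sketch: pin the swap device for the whole
 * swap cache lookup instead of having __read_swap_cache_async() take
 * and drop the reference internally.
 */
static struct folio *swapin_folio_pinned(swp_entry_t entry, gfp_t gfp_mask,
					 struct mempolicy *mpol, pgoff_t ilx)
{
	struct swap_info_struct *si;
	struct folio *folio;
	bool page_allocated;

	si = get_swap_device(entry);
	if (!si)
		return NULL;		/* raced with swapoff */

	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	put_swap_device(si);
	return folio;
}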
Link: https://lkml.kernel.org/r/20250313165935.63303-4-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swap_state.c | 14 ++++++++------ mm/zswap.c | 8 +++++++- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/mm/swap_state.c b/mm/swap_state.c index a54b035d6a6c..50840a2887a5 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -426,17 +426,13 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, bool skip_if_exists) { - struct swap_info_struct *si; + struct swap_info_struct *si = swp_swap_info(entry); struct folio *folio; struct folio *new_folio = NULL; struct folio *result = NULL; void *shadow = NULL; *new_page_allocated = false; - si = get_swap_device(entry); - if (!si) - return NULL; - for (;;) { int err; /* @@ -532,7 +528,6 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, put_swap_folio(new_folio, entry); folio_unlock(new_folio); put_and_return: - put_swap_device(si); if (!(*new_page_allocated) && new_folio) folio_put(new_folio); return result; @@ -552,11 +547,16 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug) { + struct swap_info_struct *si; bool page_allocated; struct mempolicy *mpol; pgoff_t ilx; struct folio *folio; + si = get_swap_device(entry); + if (!si) + return NULL; + mpol = get_vma_policy(vma, addr, 0, &ilx); folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, &page_allocated, false); @@ -564,6 +564,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (page_allocated) swap_read_folio(folio, plug); + + put_swap_device(si); return folio; } diff --git a/mm/zswap.c b/mm/zswap.c index 23365e76a3ce..8a1ded8fa973 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1051,14 +1051,20 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct folio *folio; struct mempolicy *mpol; bool folio_was_allocated; + struct swap_info_struct *si; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, }; /* try to allocate swap cache folio */ + si = get_swap_device(swpentry); + if (!si) + return -EEXIST; + mpol = get_task_policy(current); folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol, - NO_INTERLEAVE_INDEX, &folio_was_allocated, true); + NO_INTERLEAVE_INDEX, &folio_was_allocated, true); + put_swap_device(si); if (!folio) return -ENOMEM; From 280cfccaa20c012f0979021939c68ada03c3d973 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:32 +0800 Subject: [PATCH 209/431] mm, swap: don't update the counter up-front Updating the counter before allocation was useful to avoid an unnecessary scan when the device is full, since the allocator could abort early if the counter indicated a full device. But that is an uncommon case, and now scanning of a full device is very fast, so the up-front update is not helpful any more. Remove it and simplify the slot allocation logic.
Link: https://lkml.kernel.org/r/20250313165935.63303-5-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 6f2de59c6355..db836670c334 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1201,22 +1201,10 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) int order = swap_entry_order(entry_order); unsigned long size = 1 << order; struct swap_info_struct *si, *next; - long avail_pgs; int n_ret = 0; int node; spin_lock(&swap_avail_lock); - - avail_pgs = atomic_long_read(&nr_swap_pages) / size; - if (avail_pgs <= 0) { - spin_unlock(&swap_avail_lock); - goto noswap; - } - - n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs); - - atomic_long_sub(n_goal * size, &nr_swap_pages); - start_over: node = numa_node_id(); plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { @@ -1250,10 +1238,8 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) spin_unlock(&swap_avail_lock); check_out: - if (n_ret < n_goal) - atomic_long_add((long)(n_goal - n_ret) * size, - &nr_swap_pages); -noswap: + atomic_long_sub(n_ret * size, &nr_swap_pages); + return n_ret; } From 1b7e90020eb770aa52991a34f21552b4c38ea690 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:33 +0800 Subject: [PATCH 210/431] mm, swap: use percpu cluster as allocation fast path The current allocation workflow first traverses the plist with a global lock held; after choosing a device, it uses the percpu cluster on that swap device. This commit moves the percpu cluster variable out of being tied to individual swap devices, making it a global percpu variable which will be used directly for allocation as a fast path. The global percpu cluster variable will never point to an HDD device, and allocations on an HDD device are still globally serialized. This improves the allocator performance and prepares for removal of the slot cache in later commits. There shouldn't be much observable behavior change, except one thing: this changes how swap device allocation rotation works. Currently, each allocation will rotate the plist, and because of the existence of slot cache (one order 0 allocation usually returns 64 entries), swap devices of the same priority are rotated for every 64 order 0 entries consumed. High order allocations are different: they bypass the slot cache, and so the swap device is rotated for every 16K, 32K, or up to 2M allocation. The rotation rule was never clearly defined or documented; it was changed several times without being mentioned. After this commit, and once slot cache is gone in later commits, swap device rotation will happen for every consumed cluster. Ideally, non-HDD devices will be rotated once 2M of space has been consumed for each order. Fragmented clusters will rotate the device faster, which seems OK. HDD devices are rotated for every allocation regardless of the allocation order, which should be OK too and is trivial. This commit also slightly changes allocation behaviour for slot cache.
The newly added cluster allocation fast path may allocate entries from a different device into the slot cache; this is not observable from user space, impacts performance only very slightly, and the slot cache will be gone in the next commit, so it can be ignored. Link: https://lkml.kernel.org/r/20250313165935.63303-6-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Baolin Wang Cc: Baoquan He Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 11 ++- mm/swapfile.c | 158 ++++++++++++++++++++++++++++++++----------- 2 files changed, 121 insertions(+), 48 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fe91c293636..374bffc87427 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -284,12 +284,10 @@ enum swap_cluster_flags { #endif /* - * We assign a cluster to each CPU, so each CPU can allocate swap entry from - * its own cluster and swapout sequentially. The purpose is to optimize swapout - * throughput. + * We keep using same cluster for rotational device so IO will be sequential. + * The purpose is to optimize SWAP throughput on these device. */ -struct percpu_cluster { - local_lock_t lock; /* Protect the percpu_cluster above */ +struct swap_sequential_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; @@ -315,8 +313,7 @@ struct swap_info_struct { atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ - struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ - struct percpu_cluster *global_cluster; /* Use one global cluster for rotating device */ + struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ diff --git a/mm/swapfile.c b/mm/swapfile.c index db836670c334..8b296c4c636b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -116,6 +116,18 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0); atomic_t nr_rotate_swap = ATOMIC_INIT(0); +struct percpu_swap_cluster { + struct swap_info_struct *si[SWAP_NR_ORDERS]; + unsigned long offset[SWAP_NR_ORDERS]; + local_lock_t lock; +}; + +static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = { + .si = { NULL }, + .offset = { SWAP_ENTRY_INVALID }, + .lock = INIT_LOCAL_LOCK(), +}; + static struct swap_info_struct *swap_type_to_swap_info(int type) { if (type >= MAX_SWAPFILES) @@ -539,7 +551,7 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si) ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); /* * Delete the cluster from list to prepare for discard, but keep - * the CLUSTER_FLAG_DISCARD flag, there could be percpu_cluster + * the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be * pointing to it, or ran into by relocate_cluster.
*/ list_del(&ci->list); @@ -805,10 +817,12 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, out: relocate_cluster(si, ci); unlock_cluster(ci); - if (si->flags & SWP_SOLIDSTATE) - __this_cpu_write(si->percpu_cluster->next[order], next); - else + if (si->flags & SWP_SOLIDSTATE) { + this_cpu_write(percpu_swap_cluster.offset[order], next); + this_cpu_write(percpu_swap_cluster.si[order], si); + } else { si->global_cluster->next[order] = next; + } return found; } @@ -862,20 +876,18 @@ static void swap_reclaim_work(struct work_struct *work) } /* - * Try to get swap entries with specified order from current cpu's swap entry - * pool (a cluster). This might involve allocating a new cluster for current CPU - * too. + * Try to allocate swap entries with specified order and try set a new + * cluster for current CPU too. */ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, unsigned char usage) { struct swap_cluster_info *ci; - unsigned int offset, found = 0; + unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; if (si->flags & SWP_SOLIDSTATE) { - /* Fast path using per CPU cluster */ - local_lock(&si->percpu_cluster->lock); - offset = __this_cpu_read(si->percpu_cluster->next[order]); + if (si == this_cpu_read(percpu_swap_cluster.si[order])) + offset = this_cpu_read(percpu_swap_cluster.offset[order]); } else { /* Serialize HDD SWAP allocation for each device. */ spin_lock(&si->global_cluster_lock); @@ -973,9 +985,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o } } done: - if (si->flags & SWP_SOLIDSTATE) - local_unlock(&si->percpu_cluster->lock); - else + if (!(si->flags & SWP_SOLIDSTATE)) spin_unlock(&si->global_cluster_lock); return found; } @@ -1196,6 +1206,51 @@ static bool get_swap_device_info(struct swap_info_struct *si) return true; } +/* + * Fast path try to get swap entries with specified order from current + * CPU's swap entry pool (a cluster). + */ +static int swap_alloc_fast(swp_entry_t entries[], + unsigned char usage, + int order, int n_goal) +{ + struct swap_cluster_info *ci; + struct swap_info_struct *si; + unsigned int offset, found; + int n_ret = 0; + + n_goal = min(n_goal, SWAP_BATCH); + + /* + * Once allocated, swap_info_struct will never be completely freed, + * so checking it's liveness by get_swap_device_info is enough. 
+ */ + si = this_cpu_read(percpu_swap_cluster.si[order]); + offset = this_cpu_read(percpu_swap_cluster.offset[order]); + if (!si || !offset || !get_swap_device_info(si)) + return 0; + + while (offset) { + ci = lock_cluster(si, offset); + if (!cluster_is_usable(ci, order)) { + unlock_cluster(ci); + break; + } + if (cluster_is_empty(ci)) + offset = cluster_offset(si, ci); + found = alloc_swap_scan_cluster(si, ci, offset, order, usage); + if (!found) + break; + entries[n_ret++] = swp_entry(si->type, found); + if (n_ret == n_goal) + break; + offset = this_cpu_read(percpu_swap_cluster.offset[order]); + } + + put_swap_device(si); + return n_ret; +} + int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) { int order = swap_entry_order(entry_order); @@ -1204,19 +1259,36 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) int n_ret = 0; int node; + /* Fast path using percpu cluster */ + local_lock(&percpu_swap_cluster.lock); + n_ret = swap_alloc_fast(swp_entries, + SWAP_HAS_CACHE, + order, n_goal); + if (n_ret == n_goal) + goto out; + + n_goal = min_t(int, n_goal - n_ret, SWAP_BATCH); + /* Rotate the device and switch to a new cluster */ spin_lock(&swap_avail_lock); start_over: node = numa_node_id(); plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { - /* requeue si to after same-priority siblings */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { - n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, - n_goal, swp_entries, order); + /* + * For order 0 allocation, try best to fill the request + * as it's used by slot cache. + * + * For mTHP allocation, it always have n_goal == 1, + * and falling a mTHP swapin will just make the caller + * fallback to order 0 allocation, so just bail out. + */ + n_ret += scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, + swp_entries + n_ret, order); put_swap_device(si); if (n_ret || size > 1) - goto check_out; + goto out; } spin_lock(&swap_avail_lock); @@ -1234,12 +1306,10 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) if (plist_node_empty(&next->avail_lists[node])) goto start_over; } - spin_unlock(&swap_avail_lock); - -check_out: +out: + local_unlock(&percpu_swap_cluster.lock); atomic_long_sub(n_ret * size, &nr_swap_pages); - return n_ret; } @@ -2597,6 +2667,28 @@ static void wait_for_allocation(struct swap_info_struct *si) } } +/* + * Called after swap device's reference count is dead, so + * neither scan nor allocation will use it. + */ +static void flush_percpu_swap_cluster(struct swap_info_struct *si) +{ + int cpu, i; + struct swap_info_struct **pcp_si; + + for_each_possible_cpu(cpu) { + pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); + /* + * Invalidate the percpu swap cluster cache, si->users + * is dead, so no new user will point to it, just flush + * any existing user. 
+ */ + for (i = 0; i < SWAP_NR_ORDERS; i++) + cmpxchg(&pcp_si[i], si, NULL); + } +} + + SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; @@ -2698,6 +2790,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) flush_work(&p->discard_work); flush_work(&p->reclaim_work); + flush_percpu_swap_cluster(p); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) @@ -2725,8 +2818,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) arch_swap_invalidate_area(p->type); zswap_swapoff(p->type); mutex_unlock(&swapon_mutex); - free_percpu(p->percpu_cluster); - p->percpu_cluster = NULL; kfree(p->global_cluster); p->global_cluster = NULL; vfree(swap_map); @@ -3125,7 +3216,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); struct swap_cluster_info *cluster_info; unsigned long i, j, idx; - int cpu, err = -ENOMEM; + int err = -ENOMEM; cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); if (!cluster_info) @@ -3134,20 +3225,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); - if (si->flags & SWP_SOLIDSTATE) { - si->percpu_cluster = alloc_percpu(struct percpu_cluster); - if (!si->percpu_cluster) - goto err_free; - - for_each_possible_cpu(cpu) { - struct percpu_cluster *cluster; - - cluster = per_cpu_ptr(si->percpu_cluster, cpu); - for (i = 0; i < SWAP_NR_ORDERS; i++) - cluster->next[i] = SWAP_ENTRY_INVALID; - local_lock_init(&cluster->lock); - } - } else { + if (!(si->flags & SWP_SOLIDSTATE)) { si->global_cluster = kmalloc(sizeof(*si->global_cluster), GFP_KERNEL); if (!si->global_cluster) @@ -3424,8 +3502,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) bad_swap_unlock_inode: inode_unlock(inode); bad_swap: - free_percpu(si->percpu_cluster); - si->percpu_cluster = NULL; kfree(si->global_cluster); si->global_cluster = NULL; inode = NULL; From 0ff67f990bd45726e0d9e91111d998e7a3595b32 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:34 +0800 Subject: [PATCH 211/431] mm, swap: remove swap slot cache
The slot cache is no longer needed now; remove it and all related code.

- vm-scalability with: `usemem --init-time -O -y -x -R -31 1G`, 12G memory cgroup using simulated pmem as SWAP (32G pmem, 32 CPUs), 16 test runs for each case, measuring the total throughput:

                          Before (KB/s) (stdev)     After (KB/s) (stdev)
  Random (4K):            424907.60 (24410.78)      414745.92 (34554.78)
  Random (64K):           163308.82 (11635.72)      167314.50 (18434.99)
  Sequential (4K, !-R):   6150056.79 (103205.90)    6321469.06 (115878.16)

  The performance changes are below noise level.

- Build linux kernel with make -j96, using 4K folio with 1.5G memory cgroup limit and 64K folio with 2G memory cgroup limit, on top of tmpfs, 12 test runs, measuring the system time:

                    Before (s) (stdev)    After (s) (stdev)
  make -j96 (4K):   6445.69 (61.95)       6408.80 (69.46)
  make -j96 (64K):  6841.71 (409.04)      6437.99 (435.55)

  Similar to the above, the 64k mTHP case showed a slight improvement. 
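The per-CPU cluster cache introduced earlier in this series is what makes the slot cache redundant: each CPU keeps a (device, offset) hint per allocation order and only falls back to scanning the swap devices when that hint turns out to be stale. The following is a self-contained, single-threaded userspace toy model of that caching pattern; every name in it is invented for illustration and none of it is the kernel implementation.

/*
 * Toy model of a cached-hint allocator: the fast path consumes a
 * per-order (device, offset) hint, the slow path rescans the devices
 * and refreshes the hint.  Illustrative names only, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

#define NR_ORDERS	2
#define NR_DEVS		2
#define DEV_SLOTS	4

struct toy_dev {
	bool used[DEV_SLOTS];
};

struct toy_hint {
	struct toy_dev *dev;			/* device the hint points into */
	int offset;				/* slot expected to be free */
};

static struct toy_dev devs[NR_DEVS];
static struct toy_hint hints[NR_ORDERS];	/* conceptually per-CPU state */

/* Fast path: trust the hint, fail if it has gone stale. */
static int alloc_fast(int order)
{
	struct toy_hint *h = &hints[order];
	int slot;

	if (!h->dev || h->dev->used[h->offset])
		return -1;			/* stale: caller falls back */
	slot = h->offset;
	h->dev->used[slot] = true;
	h->offset = (slot + 1) % DEV_SLOTS;	/* advance the hint */
	return slot;
}

/* Slow path: scan every device, allocate, and refresh the hint. */
static int alloc_slow(int order)
{
	for (int d = 0; d < NR_DEVS; d++) {
		for (int s = 0; s < DEV_SLOTS; s++) {
			if (devs[d].used[s])
				continue;
			devs[d].used[s] = true;
			hints[order].dev = &devs[d];
			hints[order].offset = (s + 1) % DEV_SLOTS;
			return s;
		}
	}
	return -1;				/* all devices are full */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		int slot = alloc_fast(0);

		if (slot < 0)
			slot = alloc_slow(0);
		printf("allocation %d -> slot %d\n", i, slot);
	}
	return 0;
}

The real allocator additionally guards the hint with a local lock, validates that the cached device is still live via get_swap_device_info(), and rechecks the cluster under its lock, as the diffs above show.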
Link: https://lkml.kernel.org/r/20250313165935.63303-7-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 3 - include/linux/swap_slots.h | 28 ---- mm/Makefile | 2 +- mm/swap_slots.c | 295 ------------------------------------- mm/swap_state.c | 8 +- mm/swapfile.c | 194 ++++++++---------------- 6 files changed, 67 insertions(+), 463 deletions(-) delete mode 100644 include/linux/swap_slots.h delete mode 100644 mm/swap_slots.c diff --git a/include/linux/swap.h b/include/linux/swap.h index 374bffc87427..c5856dcc263a 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -465,7 +465,6 @@ void free_pages_and_swap_cache(struct encoded_page **, int); extern atomic_long_t nr_swap_pages; extern long total_swap_pages; extern atomic_t nr_rotate_swap; -extern bool has_usable_swap(void); /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) @@ -483,13 +482,11 @@ swp_entry_t folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); -extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h deleted file mode 100644 index 840aec3523b2..000000000000 --- a/include/linux/swap_slots.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_SWAP_SLOTS_H -#define _LINUX_SWAP_SLOTS_H - -#include -#include -#include - -#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH -#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE) -#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE) - -struct swap_slots_cache { - bool lock_initialized; - struct mutex alloc_lock; /* protects slots, nr, cur */ - swp_entry_t *slots; - int nr; - int cur; - int n_ret; -}; - -void disable_swap_slots_cache_lock(void); -void reenable_swap_slots_cache_unlock(void); -void enable_swap_slots_cache(void); - -extern bool swap_slot_cache_enabled; - -#endif /* _LINUX_SWAP_SLOTS_H */ diff --git a/mm/Makefile b/mm/Makefile index 2600e94abd3c..84b1127e43a5 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -75,7 +75,7 @@ ifdef CONFIG_MMU obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o endif -obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o +obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o diff --git a/mm/swap_slots.c b/mm/swap_slots.c deleted file mode 100644 index 9c7c171df7ba..000000000000 --- a/mm/swap_slots.c +++ /dev/null @@ -1,295 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Manage cache of swap slots to be used for and returned from - * swap. - * - * Copyright(c) 2016 Intel Corporation. 
- * - * Author: Tim Chen - * - * We allocate the swap slots from the global pool and put - * it into local per cpu caches. This has the advantage - * of no needing to acquire the swap_info lock every time - * we need a new slot. - * - * There is also opportunity to simply return the slot - * to local caches without needing to acquire swap_info - * lock. We do not reuse the returned slots directly but - * move them back to the global pool in a batch. This - * allows the slots to coalesce and reduce fragmentation. - * - * The swap entry allocated is marked with SWAP_HAS_CACHE - * flag in map_count that prevents it from being allocated - * again from the global pool. - * - * The swap slots cache is protected by a mutex instead of - * a spin lock as when we search for slots with scan_swap_map, - * we can possibly sleep. - */ - -#include -#include -#include -#include -#include -#include -#include - -static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots); -static bool swap_slot_cache_active; -bool swap_slot_cache_enabled; -static bool swap_slot_cache_initialized; -static DEFINE_MUTEX(swap_slots_cache_mutex); -/* Serialize swap slots cache enable/disable operations */ -static DEFINE_MUTEX(swap_slots_cache_enable_mutex); - -static void __drain_swap_slots_cache(void); - -#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled) - -static void deactivate_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_mutex); - swap_slot_cache_active = false; - __drain_swap_slots_cache(); - mutex_unlock(&swap_slots_cache_mutex); -} - -static void reactivate_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_mutex); - swap_slot_cache_active = true; - mutex_unlock(&swap_slots_cache_mutex); -} - -/* Must not be called with cpu hot plug lock */ -void disable_swap_slots_cache_lock(void) -{ - mutex_lock(&swap_slots_cache_enable_mutex); - swap_slot_cache_enabled = false; - if (swap_slot_cache_initialized) { - /* serialize with cpu hotplug operations */ - cpus_read_lock(); - __drain_swap_slots_cache(); - cpus_read_unlock(); - } -} - -static void __reenable_swap_slots_cache(void) -{ - swap_slot_cache_enabled = has_usable_swap(); -} - -void reenable_swap_slots_cache_unlock(void) -{ - __reenable_swap_slots_cache(); - mutex_unlock(&swap_slots_cache_enable_mutex); -} - -static bool check_cache_active(void) -{ - long pages; - - if (!swap_slot_cache_enabled) - return false; - - pages = get_nr_swap_pages(); - if (!swap_slot_cache_active) { - if (pages > num_online_cpus() * - THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE) - reactivate_swap_slots_cache(); - goto out; - } - - /* if global pool of slot caches too low, deactivate cache */ - if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE) - deactivate_swap_slots_cache(); -out: - return swap_slot_cache_active; -} - -static int alloc_swap_slot_cache(unsigned int cpu) -{ - struct swap_slots_cache *cache; - swp_entry_t *slots; - - /* - * Do allocation outside swap_slots_cache_mutex - * as kvzalloc could trigger reclaim and folio_alloc_swap, - * which can lock swap_slots_cache_mutex. 
- */ - slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), - GFP_KERNEL); - if (!slots) - return -ENOMEM; - - mutex_lock(&swap_slots_cache_mutex); - cache = &per_cpu(swp_slots, cpu); - if (cache->slots) { - /* cache already allocated */ - mutex_unlock(&swap_slots_cache_mutex); - - kvfree(slots); - - return 0; - } - - if (!cache->lock_initialized) { - mutex_init(&cache->alloc_lock); - cache->lock_initialized = true; - } - cache->nr = 0; - cache->cur = 0; - cache->n_ret = 0; - /* - * We initialized alloc_lock and free_lock earlier. We use - * !cache->slots or !cache->slots_ret to know if it is safe to acquire - * the corresponding lock and use the cache. Memory barrier below - * ensures the assumption. - */ - mb(); - cache->slots = slots; - mutex_unlock(&swap_slots_cache_mutex); - return 0; -} - -static void drain_slots_cache_cpu(unsigned int cpu, bool free_slots) -{ - struct swap_slots_cache *cache; - - cache = &per_cpu(swp_slots, cpu); - if (cache->slots) { - mutex_lock(&cache->alloc_lock); - swapcache_free_entries(cache->slots + cache->cur, cache->nr); - cache->cur = 0; - cache->nr = 0; - if (free_slots && cache->slots) { - kvfree(cache->slots); - cache->slots = NULL; - } - mutex_unlock(&cache->alloc_lock); - } -} - -static void __drain_swap_slots_cache(void) -{ - unsigned int cpu; - - /* - * This function is called during - * 1) swapoff, when we have to make sure no - * left over slots are in cache when we remove - * a swap device; - * 2) disabling of swap slot cache, when we run low - * on swap slots when allocating memory and need - * to return swap slots to global pool. - * - * We cannot acquire cpu hot plug lock here as - * this function can be invoked in the cpu - * hot plug path: - * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback - * -> memory allocation -> direct reclaim -> folio_alloc_swap - * -> drain_swap_slots_cache - * - * Hence the loop over current online cpu below could miss cpu that - * is being brought online but not yet marked as online. - * That is okay as we do not schedule and run anything on a - * cpu before it has been marked online. Hence, we will not - * fill any swap slots in slots cache of such cpu. - * There are no slots on such cpu that need to be drained. 
- */ - for_each_online_cpu(cpu) - drain_slots_cache_cpu(cpu, false); -} - -static int free_slot_cache(unsigned int cpu) -{ - mutex_lock(&swap_slots_cache_mutex); - drain_slots_cache_cpu(cpu, true); - mutex_unlock(&swap_slots_cache_mutex); - return 0; -} - -void enable_swap_slots_cache(void) -{ - mutex_lock(&swap_slots_cache_enable_mutex); - if (!swap_slot_cache_initialized) { - int ret; - - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache", - alloc_swap_slot_cache, free_slot_cache); - if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating " - "without swap slots cache.\n", __func__)) - goto out_unlock; - - swap_slot_cache_initialized = true; - } - - __reenable_swap_slots_cache(); -out_unlock: - mutex_unlock(&swap_slots_cache_enable_mutex); -} - -/* called with swap slot cache's alloc lock held */ -static int refill_swap_slots_cache(struct swap_slots_cache *cache) -{ - if (!use_swap_slot_cache) - return 0; - - cache->cur = 0; - if (swap_slot_cache_active) - cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, - cache->slots, 0); - - return cache->nr; -} - -swp_entry_t folio_alloc_swap(struct folio *folio) -{ - swp_entry_t entry; - struct swap_slots_cache *cache; - - entry.val = 0; - - if (folio_test_large(folio)) { - if (IS_ENABLED(CONFIG_THP_SWAP)) - get_swap_pages(1, &entry, folio_order(folio)); - goto out; - } - - /* - * Preemption is allowed here, because we may sleep - * in refill_swap_slots_cache(). But it is safe, because - * accesses to the per-CPU data structure are protected by the - * mutex cache->alloc_lock. - * - * The alloc path here does not touch cache->slots_ret - * so cache->free_lock is not taken. - */ - cache = raw_cpu_ptr(&swp_slots); - - if (likely(check_cache_active() && cache->slots)) { - mutex_lock(&cache->alloc_lock); - if (cache->slots) { -repeat: - if (cache->nr) { - entry = cache->slots[cache->cur]; - cache->slots[cache->cur++].val = 0; - cache->nr--; - } else if (refill_swap_slots_cache(cache)) { - goto repeat; - } - } - mutex_unlock(&cache->alloc_lock); - if (entry.val) - goto out; - } - - get_swap_pages(1, &entry, 0); -out: - if (mem_cgroup_try_charge_swap(folio, entry)) { - put_swap_folio(folio, entry); - entry.val = 0; - } - return entry; -} diff --git a/mm/swap_state.c b/mm/swap_state.c index 50840a2887a5..2b5744e211cd 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include "internal.h" @@ -447,13 +446,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* * Just skip read ahead for unused swap slot. - * During swap_off when swap_slot_cache is disabled, - * we have to handle the race between putting - * swap entry in swap cache and marking swap slot - * as SWAP_HAS_CACHE. That's done in later part of code or - * else swap_off will be aborted if we return NULL. 
*/ - if (!swap_entry_swapped(si, entry) && swap_slot_cache_enabled) + if (!swap_entry_swapped(si, entry)) goto put_and_return; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 8b296c4c636b..9bd95173865d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -885,16 +884,20 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o struct swap_cluster_info *ci; unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; - if (si->flags & SWP_SOLIDSTATE) { - if (si == this_cpu_read(percpu_swap_cluster.si[order])) - offset = this_cpu_read(percpu_swap_cluster.offset[order]); - } else { + /* + * Swapfile is not block device so unable + * to allocate large entries. + */ + if (order && !(si->flags & SWP_BLKDEV)) + return 0; + + if (!(si->flags & SWP_SOLIDSTATE)) { /* Serialize HDD SWAP allocation for each device. */ spin_lock(&si->global_cluster_lock); offset = si->global_cluster->next[order]; - } + if (offset == SWAP_ENTRY_INVALID) + goto new_cluster; - if (offset) { ci = lock_cluster(si, offset); /* Cluster could have been used by another order */ if (cluster_is_usable(ci, order)) { @@ -1153,43 +1156,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, swap_usage_sub(si, nr_entries); } -static int scan_swap_map_slots(struct swap_info_struct *si, - unsigned char usage, int nr, - swp_entry_t slots[], int order) -{ - unsigned int nr_pages = 1 << order; - int n_ret = 0; - - if (order > 0) { - /* - * Should not even be attempting large allocations when huge - * page swap is disabled. Warn and fail the allocation. - */ - if (!IS_ENABLED(CONFIG_THP_SWAP) || - nr_pages > SWAPFILE_CLUSTER) { - VM_WARN_ON_ONCE(1); - return 0; - } - - /* - * Swapfile is not block device so unable - * to allocate large entries. - */ - if (!(si->flags & SWP_BLKDEV)) - return 0; - } - - while (n_ret < nr) { - unsigned long offset = cluster_alloc_swap_entry(si, order, usage); - - if (!offset) - break; - slots[n_ret++] = swp_entry(si->type, offset); - } - - return n_ret; -} - static bool get_swap_device_info(struct swap_info_struct *si) { if (!percpu_ref_tryget_live(&si->users)) @@ -1210,16 +1176,13 @@ static bool get_swap_device_info(struct swap_info_struct *si) * Fast path try to get swap entries with specified order from current * CPU's swap entry pool (a cluster). 
*/ -static int swap_alloc_fast(swp_entry_t entries[], +static int swap_alloc_fast(swp_entry_t *entry, unsigned char usage, - int order, int n_goal) + int order) { struct swap_cluster_info *ci; struct swap_info_struct *si; - unsigned int offset, found; - int n_ret = 0; - - n_goal = min(n_goal, SWAP_BATCH); + unsigned int offset, found = SWAP_ENTRY_INVALID; /* * Once allocated, swap_info_struct will never be completely freed, @@ -1228,46 +1191,48 @@ static int swap_alloc_fast(swp_entry_t entries[], si = this_cpu_read(percpu_swap_cluster.si[order]); offset = this_cpu_read(percpu_swap_cluster.offset[order]); if (!si || !offset || !get_swap_device_info(si)) - return 0; + return false; - while (offset) { - ci = lock_cluster(si, offset); - if (!cluster_is_usable(ci, order)) { - unlock_cluster(ci); - break; - } + ci = lock_cluster(si, offset); + if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, usage); - if (!found) - break; - entries[n_ret++] = swp_entry(si->type, found); - if (n_ret == n_goal) - break; - offset = this_cpu_read(percpu_swap_cluster.offset[order]); + if (found) + *entry = swp_entry(si->type, found); + } else { + unlock_cluster(ci); } put_swap_device(si); - return n_ret; + return !!found; } -int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) +swp_entry_t folio_alloc_swap(struct folio *folio) { - int order = swap_entry_order(entry_order); - unsigned long size = 1 << order; + unsigned int order = folio_order(folio); + unsigned int size = 1 << order; struct swap_info_struct *si, *next; - int n_ret = 0; + swp_entry_t entry = {}; + unsigned long offset; int node; + if (order) { + /* + * Should not even be attempting large allocations when huge + * page swap is disabled. Warn and fail the allocation. + */ + if (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER) { + VM_WARN_ON_ONCE(1); + return entry; + } + } + /* Fast path using percpu cluster */ local_lock(&percpu_swap_cluster.lock); - n_ret = swap_alloc_fast(swp_entries, - SWAP_HAS_CACHE, - order, n_goal); - if (n_ret == n_goal) + if (swap_alloc_fast(&entry, SWAP_HAS_CACHE, order)) goto out; - n_goal = min_t(int, n_goal - n_ret, SWAP_BATCH); /* Rotate the device and switch to a new cluster */ spin_lock(&swap_avail_lock); start_over: @@ -1276,18 +1241,13 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { - /* - * For order 0 allocation, try best to fill the request - * as it's used by slot cache. - * - * For mTHP allocation, it always have n_goal == 1, - * and falling a mTHP swapin will just make the caller - * fallback to order 0 allocation, so just bail out. - */ - n_ret += scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, - swp_entries + n_ret, order); + offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); put_swap_device(si); - if (n_ret || size > 1) + if (offset) { + entry = swp_entry(si->type, offset); + goto out; + } + if (order) goto out; } @@ -1309,8 +1269,14 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) spin_unlock(&swap_avail_lock); out: local_unlock(&percpu_swap_cluster.lock); - atomic_long_sub(n_ret * size, &nr_swap_pages); - return n_ret; + /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. 
*/ + if (mem_cgroup_try_charge_swap(folio, entry)) { + put_swap_folio(folio, entry); + entry.val = 0; + } + if (entry.val) + atomic_long_sub(size, &nr_swap_pages); + return entry; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) @@ -1606,25 +1572,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster(ci); } -void swapcache_free_entries(swp_entry_t *entries, int n) -{ - int i; - struct swap_cluster_info *ci; - struct swap_info_struct *si = NULL; - - if (n <= 0) - return; - - for (i = 0; i < n; ++i) { - si = _swap_info_get(entries[i]); - if (si) { - ci = lock_cluster(si, swp_offset(entries[i])); - swap_entry_range_free(si, ci, entries[i], 1); - unlock_cluster(ci); - } - } -} - int __swap_count(swp_entry_t entry) { struct swap_info_struct *si = swp_swap_info(entry); @@ -1865,6 +1812,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr) swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si = swap_type_to_swap_info(type); + unsigned long offset; swp_entry_t entry = {0}; if (!si) @@ -1872,8 +1820,13 @@ swp_entry_t get_swap_page_of_type(int type) /* This is called for allocating swap entry, not cache */ if (get_swap_device_info(si)) { - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) - atomic_long_dec(&nr_swap_pages); + if (si->flags & SWP_WRITEOK) { + offset = cluster_alloc_swap_entry(si, 0, 1); + if (offset) { + entry = swp_entry(si->type, offset); + atomic_long_dec(&nr_swap_pages); + } + } put_swap_device(si); } fail: @@ -2634,21 +2587,6 @@ static void reinsert_swap_info(struct swap_info_struct *si) spin_unlock(&swap_lock); } -static bool __has_usable_swap(void) -{ - return !plist_head_empty(&swap_active_head); -} - -bool has_usable_swap(void) -{ - bool ret; - - spin_lock(&swap_lock); - ret = __has_usable_swap(); - spin_unlock(&swap_lock); - return ret; -} - /* * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range * see the updated flags, so there will be no more allocations. @@ -2761,8 +2699,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) wait_for_allocation(p); - disable_swap_slots_cache_lock(); - set_current_oom_origin(); err = try_to_unuse(p->type); clear_current_oom_origin(); @@ -2770,12 +2706,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) if (err) { /* re-insert swap space back into swap_list */ reinsert_swap_info(p); - reenable_swap_slots_cache_unlock(); goto out_dput; } - reenable_swap_slots_cache_unlock(); - /* * Wait for swap operations protected by get/put_swap_device() * to complete. Because of synchronize_rcu() here, all swap @@ -3525,8 +3458,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) putname(name); if (inode) inode_unlock(inode); - if (!error) - enable_swap_slots_cache(); return error; } @@ -3922,6 +3853,11 @@ static void free_swap_count_continuations(struct swap_info_struct *si) } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) +static bool __has_usable_swap(void) +{ + return !plist_head_empty(&swap_active_head); +} + void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { struct swap_info_struct *si, *next; From b487a2da3575b6cdfb6d6559311830c8fea70bb9 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Fri, 14 Mar 2025 00:59:35 +0800 Subject: [PATCH 212/431] mm, swap: simplify folio swap allocation With slot cache gone, clean up the allocation helpers even more. 
folio_alloc_swap will be the only entry for allocation and adding the folio to swap cache (except suspend), making it opposite of folio_free_swap. Link: https://lkml.kernel.org/r/20250313165935.63303-8-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Baolin Wang Cc: Baoquan He Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Matthew Wilcow (Oracle) Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 8 ++- mm/shmem.c | 21 +++----- mm/swap.h | 6 --- mm/swap_state.c | 57 ---------------------- mm/swapfile.c | 113 ++++++++++++++++++++++++++++--------------- mm/vmscan.c | 16 +++++- 6 files changed, 96 insertions(+), 125 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index c5856dcc263a..9c99eee160f9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -478,7 +478,7 @@ static inline long get_nr_swap_pages(void) } extern void si_swapinfo(struct sysinfo *); -swp_entry_t folio_alloc_swap(struct folio *folio); +int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); @@ -586,11 +586,9 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -static inline swp_entry_t folio_alloc_swap(struct folio *folio) +static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask) { - swp_entry_t entry; - entry.val = 0; - return entry; + return -EINVAL; } static inline bool folio_free_swap(struct folio *folio) diff --git a/mm/shmem.c b/mm/shmem.c index 15fa7fa9c8e8..b276ae233dfa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1533,7 +1533,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); - swp_entry_t swap; pgoff_t index; int nr_pages; bool split = false; @@ -1615,14 +1614,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) folio_mark_uptodate(folio); } - swap = folio_alloc_swap(folio); - if (!swap.val) { - if (nr_pages > 1) - goto try_split; - - goto redirty; - } - /* * Add inode to shmem_unuse()'s list of swapped-out inodes, * if it's not already there. 
Do it now before the folio is @@ -1635,20 +1626,20 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) if (list_empty(&info->swaplist)) list_add(&info->swaplist, &shmem_swaplist); - if (add_to_swap_cache(folio, swap, - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, - NULL) == 0) { + if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) { shmem_recalc_inode(inode, 0, nr_pages); - swap_shmem_alloc(swap, nr_pages); - shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); + swap_shmem_alloc(folio->swap, nr_pages); + shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap)); mutex_unlock(&shmem_swaplist_mutex); BUG_ON(folio_mapped(folio)); return swap_writepage(&folio->page, wbc); } + list_del_init(&info->swaplist); mutex_unlock(&shmem_swaplist_mutex); - put_swap_folio(folio, swap); + if (nr_pages > 1) + goto try_split; redirty: folio_mark_dirty(folio); if (wbc->for_reclaim) diff --git a/mm/swap.h b/mm/swap.h index ad2f121de970..0abb68091b4f 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -50,7 +50,6 @@ static inline pgoff_t swap_cache_index(swp_entry_t entry) } void show_swap_cache_info(void); -bool add_to_swap(struct folio *folio); void *get_shadow_from_swap_cache(swp_entry_t entry); int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp); @@ -163,11 +162,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping, return filemap_get_folio(mapping, index); } -static inline bool add_to_swap(struct folio *folio) -{ - return false; -} - static inline void *get_shadow_from_swap_cache(swp_entry_t entry) { return NULL; diff --git a/mm/swap_state.c b/mm/swap_state.c index 2b5744e211cd..68fd981b514f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -166,63 +166,6 @@ void __delete_from_swap_cache(struct folio *folio, __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr); } -/** - * add_to_swap - allocate swap space for a folio - * @folio: folio we want to move to swap - * - * Allocate swap space for the folio and add the folio to the - * swap cache. - * - * Context: Caller needs to hold the folio lock. - * Return: Whether the folio was added to the swap cache. - */ -bool add_to_swap(struct folio *folio) -{ - swp_entry_t entry; - int err; - - VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); - - entry = folio_alloc_swap(folio); - if (!entry.val) - return false; - - /* - * XArray node allocations from PF_MEMALLOC contexts could - * completely exhaust the page allocator. __GFP_NOMEMALLOC - * stops emergency reserves from being allocated. - * - * TODO: this could cause a theoretical memory reclaim - * deadlock in the swap out path. - */ - /* - * Add it to the swap cache. - */ - err = add_to_swap_cache(folio, entry, - __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL); - if (err) - goto fail; - /* - * Normally the folio will be dirtied in unmap because its - * pte should be dirty. A special case is MADV_FREE page. The - * page's pte could have dirty bit cleared but the folio's - * SwapBacked flag is still set because clearing the dirty bit - * and SwapBacked flag has no lock protected. For such folio, - * unmap will not set dirty bit for it, so folio reclaim will - * not write the folio out. This can cause data corruption when - * the folio is swapped in later. Always setting the dirty flag - * for the folio solves the problem. 
- */ - folio_mark_dirty(folio); - - return true; - -fail: - put_swap_folio(folio, entry); - return false; -} - /* * This must be called only on folios that have * been verified to be in the swap cache and locked. diff --git a/mm/swapfile.c b/mm/swapfile.c index 9bd95173865d..2eff8b51a945 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1176,9 +1176,8 @@ static bool get_swap_device_info(struct swap_info_struct *si) * Fast path try to get swap entries with specified order from current * CPU's swap entry pool (a cluster). */ -static int swap_alloc_fast(swp_entry_t *entry, - unsigned char usage, - int order) +static bool swap_alloc_fast(swp_entry_t *entry, + int order) { struct swap_cluster_info *ci; struct swap_info_struct *si; @@ -1197,7 +1196,7 @@ static int swap_alloc_fast(swp_entry_t *entry, if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); - found = alloc_swap_scan_cluster(si, ci, offset, order, usage); + found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); if (found) *entry = swp_entry(si->type, found); } else { @@ -1208,47 +1207,30 @@ static int swap_alloc_fast(swp_entry_t *entry, return !!found; } -swp_entry_t folio_alloc_swap(struct folio *folio) +/* Rotate the device and switch to a new cluster */ +static bool swap_alloc_slow(swp_entry_t *entry, + int order) { - unsigned int order = folio_order(folio); - unsigned int size = 1 << order; - struct swap_info_struct *si, *next; - swp_entry_t entry = {}; - unsigned long offset; int node; + unsigned long offset; + struct swap_info_struct *si, *next; - if (order) { - /* - * Should not even be attempting large allocations when huge - * page swap is disabled. Warn and fail the allocation. - */ - if (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER) { - VM_WARN_ON_ONCE(1); - return entry; - } - } - - /* Fast path using percpu cluster */ - local_lock(&percpu_swap_cluster.lock); - if (swap_alloc_fast(&entry, SWAP_HAS_CACHE, order)) - goto out; - - /* Rotate the device and switch to a new cluster */ + node = numa_node_id(); spin_lock(&swap_avail_lock); start_over: - node = numa_node_id(); plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { + /* Rotate the device and switch to a new cluster */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); put_swap_device(si); if (offset) { - entry = swp_entry(si->type, offset); - goto out; + *entry = swp_entry(si->type, offset); + return true; } if (order) - goto out; + return false; } spin_lock(&swap_avail_lock); @@ -1267,16 +1249,67 @@ swp_entry_t folio_alloc_swap(struct folio *folio) goto start_over; } spin_unlock(&swap_avail_lock); -out: - local_unlock(&percpu_swap_cluster.lock); - /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ - if (mem_cgroup_try_charge_swap(folio, entry)) { - put_swap_folio(folio, entry); - entry.val = 0; + return false; +} + +/** + * folio_alloc_swap - allocate swap space for a folio + * @folio: folio we want to move to swap + * @gfp: gfp mask for shadow nodes + * + * Allocate swap space for the folio and add the folio to the + * swap cache. + * + * Context: Caller needs to hold the folio lock. + * Return: Whether the folio was added to the swap cache. 
+ */ +int folio_alloc_swap(struct folio *folio, gfp_t gfp) +{ + unsigned int order = folio_order(folio); + unsigned int size = 1 << order; + swp_entry_t entry = {}; + + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); + + /* + * Should not even be attempting large allocations when huge + * page swap is disabled. Warn and fail the allocation. + */ + if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) { + VM_WARN_ON_ONCE(1); + return -EINVAL; } - if (entry.val) - atomic_long_sub(size, &nr_swap_pages); - return entry; + + local_lock(&percpu_swap_cluster.lock); + if (!swap_alloc_fast(&entry, order)) + swap_alloc_slow(&entry, order); + local_unlock(&percpu_swap_cluster.lock); + + /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ + if (mem_cgroup_try_charge_swap(folio, entry)) + goto out_free; + + if (!entry.val) + return -ENOMEM; + + /* + * XArray node allocations from PF_MEMALLOC contexts could + * completely exhaust the page allocator. __GFP_NOMEMALLOC + * stops emergency reserves from being allocated. + * + * TODO: this could cause a theoretical memory reclaim + * deadlock in the swap out path. + */ + if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL)) + goto out_free; + + atomic_long_sub(size, &nr_swap_pages); + return 0; + +out_free: + put_swap_folio(folio, entry); + return -ENOMEM; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) diff --git a/mm/vmscan.c b/mm/vmscan.c index fcca38bc640f..be00af3763b5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1289,7 +1289,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, split_folio_to_list(folio, folio_list)) goto activate_locked; } - if (!add_to_swap(folio)) { + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) { int __maybe_unused order = folio_order(folio); if (!folio_test_large(folio)) @@ -1305,9 +1305,21 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, } #endif count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); - if (!add_to_swap(folio)) + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) goto activate_locked_split; } + /* + * Normally the folio will be dirtied in unmap because its + * pte should be dirty. A special case is MADV_FREE page. The + * page's pte could have dirty bit cleared but the folio's + * SwapBacked flag is still set because clearing the dirty bit + * and SwapBacked flag has no lock protected. For such folio, + * unmap will not set dirty bit for it, so folio reclaim will + * not write the folio out. This can cause data corruption when + * the folio is swapped in later. Always setting the dirty flag + * for the folio solves the problem. + */ + folio_mark_dirty(folio); } } From 88fb7794f69375b08ff5432d8b5bc79eeb3e1dbe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Feb 2025 08:30:33 -0800 Subject: [PATCH 213/431] vmalloc: drop Christoph from Reviewers I haven't been doing as much review as I should. As part of reducing my inbox flow drop me from the official Reviewers. I might still chime in on patches occasionally. 
Link: https://lkml.kernel.org/r/20250224163033.350072-1-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 40f233b0fa9c..c13201979633 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -25301,7 +25301,6 @@ F: tools/testing/vsock/ VMALLOC M: Andrew Morton R: Uladzislau Rezki -R: Christoph Hellwig L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org From ebc29409c2966bd6a6215b27fc654de7f55ce099 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Tue, 25 Feb 2025 18:45:09 +0000 Subject: [PATCH 214/431] mm/page_alloc: warn on nr_reserved_highatomic underflow As documented in the comment this underflow should not happen. The locking has indeed changed here since the comment was written, see the migratetype hygiene patches[0]. However, those changes made the locking _safer_, so the underflow _really_ shouldn't happen now. So upgrade the comment to a warning. [0] https://lore.kernel.org/all/20240320180429.678181-7-hannes@cmpxchg.org/T/#m3da87e6cc3348a4640aa298137bc9f8f61b76c84 Link: https://lkml.kernel.org/r/20250225-warn-underflow-v1-1-3dc542941d3a@google.com Signed-off-by: Brendan Jackman Reviewed-by: Vlastimil Babka Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/page_alloc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 945437d7ac44..3c5624380b6c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3095,6 +3095,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, if (!page) continue; + size = max(pageblock_nr_pages, 1UL << order); /* * It should never happen but changes to * locking could inadvertently allow a per-cpu @@ -3102,8 +3103,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * while unreserving so be safe and watch for * underflows. */ - size = max(pageblock_nr_pages, 1UL << order); - size = min(size, zone->nr_reserved_highatomic); + if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) + size = zone->nr_reserved_highatomic; zone->nr_reserved_highatomic -= size; /* From a58f3dcf20ea9e7e968ee8369fd782bbb53dff73 Mon Sep 17 00:00:00 2001 From: Seongjun Kim Date: Wed, 26 Feb 2025 10:42:04 -0800 Subject: [PATCH 215/431] samples/damon: a typo in the kconfig - sameple There is a typo in the Kconfig file of the damon sample module. Correct it: s/sameple/sample/ Link: https://lkml.kernel.org/r/20250226184204.29370-1-sj@kernel.org Signed-off-by: Seongjun Kim Signed-off-by: SeongJae Park Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- samples/damon/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig index 63f6dcd71daa..564c49ed69a2 100644 --- a/samples/damon/Kconfig +++ b/samples/damon/Kconfig @@ -3,7 +3,7 @@ menu "DAMON Samples" config SAMPLE_DAMON_WSSE - bool "DAMON sameple module for working set size estimation" + bool "DAMON sample module for working set size estimation" depends on DAMON && DAMON_VADDR help This builds DAMON sample module for working set size estimation. @@ -15,7 +15,7 @@ config SAMPLE_DAMON_WSSE If unsure, say N. 
config SAMPLE_DAMON_PRCL - bool "DAMON sameple module for access-aware proactive reclamation" + bool "DAMON sample module for access-aware proactive reclamation" depends on DAMON && DAMON_VADDR help This builds DAMON sample module for access-aware proactive From 173a3dc051bda746e284ff3933fcf6771d7247b8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 26 Feb 2025 15:36:12 +0000 Subject: [PATCH 216/431] mm: assert the folio is locked in folio_start_writeback() The folio must be locked when we start writeback in order to prevent writeback from being started twice on the same folio. I don't expect this to catch any problems, but it should be good documentation. Link: https://lkml.kernel.org/r/20250226153614.3774896-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: "Darrick J. Wong" Signed-off-by: Andrew Morton --- mm/page-writeback.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index eb55ece39c56..8b325aa525eb 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -3109,6 +3109,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write) int access_ret; VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (mapping && mapping_use_writeback_tags(mapping)) { XA_STATE(xas, &mapping->i_pages, folio_index(folio)); From 66add5e9093b4b4112aebacbc91d43ae4e030456 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 26 Feb 2025 14:22:53 +0100 Subject: [PATCH 217/431] lib/test_hmm: make dmirror_atomic_map() consume a single page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: cleanups for device-exclusive entries (hmm)", v2. Some smaller device-exclusive cleanups I have lying around. This patch (of 5): The caller now always passes a single page; let's simplify, and return "0" on success. Link: https://lkml.kernel.org/r/20250226132257.2826043-1-david@redhat.com Link: https://lkml.kernel.org/r/20250226132257.2826043-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Alistair Popple Cc: Jason Gunthorpe Cc: Jérôme Glisse Signed-off-by: Andrew Morton --- lib/test_hmm.c | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index e4afca8d1880..39a2286f8592 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -706,34 +706,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start, return 0; } -static int dmirror_atomic_map(unsigned long start, unsigned long end, - struct page **pages, struct dmirror *dmirror) +static int dmirror_atomic_map(unsigned long addr, struct page *page, + struct dmirror *dmirror) { - unsigned long pfn, mapped = 0; - int i; + void *entry; /* Map the migrated pages into the device's page tables. 
*/ mutex_lock(&dmirror->mutex); - for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) { - void *entry; - - if (!pages[i]) - continue; - - entry = pages[i]; - entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC); - entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); - if (xa_is_err(entry)) { - mutex_unlock(&dmirror->mutex); - return xa_err(entry); - } - - mapped++; + entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC); + entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC); + if (xa_is_err(entry)) { + mutex_unlock(&dmirror->mutex); + return xa_err(entry); } mutex_unlock(&dmirror->mutex); - return mapped; + return 0; } static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, @@ -803,8 +792,7 @@ static int dmirror_exclusive(struct dmirror *dmirror, break; } - ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror); - ret = ret == 1 ? 0 : -EBUSY; + ret = dmirror_atomic_map(addr, page, dmirror); folio_unlock(folio); folio_put(folio); } From db0f6e674c2b61ff9d8880e7ae5ed11681fe9651 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 26 Feb 2025 14:22:54 +0100 Subject: [PATCH 218/431] mm/memory: remove PageAnonExclusive sanity-check in restore_exclusive_pte() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit b832a354d787 ("mm/memory: page_add_anon_rmap() -> folio_add_anon_rmap_pte()") we accidentally changed the sanity check to essentially ignore anonymous folio by mis-placing the "!" ... but we really always only get anonymous folios in restore_exclusive_pte(). However, in the meantime we removed the separate "writable device-exclusive entries" and always detect if the PTE can be writable using can_change_pte_writable() -- which also consults PageAnonExclusive. So let's just get rid of this sanity check completely. Link: https://lkml.kernel.org/r/20250226132257.2826043-3-david@redhat.com Signed-off-by: David Hildenbrand Cc: Alistair Popple Cc: Jason Gunthorpe Cc: Jérôme Glisse Signed-off-by: Andrew Morton --- mm/memory.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 270c9357475d..b207e3175392 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -738,9 +738,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, pte = pte_mkdirty(pte); pte = pte_mkwrite(pte, vma); } - - VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && - PageAnonExclusive(page)), folio); set_pte_at(vma->vm_mm, address, ptep, pte); /* From 248624f9c6b454c352fd7a95921706e489ae990f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 26 Feb 2025 14:22:55 +0100 Subject: [PATCH 219/431] mm/memory: pass folio and pte to restore_exclusive_pte() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's pass the folio and the pte to restore_exclusive_pte(), so we can avoid repeated page_folio() and ptep_get(). To do that, pass the pte to try_restore_exclusive_pte() and use a folio in there already. While at it, just avoid the "swp_entry_t entry" variable in try_restore_exclusive_pte() and add a folio-locked check to restore_exclusive_pte(). 
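The shape of this cleanup is a common one: values the caller has already derived, the folio via page_folio() and the pte value via ptep_get(), are passed down instead of being re-derived inside the helper, and the helper asserts the invariant the caller established rather than re-establishing it. A minimal self-contained sketch of that pattern, with purely illustrative names and no kernel interfaces:

/*
 * Illustrative-only sketch: the caller resolves and locks the object
 * once and snapshots the value it read; the callee works on what it
 * was handed and merely asserts the caller's invariant.
 */
#include <assert.h>
#include <stdio.h>
#include <stdbool.h>

struct object {
	bool locked;
	int state;
};

/* Callee: no repeated lookup, no repeated read. */
static void restore_state(struct object *obj, int snapshot)
{
	assert(obj->locked);		/* invariant supplied by the caller */
	obj->state = snapshot + 1;	/* work with the passed-in snapshot */
}

int main(void)
{
	struct object obj = { .locked = false, .state = 41 };
	int snapshot;

	obj.locked = true;		/* caller takes the lock ... */
	snapshot = obj.state;		/* ... and reads the value once */
	restore_state(&obj, snapshot);
	obj.locked = false;

	printf("state = %d\n", obj.state);	/* prints 42 */
	return 0;
}

In the patch itself the snapshot is orig_pte and the asserted invariant is the folio lock, checked with VM_WARN_ON_FOLIO() in restore_exclusive_pte().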
Link: https://lkml.kernel.org/r/20250226132257.2826043-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Alistair Popple Cc: Jason Gunthorpe Cc: Jérôme Glisse Signed-off-by: Andrew Morton --- mm/memory.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index b207e3175392..7b0317d07d4f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -717,14 +717,13 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, #endif static void restore_exclusive_pte(struct vm_area_struct *vma, - struct page *page, unsigned long address, - pte_t *ptep) + struct folio *folio, struct page *page, unsigned long address, + pte_t *ptep, pte_t orig_pte) { - struct folio *folio = page_folio(page); - pte_t orig_pte; pte_t pte; - orig_pte = ptep_get(ptep); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); @@ -751,16 +750,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, * Tries to restore an exclusive pte if the page lock can be acquired without * sleeping. */ -static int -try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, - unsigned long addr) +static int try_restore_exclusive_pte(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t orig_pte) { - swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); - struct page *page = pfn_swap_entry_to_page(entry); + struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte)); + struct folio *folio = page_folio(page); - if (trylock_page(page)) { - restore_exclusive_pte(vma, page, addr, src_pte); - unlock_page(page); + if (folio_trylock(folio)) { + restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte); + folio_unlock(folio); return 0; } @@ -866,7 +864,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, * (ie. COW) mappings. */ VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); - if (try_restore_exclusive_pte(src_pte, src_vma, addr)) + if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte)) return -EBUSY; return -ENOENT; } else if (is_pte_marker_entry(entry)) { @@ -3987,7 +3985,8 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) - restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); + restore_exclusive_pte(vma, folio, vmf->page, vmf->address, + vmf->pte, vmf->orig_pte); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); From 2f95381f8a4cb1fd19ed67523d4bc98d8a117ec1 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 26 Feb 2025 14:22:56 +0100 Subject: [PATCH 220/431] mm/memory: document restore_exclusive_pte() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's document how this function is to be used, and why the folio lock is involved. 
Link: https://lkml.kernel.org/r/20250226132257.2826043-5-david@redhat.com Signed-off-by: David Hildenbrand Cc: Alistair Popple Cc: Jason Gunthorpe Cc: Jérôme Glisse Signed-off-by: Andrew Morton --- mm/memory.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index 7b0317d07d4f..5b475d31ce41 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -716,6 +716,32 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, } #endif +/** + * restore_exclusive_pte - Restore a device-exclusive entry + * @vma: VMA covering @address + * @folio: the mapped folio + * @page: the mapped folio page + * @address: the virtual address + * @ptep: pte pointer into the locked page table mapping the folio page + * @orig_pte: pte value at @ptep + * + * Restore a device-exclusive non-swap entry to an ordinary present pte. + * + * The folio and the page table must be locked, and MMU notifiers must have + * been called to invalidate any (exclusive) device mappings. + * + * Locking the folio makes sure that anybody who just converted the pte to + * a device-exclusive entry can map it into the device to make forward + * progress without others converting it back until the folio was unlocked. + * + * If the folio lock ever becomes an issue, we can stop relying on the folio + * lock; it might make some scenarios with heavy thrashing less likely to + * make forward progress, but these scenarios might not be valid use cases. + * + * Note that the folio lock does not protect against all cases of concurrent + * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers + * must use MMU notifiers to sync against any concurrent changes. + */ static void restore_exclusive_pte(struct vm_area_struct *vma, struct folio *folio, struct page *page, unsigned long address, pte_t *ptep, pte_t orig_pte) From 720ba85040a6549674e5599685ca7df86a902448 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 26 Feb 2025 14:22:57 +0100 Subject: [PATCH 221/431] mm/mmu_notifier: use MMU_NOTIFY_CLEAR in remove_device_exclusive_entry() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's limit the use of MMU_NOTIFY_EXCLUSIVE to the case where we convert a present PTE to device-exclusive. For the other case, we can simply use MMU_NOTIFY_CLEAR, because it really is clearing the device-exclusive entry first, to then install the present entry. Update the documentation of MMU_NOTIFY_EXCLUSIVE, to document the single use case more thoroughly. If ever required, we could add a separate MMU_NOTIFY_CLEAR_EXCLUSIVE; for now using MMU_NOTIFY_CLEAR seems to be sufficient. Link: https://lkml.kernel.org/r/20250226132257.2826043-6-david@redhat.com Signed-off-by: David Hildenbrand Cc: Alistair Popple Cc: Jason Gunthorpe Cc: Jérôme Glisse Signed-off-by: Andrew Morton --- include/linux/mmu_notifier.h | 8 ++++---- mm/memory.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index d4e714661826..bc2402a45741 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -43,10 +43,10 @@ struct mmu_interval_notifier; * a device driver to possibly ignore the invalidation if the * owner field matches the driver's device private pgmap owner. * - * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no - * longer have exclusive access to the page. 
When sent during creation of an - * exclusive range the owner will be initialised to the value provided by the - * caller of make_device_exclusive(), otherwise the owner will be NULL. + * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive. + * The owner is initialized to the value provided by the caller of + * make_device_exclusive(), such that this caller can filter out these + * events. */ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, diff --git a/mm/memory.c b/mm/memory.c index 5b475d31ce41..4c12a05fabd9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4003,7 +4003,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) folio_put(folio); return ret; } - mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, + mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); mmu_notifier_invalidate_range_start(&range); From 9a4f9e2a81d1d8d159110e135dddd708266241f1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 26 Feb 2025 17:54:00 +0530 Subject: [PATCH 222/431] configs: drop GENERIC_PTDUMP from debug.config Patch series "mm: Rework generic PTDUMP configs", v3. The series reworks generic PTDUMP configs before eventually renaming them after some basic cleanups first. This patch (of 5): The platforms that support GENERIC_PTDUMP select the config explicitly. But enabling this feature on platforms that don't really support - does nothing or might cause a build failure. Hence just drop GENERIC_PTDUMP from generic debug.config Link: https://lkml.kernel.org/r/20250226122404.1927473-1-anshuman.khandual@arm.com Link: https://lkml.kernel.org/r/20250226122404.1927473-2-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Steven Price Cc: Christophe Leroy Cc: Mark Rutland Cc: Catalin Marinas Cc: Heiko Carstens Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Madhavan Srinivasan Cc: Marc Zyngier Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton --- kernel/configs/debug.config | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config index 20552f163930..8aafd050b754 100644 --- a/kernel/configs/debug.config +++ b/kernel/configs/debug.config @@ -73,7 +73,6 @@ CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_VM_RB=y CONFIG_DEBUG_VM_VMACACHE=y -CONFIG_GENERIC_PTDUMP=y CONFIG_KASAN=y CONFIG_KASAN_GENERIC=y CONFIG_KASAN_INLINE=y From 2c5e6ac2db64ace51f66a9f3b3b3ab9553d748e8 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 26 Feb 2025 17:54:01 +0530 Subject: [PATCH 223/431] arch/powerpc: drop GENERIC_PTDUMP from mpc885_ads_defconfig GENERIC_PTDUMP gets selected on powerpc explicitly and hence can be dropped off from mpc885_ads_defconfig. Replace with CONFIG_PTDUMP_DEBUGFS instead. 
Link: https://lkml.kernel.org/r/20250226122404.1927473-3-anshuman.khandual@arm.com Fixes: e084728393a5 ("powerpc/ptdump: Convert powerpc to GENERIC_PTDUMP") Signed-off-by: Anshuman Khandual Suggested-by: Christophe Leroy Reviewed-by: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Catalin Marinas Cc: Heiko Carstens Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Marc Zyngier Cc: Mark Rutland Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Steven Price Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/powerpc/configs/mpc885_ads_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 77306be62e9e..129355f87f80 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y From a5c96dfd47d88658ac9cdece96e98c2ef17ab465 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 26 Feb 2025 17:54:02 +0530 Subject: [PATCH 224/431] docs: arm64: drop PTDUMP config options from ptdump.rst Both GENERIC_PTDUMP and PTDUMP_CORE are not user selectable config options. Just drop these from documentation. Link: https://lkml.kernel.org/r/20250226122404.1927473-4-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Suggested-by: Steven Price Acked-by: Catalin Marinas Cc: Will Deacon Cc: Jonathan Corbet Cc: Christophe Leroy Cc: Heiko Carstens Cc: Ingo Molnar Cc: Madhavan Srinivasan Cc: Marc Zyngier Cc: Mark Rutland Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Thomas Gleixner Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- Documentation/arch/arm64/ptdump.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/Documentation/arch/arm64/ptdump.rst b/Documentation/arch/arm64/ptdump.rst index 5dcfc5d7cddf..51eb902ba41a 100644 --- a/Documentation/arch/arm64/ptdump.rst +++ b/Documentation/arch/arm64/ptdump.rst @@ -22,8 +22,6 @@ offlining of memory being accessed by the ptdump code. In order to dump the kernel page tables, enable the following configurations and mount debugfs:: - CONFIG_GENERIC_PTDUMP=y - CONFIG_PTDUMP_CORE=y CONFIG_PTDUMP_DEBUGFS=y mount -t debugfs nodev /sys/kernel/debug From 3f54872454a927a2b5f9fb3e2d3cdbd51b3666b7 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 26 Feb 2025 17:54:03 +0530 Subject: [PATCH 225/431] mm: make DEBUG_WX depdendent on GENERIC_PTDUMP DEBUG_WX selects PTDUMP_CORE without even ensuring that the given platform implements GENERIC_PTDUMP. This problem has been latent until now, as all the platforms subscribing ARCH_HAS_DEBUG_WX also subscribe GENERIC_PTDUMP. 
Link: https://lkml.kernel.org/r/20250226122404.1927473-5-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Steven Price Reviewed-by: Christophe Leroy Cc: Catalin Marinas Cc: Heiko Carstens Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Madhavan Srinivasan Cc: Marc Zyngier Cc: Mark Rutland Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/Kconfig.debug | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 41a58536531d..a51a1149909a 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -186,6 +186,7 @@ config ARCH_HAS_DEBUG_WX config DEBUG_WX bool "Warn on W+X mappings at boot" depends on ARCH_HAS_DEBUG_WX + depends on GENERIC_PTDUMP depends on MMU select PTDUMP_CORE help From f9aad622006bd64c28fdf73c03a1c5139fcbf049 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 26 Feb 2025 17:54:04 +0530 Subject: [PATCH 226/431] mm: rename GENERIC_PTDUMP and PTDUMP_CORE Platforms subscribe into generic ptdump implementation via GENERIC_PTDUMP. But generic ptdump gets enabled via PTDUMP_CORE. These configs combination is confusing as they sound very similar and does not differentiate between platform's feature subscription and feature enablement for ptdump. Rename the configs as ARCH_HAS_PTDUMP and PTDUMP making it more clear and improve readability. Link: https://lkml.kernel.org/r/20250226122404.1927473-6-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christophe Leroy (powerpc) Acked-by: Catalin Marinas [arm64] Cc: Will Deacon Cc: Jonathan Corbet Cc: Marc Zyngier Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Mark Rutland Cc: Steven Price Signed-off-by: Andrew Morton --- arch/arm64/Kconfig | 2 +- arch/arm64/include/asm/ptdump.h | 4 ++-- arch/arm64/kvm/Kconfig | 4 ++-- arch/arm64/mm/Makefile | 2 +- arch/powerpc/Kconfig | 2 +- arch/powerpc/mm/Makefile | 2 +- arch/riscv/Kconfig | 2 +- arch/riscv/mm/Makefile | 2 +- arch/s390/Kconfig | 2 +- arch/s390/mm/Makefile | 2 +- arch/x86/Kconfig | 2 +- arch/x86/Kconfig.debug | 2 +- arch/x86/mm/Makefile | 2 +- mm/Kconfig.debug | 12 ++++++------ mm/Makefile | 2 +- 15 files changed, 22 insertions(+), 22 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 940343beb3d4..5cf688ee01b7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -41,6 +41,7 @@ config ARM64 select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_HW_PTE_YOUNG @@ -157,7 +158,6 @@ config ARM64 select GENERIC_IRQ_SHOW_LEVEL select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP - select GENERIC_PTDUMP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 6cf4aae05219..b2931d1ae0fb 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -7,7 +7,7 @@ #include -#ifdef CONFIG_PTDUMP_CORE +#ifdef CONFIG_PTDUMP #include #include @@ -70,6 +70,6 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info, #else static inline void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 
val) { } -#endif /* CONFIG_PTDUMP_CORE */ +#endif /* CONFIG_PTDUMP */ #endif /* __ASM_PTDUMP_H */ diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index ead632ad01b4..096e45acadb2 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -71,8 +71,8 @@ config PTDUMP_STAGE2_DEBUGFS depends on KVM depends on DEBUG_KERNEL depends on DEBUG_FS - depends on GENERIC_PTDUMP - select PTDUMP_CORE + depends on ARCH_HAS_PTDUMP + select PTDUMP default n help Say Y here if you want to show the stage-2 kernel pagetables diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index fc92170a8f37..c26489cf96cd 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -5,7 +5,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ context.o proc.o pageattr.o fixmap.o obj-$(CONFIG_ARM64_CONTPTE) += contpte.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 424f188e62d9..6f1ae41dcf85 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -148,6 +148,7 @@ config PPC select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API select ARCH_HAS_PREEMPT_LAZY + select ARCH_HAS_PTDUMP select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 @@ -206,7 +207,6 @@ config PPC select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_PCI_IOMAP if PCI - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_VDSO_TIME_NS diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 0fe2f085c05a..8c1582b2987d 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -15,5 +15,5 @@ obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump/ +obj-$(CONFIG_PTDUMP) += ptdump/ obj-$(CONFIG_KASAN) += kasan/ diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7612c52e9b1e..353cf41d01f4 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -43,6 +43,7 @@ config RISCV select ARCH_HAS_PMEM_API select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PREPARE_SYNC_CORE_CMD + select ARCH_HAS_PTDUMP if MMU select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU @@ -112,7 +113,6 @@ config RISCV select GENERIC_IRQ_SHOW_LEVEL select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP - select GENERIC_PTDUMP if MMU select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL if MMU && 64BIT diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index cbe4d775ef56..b916a68d324a 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -19,7 +19,7 @@ obj-y += context.o obj-y += pmem.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_KASAN) += kasan_init.o ifdef CONFIG_KASAN diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9c9ec08d78c7..dd9dd2f8e673 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -96,6 +96,7 @@ config S390 select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_PREEMPT_LAZY + select ARCH_HAS_PTDUMP 
select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME select ARCH_HAS_SET_DIRECT_MAP @@ -163,7 +164,6 @@ config S390 select GENERIC_CPU_VULNERABILITIES select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_VDSO_TIME_NS diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index f6c2db7a8669..9726b91fe7e4 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile @@ -9,6 +9,6 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o +obj-$(CONFIG_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PGSTE) += gmap.o obj-$(CONFIG_PFAULT) += pfault.o diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cf49c130d1d0..bfd23a09b911 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -26,6 +26,7 @@ config X86_64 depends on 64BIT # Options that are inherently 64-bit kernel only: select ARCH_HAS_GIGANTIC_PAGE + select ARCH_HAS_PTDUMP select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_PER_VMA_LOCK select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE @@ -175,7 +176,6 @@ config X86 select GENERIC_IRQ_RESERVATION_MODE select GENERIC_IRQ_SHOW select GENERIC_PENDING_IRQ if SMP - select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select GENERIC_GETTIMEOFDAY diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 1eb4d23cdaae..c95c3aaadf97 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -59,7 +59,7 @@ config EARLY_PRINTK_USB_XDBC config EFI_PGT_DUMP bool "Dump the EFI pagetable" depends on EFI - select PTDUMP_CORE + select PTDUMP help Enable this if you want to dump the EFI page table before enabling virtual mode. This can be used to debug miscellaneous diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 690fbf48e853..e0c99a8760ca 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -39,7 +39,7 @@ CFLAGS_fault.o := -I $(src)/../include/asm/trace obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o +obj-$(CONFIG_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PTDUMP_DEBUGFS) += debug_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index a51a1149909a..32b65073d0cc 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -186,9 +186,9 @@ config ARCH_HAS_DEBUG_WX config DEBUG_WX bool "Warn on W+X mappings at boot" depends on ARCH_HAS_DEBUG_WX - depends on GENERIC_PTDUMP + depends on ARCH_HAS_PTDUMP depends on MMU - select PTDUMP_CORE + select PTDUMP help Generate a warning if any W+X mappings are found at boot. @@ -213,18 +213,18 @@ config DEBUG_WX If in doubt, say "Y". -config GENERIC_PTDUMP +config ARCH_HAS_PTDUMP bool -config PTDUMP_CORE +config PTDUMP bool config PTDUMP_DEBUGFS bool "Export kernel pagetable layout to userspace via debugfs" depends on DEBUG_KERNEL depends on DEBUG_FS - depends on GENERIC_PTDUMP - select PTDUMP_CORE + depends on ARCH_HAS_PTDUMP + select PTDUMP help Say Y here if you want to show the kernel pagetable layout in a debugfs file. 
This information is only useful for kernel developers diff --git a/mm/Makefile b/mm/Makefile index 84b1127e43a5..e7f6bbf8ae5f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -139,7 +139,7 @@ obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_HMM_MIRROR) += hmm.o obj-$(CONFIG_MEMFD_CREATE) += memfd.o obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o -obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP) += ptdump.o obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o obj-$(CONFIG_IO_MAPPING) += io-mapping.o obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o From b9585a3f3e0b30b3b60c85dc39f27ed3b06fb623 Mon Sep 17 00:00:00 2001 From: Zeng Jingxiang Date: Thu, 27 Feb 2025 16:22:23 +0800 Subject: [PATCH 227/431] mm/list_lru: make the case where mlru is NULL as unlikely In the following memcg_list_lru_alloc() function, mlru here is almost always NULL, so in most cases this should save a function call, mark mlru as unlikely to optimize the code, and reusing the mlru for the next attempt when the tree insertion fails. do { xas_lock_irqsave(&xas, flags); if (!xas_load(&xas) && !css_is_dying(&pos->css)) { xas_store(&xas, mlru); if (!xas_error(&xas)) mlru = NULL; } xas_unlock_irqrestore(&xas, flags); } while (xas_nomem(&xas, GFP_KERNEL)); > if (mlru) kfree(mlru); Link: https://lkml.kernel.org/r/20250227082223.1173847-1-jingxiangzeng.cas@gmail.com Signed-off-by: Zeng Jingxiang Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/ Suggested-by: Johannes Weiner Reviewed-by: Muchun Song Acked-by: Johannes Weiner Acked-by: Shakeel Butt Cc: Chengming Zhou Cc: Jingxiang Zeng Cc: Kairui Song Cc: Michal Hocko Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/list_lru.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/mm/list_lru.c b/mm/list_lru.c index 7d69434c70e0..490473af3122 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp) { unsigned long flags; - struct list_lru_memcg *mlru; + struct list_lru_memcg *mlru = NULL; struct mem_cgroup *pos, *parent; XA_STATE(xas, &lru->xa, 0); @@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, parent = parent_mem_cgroup(pos); } - mlru = memcg_init_list_lru_one(lru, gfp); - if (!mlru) - return -ENOMEM; + if (!mlru) { + mlru = memcg_init_list_lru_one(lru, gfp); + if (!mlru) + return -ENOMEM; + } xas_set(&xas, pos->kmemcg_id); do { xas_lock_irqsave(&xas, flags); @@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, } xas_unlock_irqrestore(&xas, flags); } while (xas_nomem(&xas, gfp)); - if (mlru) - kfree(mlru); } while (pos != memcg && !css_is_dying(&pos->css)); + if (unlikely(mlru)) + kfree(mlru); + return xas_error(&xas); } #else From 1eb3471bf5749ff3769ec52723bd9b8d773c7a62 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:19 -0800 Subject: [PATCH 228/431] mm/damon: add data structure for monitoring intervals auto-tuning Patch series "mm/damon: auto-tune aggregation interval". DAMON requires time-consuming and repetitive aggregation interval tuning. Introduce a feature for automating it using a feedback loop that aims an amount of observed access events, like auto-exposing cameras. 
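As a brief aside before the DAMON cover letter continues below: the list_lru change above boils down to an allocate-once, reuse-until-consumed pattern. Here is a minimal standalone sketch of that pattern in plain userspace C; try_insert() and insert_for_each_slot() are hypothetical stand-ins, not the kernel code.

#include <stdbool.h>
#include <stdlib.h>

struct item { int payload; };

/* Stand-in for the xarray store: pretend slots divisible by 3 are taken. */
static bool try_insert(int slot, struct item *it)
{
        (void)it;
        return (slot % 3) != 0;         /* true: the slot consumed the item */
}

static int insert_for_each_slot(int nr_slots)
{
        struct item *it = NULL;
        int slot;

        for (slot = 0; slot < nr_slots; slot++) {
                if (!it) {              /* reuse a leftover from a failed attempt */
                        it = malloc(sizeof(*it));
                        if (!it)
                                return -1;      /* -ENOMEM in the kernel version */
                }
                /* On success, ownership moves to the (omitted) data structure. */
                if (try_insert(slot, it))
                        it = NULL;      /* consumed; allocate a fresh one next time */
        }
        free(it);                       /* usually NULL, hence unlikely() in the patch */
        return 0;
}

int main(void)
{
        return insert_for_each_slot(10);
}

The point mirrors the patch: allocation only happens when the previous item was actually consumed, and the single leftover free moves out of the loop, which is why the kernel version marks it unlikely().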
Background: Access Frequency Monitoring and Aggregation Interval
================================================================

DAMON checks whether each memory element (damon_region) is accessed or not for every user-specified time interval called the 'sampling interval'. It aggregates the check results in a per-element counter called 'nr_accesses'. DAMON users can read the counters to get the access temperature of a given element. The counters are reset for every other user-specified time interval called the 'aggregation interval'.

This can be illustrated as DAMON continuously capturing a snapshot of the access events that happened within the last aggregation interval. This implies the aggregation interval plays a key role for the quality of the snapshots, like the camera exposure time. If it is too short, the number of access events captured in each snapshot is small, so each snapshot will show few interesting things but just a cold and dark world with hopefully one pale blue dot or two. If it is too long, too many events are aggregated in a single shot, so each snapshot will look like a world of flames, or Muspellheim. It will be difficult to find practical insights in both cases.

Problem: Time Consuming and Repetitive Tuning
=============================================

The appropriate length of the aggregation interval depends on how frequently the system and workloads generate access events that DAMON can observe. Hence, users have to tune the interval with an excessive amount of tests against the target system and workloads. If the system and workloads change, the tuning has to be done again. If the characteristics of the workloads are dynamic, it becomes even more challenging. It is therefore time-consuming and repetitive.

The tuning challenge mainly stems from asking the wrong question. It is not asking users what quality of monitoring results they want, but how DAMON should operate for their hidden goal. To give the right answer, users need to fully understand DAMON's mechanisms and the characteristics of their workloads. Users shouldn't be asked to understand the underlying mechanism. Understanding the characteristics of the workloads shouldn't be the role of users but of DAMON.

Aim-oriented Feedback-driven Auto-Tuning
========================================

Fortunately, the appropriate length of the aggregation interval can be inferred using a feedback loop. If the current snapshots are showing little interesting information, in other words, if they show only rare access events, increasing the aggregation interval helps, and vice versa. We tested this theory on a few real-world workloads, and documented one of the experiences in an official DAMON monitoring intervals tuning guideline. Since it is a simple theory that requires repeatable tries, it can be a good job for machines.

Based on the guideline's theory, we design an automation of aggregation interval tuning, in a way similar to camera auto-exposure. It defines the amount of interesting information as the ratio of access events that DAMON actually observed to the theoretical maximum amount of such events within each snapshot. Events are accounted in byte and sampling-attempt granularity. For example, let's say there is a region of 'X' bytes. DAMON tried access check sampling for the region 'Y' times in total within a given aggregation interval. Among the 'Y' attempts, 'Z' times showed positive results. Then, the theoretical maximum number of access events for the region is 'X * Y'.
And the number of access events that DAMON has observed for the region is 'X * Z'. The amount of the interesting information is therefore '(X * Z) / (X * Y)'. Note that each snapshot would have multiple regions.

Users can set an arbitrary value of the ratio as their target. Once the target is set, the automation periodically measures the current value of the ratio and increases or decreases the aggregation interval if the ratio is lower or higher than the target, respectively. The amount of the change is proportional to the distance between the current and the target values. To keep the auto-tuning from going too far, users can set the minimum and the maximum aggregation interval.

Changing only the aggregation interval while the sampling interval is kept makes the maximum level of access frequency in each snapshot, or the discernment of regions, inconsistent. Also, an unnecessarily short sampling interval causes meaningless monitoring overhead. The automation therefore adjusts the sampling interval together with the aggregation interval, while keeping the ratio between the two intervals. Users can set the ratio, or the discernment.

Discussion
==========

The modified question (the aimed amount of access events, or light, in each snapshot) is easy to answer for both the users and the kernel. If users are interested in finding more cold regions, the value should be lower, and vice versa. If users have no idea, the kernel can suggest a fair default value based on some theories and experiments. For example, based on the Pareto principle (the 80/20 rule), we could expect a 20% target ratio to capture 80% of the real access events. Since 80% might be too high, applying the rule once again, 4% (20% * 20%) may capture about 64% (80% * 80%) of the real access events.

The sampling to aggregation intervals ratio and the min/max aggregation intervals are also arguably easy to answer. What users want is discernment of regions for efficient system operation, for example, X amount of colder regions or Y amount of warmer regions, not exactly how many times each cache line is accessed at nanosecond granularity. The appropriate min/max aggregation interval can be set relatively naively, and may better be set based on the aimed monitoring overhead. Since the sampling interval directly decides the overhead, setting the limits based on the sampling interval can be easy. From my experience, I'd argue an intervals ratio of 0.05, and a 5 milliseconds to 20 seconds sampling interval range (100 milliseconds to 400 seconds aggregation interval) can be a good default suggestion.

Evaluation
==========

On a machine running a real world server workload, I ran DAMON to monitor its physical address space for about 23 hours, with this feature turned on. We set it to tune the sampling interval in a range from 5 milliseconds to 10 seconds, aiming at a 4 % DAMON-observed access ratio per three aggregation intervals. The exact command I used is as below.

    damo start --monitoring_intervals_goal 4% 3 5ms 10s --damos_action stat

During the test run, DAMON continuously updated the sampling and aggregation intervals as designed, within the given range. For all the time, DAMON was able to find intervals that meet the target access events ratio within the given range (sampling interval between 5 milliseconds and 10 seconds). For most of the time, the tuned sampling interval converged to 300-400 milliseconds, with only small changes within that range. The average of the tuned sampling interval during the test was about 380 milliseconds. The workload periodically gets less load and decreases its CPU usage.
Presumably this also caused it making less memory access events. Reactively to such event,s DAMON also increased the intervals as expected. It was still able to find the optimum interval that satisfying the target access ratio within the given intervals range. Usually it was converged to about 5 seconds. Once the workload gets normal amount of load again, DAMON reactively reduced the intervals to the normal range. I collected and visualized DAMON's monitoring results on the server a few times. Every time the visualized access pattern looked not biased to only cold or hot pages but diverse and balanced. Let me show some of the snapshots that I collected at the nearly end of the test (after about 23 hours have passed since starting DAMON on the server). The recency histogram looks as below. Please note that this visualization shows only a very coarse grained information. For more details about the visualization format, please refer to DAMON user-space tool documentation[1]. # ./damo report access --style recency-sz-hist --tried_regions_of 0 0 0 --access_rate 0 0 [-19 h 7 m 45.514 s, -17 h 12 m 58.963 s) 6.198 GiB |**** | [-17 h 12 m 58.963 s, -15 h 18 m 12.412 s) 0 B | | [-15 h 18 m 12.412 s, -13 h 23 m 25.860 s) 0 B | | [-13 h 23 m 25.860 s, -11 h 28 m 39.309 s) 0 B | | [-11 h 28 m 39.309 s, -9 h 33 m 52.757 s) 0 B | | [-9 h 33 m 52.757 s, -7 h 39 m 6.206 s) 0 B | | [-7 h 39 m 6.206 s, -5 h 44 m 19.654 s) 0 B | | [-5 h 44 m 19.654 s, -3 h 49 m 33.103 s) 0 B | | [-3 h 49 m 33.103 s, -1 h 54 m 46.551 s) 0 B | | [-1 h 54 m 46.551 s, -0 ns) 16.967 GiB |********* | [-0 ns, --6886551440000 ns) 38.835 GiB |********************| memory bw estimate: 9.425 GiB per second total size: 62.000 GiB It shows about 38 GiB of memory was accessed at least once within last aggregation interval (given ~300 milliseconds tuned sampling interval, this is about six seconds). This is about 61 % of the total memory. In other words, DAMON found warmest 61 % memory of the system. The number is particularly interesting given our Pareto principle based theory for the tuning goal value. We set it as 20 % of 20 % (4 %), thinking it would capture 80 % of 80 % (64 %) real access events. And it foudn 61 % hot memory, or working set. Nevertheless, to make the theory clearer, much more discussion and tests would be needed. At the moment, nonetheless, we can say making the target value higher helps finding more hot memory regions. The histogram also shows an amount of cold memory. About 17 GiB memory of the system has not accessed at least for last aggregation interval (about six seconds), and at most for about last two hours. The real longest unaccessed time of the 17 GiB memory was about 19 minutes, though. This is a limitation of this visualization format. It further found very cold 6 GiB memory. It has not accessed at least for last 17 hours and at most 19 hours. What about hot memory distribution? To see this, I capture and visualize the snapshot in access temperature histogram. Again, please refer to the DAMON user-space tool documentation[1] for the format and what access temperature mean. Both the visualization and metric shows only very coarse grained and limited information. The resulting histogram look like below. 
# ./damo report access --style temperature-sz-hist --tried_regions_of 0 0 0 [-6,840,763,776,000, -5,501,580,939,800) 6.198 GiB |*** | [-5,501,580,939,800, -4,162,398,103,600) 0 B | | [-4,162,398,103,600, -2,823,215,267,400) 0 B | | [-2,823,215,267,400, -1,484,032,431,200) 0 B | | [-1,484,032,431,200, -144,849,595,000) 0 B | | [-144,849,595,000, 1,194,333,241,200) 55.802 GiB |********************| [1,194,333,241,200, 2,533,516,077,400) 4.000 KiB |* | [2,533,516,077,400, 3,872,698,913,600) 4.000 KiB |* | [3,872,698,913,600, 5,211,881,749,800) 8.000 KiB |* | [5,211,881,749,800, 6,551,064,586,000) 12.000 KiB |* | [6,551,064,586,000, 7,890,247,422,200) 4.000 KiB |* | memory bw estimate: 5.178 GiB per second total size: 62.000 GiB We can see most of the memory is in similar access temperature range, and definitely some pages are extremely hot. To see the picture in more detail, let's capture and visualize the snapshot per DAMON-region, sorted by their access temperature. The total number of the regions was about 300. Due to the limited space, I'm showing only a few parts of the output here. # ./damo report access --style hot --tried_regions_of 0 0 0 heatmap: 00000000888888889999999888888888888888888888888888888888888888888888888888888888 # min/max temperatures: -6,827,258,184,000, 17,589,052,500, column size: 793.600 MiB |999999999999999999999999999999999999999| 4.000 KiB access 100 % 18 h 9 m 43.918 s |999999999999999999999999999999999999999| 8.000 KiB access 100 % 17 h 56 m 5.351 s |999999999999999999999999999999999999999| 4.000 KiB access 100 % 15 h 24 m 19.634 s |999999999999999999999999999999999999999| 4.000 KiB access 100 % 14 h 10 m 55.606 s |999999999999999999999999999999999999999| 4.000 KiB access 100 % 11 h 34 m 18.993 s [...] |99999999999999999999999999999| 8.000 KiB access 100 % 1 m 27.945 s |11111111111111111111111111111| 80.000 KiB access 15 % 1 m 21.180 s |00000000000000000000000000000| 24.000 KiB access 5 % 1 m 21.180 s |00000000000000000000000000000| 5.919 GiB access 10 % 1 m 14.415 s |99999999999999999999999999999| 12.000 KiB access 100 % 1 m 7.650 s [...] |0| 4.000 KiB access 5 % 0 ns |0| 12.000 KiB access 5 % 0 ns |0| 188.000 KiB access 0 % 0 ns |0| 24.000 KiB access 0 % 0 ns |0| 48.000 KiB access 0 % 0 ns [...] |0000000000000000000000000000000| 8.000 KiB access 0 % 6 m 45.901 s |00000000000000000000000000000000| 36.000 KiB access 0 % 7 m 26.491 s |00000000000000000000000000000000| 4.000 KiB access 0 % 12 m 37.682 s |000000000000000000000000000000000| 8.000 KiB access 0 % 18 m 9.168 s |000000000000000000000000000000000| 16.000 KiB access 0 % 19 m 3.288 s |0000000000000000000000000000000000000000| 6.198 GiB access 0 % 18 h 57 m 52.582 s memory bw estimate: 8.798 GiB per second total size: 62.000 GiB We can see DAMON found small and extremely hot regions that accessed for all access check sampling (once per about 300 milliseconds) for more than 10 hours. The access temperature rapidly decreases. DAMON was also able to find small and big regions that not accessed for up to about 19 minutes. It even found an outlier cold region of 6 GiB that not accessed for about 19 hours. It is unclear what the outlier region is, as of this writing. For the testing, DAMON was consuming about 0.1% of single CPU time. This is again expected results, since DAMON was using about 370 milliseconds sampling interval in most case. # ps -p $kdamond_pid -o %cpu %CPU 0.1 I also ran similar tests against kernel build workload and an in-memory cache workload benchmark[2]. 
Detialed results including tuned intervals and captured access pattern were of course different sicne those depend on the workloads. But the auto-tuning feature was always working as expected like the above results for the real world workload. To wrap up, with intervals auto-tuning feature, DAMON was able to capture access pattern snapshots of a quality on a real world server workload. The auto-tuning feature was able to adaptively react to the dynamic access patterns of the workload and reliably provide consistent monitoring results without manual human interventions. Also, the auto-tuning made DAMON consumes only necessary amount of resource for the required quality. References ========== [1] https://github.com/damonitor/damo/blob/next/USAGE.md#access-report-styles [2] https://github.com/facebookresearch/DCPerf/blob/main/packages/tao_bench/README.md This patch (of 8): Add data structures for DAMON sampling and aggregation intervals automatic tuning that aims specific amount of DAMON-observed access events per snapshot. In more detail, define the data structure for the tuning goal, link it to the monitoring attributes data structure so that DAMON kernel API callers can make the request, and update parameters setup DAMON function to respect the new parameter. Link: https://lkml.kernel.org/r/20250303221726.484227-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250303221726.484227-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 27 +++++++++++++++++++++++++++ mm/damon/core.c | 22 ++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 242910b190c9..5f2609f24761 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -659,12 +659,38 @@ struct damon_call_control { bool canceled; }; +/** + * struct damon_intervals_goal - Monitoring intervals auto-tuning goal. + * + * @access_bp: Access events observation ratio to achieve in bp. + * @aggrs: Number of aggregations to acheive @access_bp within. + * @min_sample_us: Minimum resulting sampling interval in microseconds. + * @max_sample_us: Maximum resulting sampling interval in microseconds. + * + * DAMON automatically tunes &damon_attrs->sample_interval and + * &damon_attrs->aggr_interval aiming the ratio in bp (1/10,000) of + * DAMON-observed access events to theoretical maximum amount within @aggrs + * aggregations be same to @access_bp. The logic increases + * &damon_attrs->aggr_interval and &damon_attrs->sampling_interval in same + * ratio if the current access events observation ratio is lower than the + * target for each @aggrs aggregations, and vice versa. + * + * If @aggrs is zero, the tuning is disabled and hence this struct is ignored. + */ +struct damon_intervals_goal { + unsigned long access_bp; + unsigned long aggrs; + unsigned long min_sample_us; + unsigned long max_sample_us; +}; + /** * struct damon_attrs - Monitoring attributes for accuracy/overhead control. * * @sample_interval: The time between access samplings. * @aggr_interval: The time between monitor results aggregations. * @ops_update_interval: The time between monitoring operations updates. + * @intervals_goal: Intervals auto-tuning goal. * @min_nr_regions: The minimum number of adaptive monitoring * regions. 
* @max_nr_regions: The maximum number of adaptive monitoring @@ -684,6 +710,7 @@ struct damon_attrs { unsigned long sample_interval; unsigned long aggr_interval; unsigned long ops_update_interval; + struct damon_intervals_goal intervals_goal; unsigned long min_nr_regions; unsigned long max_nr_regions; }; diff --git a/mm/damon/core.c b/mm/damon/core.c index b1ce072b56f2..ad3b5c065cb8 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -615,6 +615,25 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, r, old_attrs, new_attrs); } +/* + * damon_valid_intervals_goal() - return if the intervals goal of @attrs is + * valid. + */ +static bool damon_valid_intervals_goal(struct damon_attrs *attrs) +{ + struct damon_intervals_goal *goal = &attrs->intervals_goal; + + /* tuning is disabled */ + if (!goal->aggrs) + return true; + if (goal->min_sample_us > goal->max_sample_us) + return false; + if (attrs->sample_interval < goal->min_sample_us || + goal->max_sample_us < attrs->sample_interval) + return false; + return true; +} + /** * damon_set_attrs() - Set attributes for the monitoring. * @ctx: monitoring context @@ -635,6 +654,9 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) attrs->sample_interval : 1; struct damos *s; + if (!damon_valid_intervals_goal(attrs)) + return -EINVAL; + if (attrs->min_nr_regions < 3) return -EINVAL; if (attrs->min_nr_regions > attrs->max_nr_regions) From f04b0fedbe714f822bd066b319a60faa39a985a1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:20 -0800 Subject: [PATCH 229/431] mm/damon/core: implement intervals auto-tuning Implement the DAMON sampling and aggregation intervals auto-tuning mechanism as briefly described on 'struct damon_intervals_goal'. The core part for deciding the direction and amount of the changes is implemented reusing the feedback loop function which is being used for DAMOS quotas auto-tuning. Unlike the DAMOS quotas auto-tuning use case, limit the maximum decreasing amount after the adjustment to 50% of the current value, though. This is because the intervals have no good merits at rapid reductions since it could unnecessarily increase the monitoring overhead. Link: https://lkml.kernel.org/r/20250303221726.484227-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 16 +++++++++ mm/damon/core.c | 76 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 5f2609f24761..b3e2c793c1f4 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -713,6 +713,17 @@ struct damon_attrs { struct damon_intervals_goal intervals_goal; unsigned long min_nr_regions; unsigned long max_nr_regions; +/* private: internal use only */ + /* + * @aggr_interval to @sample_interval ratio. + * Core-external components call damon_set_attrs() with &damon_attrs + * that this field is unset. In the case, damon_set_attrs() sets this + * field of resulting &damon_attrs. Core-internal components such as + * kdamond_tune_intervals() calls damon_set_attrs() with &damon_attrs + * that this field is set. In the case, damon_set_attrs() just keep + * it. 
+ */ + unsigned long aggr_samples; }; /** @@ -761,6 +772,11 @@ struct damon_ctx { * update */ unsigned long next_ops_update_sis; + /* + * number of sample intervals that should be passed before next + * intervals tuning + */ + unsigned long next_intervals_tune_sis; /* for waiting until the execution of the kdamond_fn is started */ struct completion kdamond_started; /* for scheme quotas prioritization */ diff --git a/mm/damon/core.c b/mm/damon/core.c index ad3b5c065cb8..9d37d3664030 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -664,6 +664,10 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) if (attrs->sample_interval > attrs->aggr_interval) return -EINVAL; + /* calls from core-external doesn't set this. */ + if (!attrs->aggr_samples) + attrs->aggr_samples = attrs->aggr_interval / sample_interval; + ctx->next_aggregation_sis = ctx->passed_sample_intervals + attrs->aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->passed_sample_intervals + @@ -1301,6 +1305,65 @@ static void kdamond_reset_aggregated(struct damon_ctx *c) } } +static unsigned long damon_get_intervals_score(struct damon_ctx *c) +{ + struct damon_target *t; + struct damon_region *r; + unsigned long sz_region, max_access_events = 0, access_events = 0; + unsigned long target_access_events; + unsigned long goal_bp = c->attrs.intervals_goal.access_bp; + + damon_for_each_target(t, c) { + damon_for_each_region(r, t) { + sz_region = damon_sz_region(r); + max_access_events += sz_region * c->attrs.aggr_samples; + access_events += sz_region * r->nr_accesses; + } + } + target_access_events = max_access_events * goal_bp / 10000; + return access_events * 10000 / target_access_events; +} + +static unsigned long damon_feed_loop_next_input(unsigned long last_input, + unsigned long score); + +static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) +{ + unsigned long score_bp, adaptation_bp; + + score_bp = damon_get_intervals_score(c); + adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / + 10000; + /* + * adaptaion_bp ranges from 1 to 20,000. Avoid too rapid reduction of + * the intervals by rescaling [1,10,000] to [5000, 10,000]. + */ + if (adaptation_bp <= 10000) + adaptation_bp = 5000 + adaptation_bp / 2; + return adaptation_bp; +} + +static void kdamond_tune_intervals(struct damon_ctx *c) +{ + unsigned long adaptation_bp; + struct damon_attrs new_attrs; + struct damon_intervals_goal *goal; + + adaptation_bp = damon_get_intervals_adaptation_bp(c); + if (adaptation_bp == 10000) + return; + + new_attrs = c->attrs; + goal = &c->attrs.intervals_goal; + new_attrs.sample_interval = min(goal->max_sample_us, + c->attrs.sample_interval * adaptation_bp / 10000); + new_attrs.sample_interval = max(goal->min_sample_us, + new_attrs.sample_interval); + new_attrs.aggr_interval = new_attrs.sample_interval * + c->attrs.aggr_samples; + damon_set_attrs(c, &new_attrs); +} + static void damon_split_region_at(struct damon_target *t, struct damon_region *r, unsigned long sz_r); @@ -2209,6 +2272,8 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx) ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / sample_interval; + ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * + ctx->attrs.intervals_goal.aggrs; damon_for_each_scheme(scheme, ctx) { apply_interval = scheme->apply_interval_us ? @@ -2293,6 +2358,17 @@ static int kdamond_fn(void *data) sample_interval = ctx->attrs.sample_interval ? 
ctx->attrs.sample_interval : 1; if (ctx->passed_sample_intervals >= next_aggregation_sis) { + if (ctx->attrs.intervals_goal.aggrs && + ctx->passed_sample_intervals >= + ctx->next_intervals_tune_sis) { + ctx->next_intervals_tune_sis += + ctx->attrs.aggr_samples * + ctx->attrs.intervals_goal.aggrs; + kdamond_tune_intervals(ctx); + sample_interval = ctx->attrs.sample_interval ? + ctx->attrs.sample_interval : 1; + + } ctx->next_aggregation_sis = next_aggregation_sis + ctx->attrs.aggr_interval / sample_interval; From 8fbbcbeaafeb82498fd83f58c1e5ad1aff135212 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:21 -0800 Subject: [PATCH 230/431] mm/damon/sysfs: implement intervals tuning goal directory Implement DAMON sysfs interface directory and its files for setting DAMON sampling and aggregation intervals auto-tuning goal. Link: https://lkml.kernel.org/r/20250303221726.484227-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 189 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index deeab04d3b46..a772060300b4 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -408,6 +408,164 @@ static const struct kobj_type damon_sysfs_targets_ktype = { .default_groups = damon_sysfs_targets_groups, }; +/* + * intervals goal directory + */ + +struct damon_sysfs_intervals_goal { + struct kobject kobj; + unsigned long access_bp; + unsigned long aggrs; + unsigned long min_sample_us; + unsigned long max_sample_us; +}; + +static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc( + unsigned long access_bp, unsigned long aggrs, + unsigned long min_sample_us, unsigned long max_sample_us) +{ + struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal), + GFP_KERNEL); + + if (!goal) + return NULL; + + goal->kobj = (struct kobject){}; + goal->access_bp = access_bp; + goal->aggrs = aggrs; + goal->min_sample_us = min_sample_us; + goal->max_sample_us = max_sample_us; + return goal; +} + +static ssize_t access_bp_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->access_bp); +} + +static ssize_t access_bp_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->access_bp = nr; + return count; +} + +static ssize_t aggrs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->aggrs); +} + +static ssize_t aggrs_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->aggrs = nr; + return count; +} + +static ssize_t min_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", 
goal->min_sample_us); +} + +static ssize_t min_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->min_sample_us = nr; + return count; +} + +static ssize_t max_sample_us_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + + return sysfs_emit(buf, "%lu\n", goal->max_sample_us); +} + +static ssize_t max_sample_us_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_intervals_goal *goal = container_of(kobj, + struct damon_sysfs_intervals_goal, kobj); + unsigned long nr; + int err = kstrtoul(buf, 0, &nr); + + if (err) + return err; + + goal->max_sample_us = nr; + return count; +} + +static void damon_sysfs_intervals_goal_release(struct kobject *kobj) +{ + kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj)); +} + +static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr = + __ATTR_RW_MODE(access_bp, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr = + __ATTR_RW_MODE(aggrs, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr = + __ATTR_RW_MODE(min_sample_us, 0600); + +static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr = + __ATTR_RW_MODE(max_sample_us, 0600); + +static struct attribute *damon_sysfs_intervals_goal_attrs[] = { + &damon_sysfs_intervals_goal_access_bp_attr.attr, + &damon_sysfs_intervals_goal_aggrs_attr.attr, + &damon_sysfs_intervals_goal_min_sample_us_attr.attr, + &damon_sysfs_intervals_goal_max_sample_us_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal); + +static const struct kobj_type damon_sysfs_intervals_goal_ktype = { + .release = damon_sysfs_intervals_goal_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = damon_sysfs_intervals_goal_groups, +}; + /* * intervals directory */ @@ -417,6 +575,7 @@ struct damon_sysfs_intervals { unsigned long sample_us; unsigned long aggr_us; unsigned long update_us; + struct damon_sysfs_intervals_goal *intervals_goal; }; static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( @@ -436,6 +595,32 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( return intervals; } +static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals) +{ + struct damon_sysfs_intervals_goal *goal; + int err; + + goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0); + if (!goal) + return -ENOMEM; + + err = kobject_init_and_add(&goal->kobj, + &damon_sysfs_intervals_goal_ktype, &intervals->kobj, + "intervals_goal"); + if (err) { + kobject_put(&goal->kobj); + intervals->intervals_goal = NULL; + return err; + } + intervals->intervals_goal = goal; + return 0; +} + +static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals) +{ + kobject_put(&intervals->intervals_goal->kobj); +} + static ssize_t sample_us_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -569,6 +754,9 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs) err = kobject_init_and_add(&intervals->kobj, &damon_sysfs_intervals_ktype, &attrs->kobj, "intervals"); + if (err) + goto put_intervals_out; + err = 
damon_sysfs_intervals_add_dirs(intervals); if (err) goto put_intervals_out; attrs->intervals = intervals; @@ -599,6 +787,7 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs) static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs) { kobject_put(&attrs->nr_regions_range->kobj); + damon_sysfs_intervals_rm_dirs(attrs->intervals); kobject_put(&attrs->intervals->kobj); } From 0622c68d0a51f1268f3f9a171f4969c1bfc07c05 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:22 -0800 Subject: [PATCH 231/431] mm/damon/sysfs: commit intervals tuning goal Connect DAMON sysfs interface for sampling and aggregation intervals auto-tuning with DAMON core API, so that users can really use the feature using the sysfs files. Link: https://lkml.kernel.org/r/20250303221726.484227-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index a772060300b4..fa5f004f0670 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1273,11 +1273,18 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx, struct damon_sysfs_attrs *sys_attrs) { struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals; + struct damon_sysfs_intervals_goal *sys_goal = + sys_intervals->intervals_goal; struct damon_sysfs_ul_range *sys_nr_regions = sys_attrs->nr_regions_range; struct damon_attrs attrs = { .sample_interval = sys_intervals->sample_us, .aggr_interval = sys_intervals->aggr_us, + .intervals_goal = { + .access_bp = sys_goal->access_bp, + .aggrs = sys_goal->aggrs, + .min_sample_us = sys_goal->min_sample_us, + .max_sample_us = sys_goal->max_sample_us}, .ops_update_interval = sys_intervals->update_us, .min_nr_regions = sys_nr_regions->min, .max_nr_regions = sys_nr_regions->max, From 1077605396b4da993327ebe40eabc28478e2be94 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:23 -0800 Subject: [PATCH 232/431] mm/damon/sysfs: implement a command to update auto-tuned monitoring intervals DAMON kernel API callers can show auto-tuned sampling and aggregation intervals from the monmitoring attributes data structure. That can be useful for debugging or tuning of the feature. DAMON user-space ABI users has no way to see that, though. Implement a new DAMON sysfs interface command, namely 'update_tuned_intervals', for the purpose. If the command is written to the kdamond state file, the tuned sampling and aggregation intervals will be updated to the corresponding sysfs interface files. Link: https://lkml.kernel.org/r/20250303221726.484227-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index fa5f004f0670..ccd435d234b9 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1213,6 +1213,11 @@ enum damon_sysfs_cmd { * effective size quota of the scheme in bytes. */ DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS, + /* + * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring + * intevals. + */ + DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS, /* * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands. 
*/ @@ -1230,6 +1235,7 @@ static const char * const damon_sysfs_cmd_strs[] = { "update_schemes_tried_regions", "clear_schemes_tried_regions", "update_schemes_effective_quotas", + "update_tuned_intervals", }; /* @@ -1502,6 +1508,17 @@ static int damon_sysfs_upd_schemes_effective_quotas(void *data) return 0; } +static int damon_sysfs_upd_tuned_intervals(void *data) +{ + struct damon_sysfs_kdamond *kdamond = data; + struct damon_ctx *ctx = kdamond->damon_ctx; + + kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us = + ctx->attrs.sample_interval; + kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us = + ctx->attrs.aggr_interval; + return 0; +} /* * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests. @@ -1723,6 +1740,9 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_damon_call( damon_sysfs_upd_schemes_effective_quotas, kdamond); + case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: + return damon_sysfs_damon_call( + damon_sysfs_upd_tuned_intervals, kdamond); default: break; } From af03edb521f1ea5f66a2fa7cd3e4af7d9a1984e2 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:24 -0800 Subject: [PATCH 233/431] Docs/mm/damon/design: document for intervals auto-tuning Document the design of DAMON sampling and aggregation intervals auto-tuning. [sj@kernel.org: fix a typo on 'intervals auto-tuning' section] Link: https://lkml.kernel.org/r/20250305182744.56125-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250303221726.484227-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Cc: Stephen Rothwell Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 46 +++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 5af991551a86..5a8c1752dc8a 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -313,6 +313,10 @@ sufficient for the given purpose, it shouldn't be unnecessarily further lowered. It is recommended to be set proportional to ``aggregation interval``. By default, the ratio is set as ``1/20``, and it is still recommended. +Based on the manual tuning guide, DAMON provides more intuitive knob-based +intervals auto tuning mechanism. Please refer to :ref:`the design document of +the feature ` for detail. + Refer to below documents for an example tuning based on the above guide. .. toctree:: @@ -321,6 +325,48 @@ Refer to below documents for an example tuning based on the above guide. monitoring_intervals_tuning_example +.. _damon_design_monitoring_intervals_autotuning: + +Monitoring Intervals Auto-tuning +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DAMON provides automatic tuning of the ``sampling interval`` and ``aggregation +interval`` based on the :ref:`the tuning guide idea +`. The tuning mechanism allows +users to set the aimed amount of access events to observe via DAMON within +given time interval. The target can be specified by the user as a ratio of +DAMON-observed access events to the theoretical maximum amount of the events +(``access_bp``) that measured within a given number of aggregations +(``aggrs``). + +The DAMON-observed access events are calculated in byte granularity based on +DAMON :ref:`region assumption `. For +example, if a region of size ``X`` bytes of ``Y`` ``nr_accesses`` is found, it +means ``X * Y`` access events are observed by DAMON. 
Theoretical maximum +access events for the region is calculated in same way, but replacing ``Y`` +with theoretical maximum ``nr_accesses``, which can be calculated as +``aggregation interval / sampling interval``. + +The mechanism calculates the ratio of access events for ``aggrs`` aggregations, +and increases or decrease the ``sampleing interval`` and ``aggregation +interval`` in same ratio, if the observed access ratio is lower or higher than +the target, respectively. The ratio of the intervals change is decided in +proportion to the distance between current samples ratio and the target ratio. + +The user can further set the minimum and maximum ``sampling interval`` that can +be set by the tuning mechanism using two parameters (``min_sample_us`` and +``max_sample_us``). Because the tuning mechanism changes ``sampling interval`` +and ``aggregation interval`` in same ratio always, the minimum and maximum +``aggregation interval`` after each of the tuning changes can automatically set +together. + +The tuning is turned off by default, and need to be set explicitly by the user. +As a rule of thumbs and the Parreto principle, 4% access samples ratio target +is recommended. Note that Parreto principle (80/20 rule) has applied twice. +That is, assumes 4% (20% of 20%) DAMON-observed access events ratio (source) +to capture 64% (80% multipled by 80%) real access events (outcomes). + + .. _damon_design_damos: Operation Schemes From e2b23dc62369b76b68d8354f12baeaff14b6e24f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:25 -0800 Subject: [PATCH 234/431] Docs/ABI/damon: document intervals auto-tuning ABI Document the DAMON user-space ABI for DAMON sampling and aggregation intervals auto-tuning. Link: https://lkml.kernel.org/r/20250303221726.484227-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- .../ABI/testing/sysfs-kernel-mm-damon | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index ccd13ca668c8..76da77d7f7b6 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -91,6 +91,36 @@ Description: Writing a value to this file sets the update interval of the DAMON context in microseconds as the value. Reading this file returns the value. +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//monitoring_attrs/intervals/intrvals_goal/access_bp +Date: Feb 2025 +Contact: SeongJae Park +Description: Writing a value to this file sets the monitoring intervals + auto-tuning target DAMON-observed access events ratio within + the given time interval (aggrs in same directory), in bp + (1/10,000). Reading this file returns the value. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//monitoring_attrs/intervals/intrvals_goal/aggrs +Date: Feb 2025 +Contact: SeongJae Park +Description: Writing a value to this file sets the time interval to achieve + the monitoring intervals auto-tuning target DAMON-observed + access events ratio (access_bp in same directory) within. + Reading this file returns the value. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//monitoring_attrs/intervals/intrvals_goal/min_sample_us +Date: Feb 2025 +Contact: SeongJae Park +Description: Writing a value to this file sets the minimum value of + auto-tuned sampling interval in microseconds. Reading this + file returns the value. 
+ +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//monitoring_attrs/intervals/intrvals_goal/max_sample_us +Date: Feb 2025 +Contact: SeongJae Park +Description: Writing a value to this file sets the maximum value of + auto-tuned sampling interval in microseconds. Reading this + file returns the value. + What: /sys/kernel/mm/damon/admin/kdamonds//contexts//monitoring_attrs/nr_regions/min WDate: Mar 2022 From b243d666d1079587daa3f41fffdabbabad8dd075 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 3 Mar 2025 14:17:26 -0800 Subject: [PATCH 235/431] Docs/admin-guide/mm/damon/usage: add intervals_goal directory on the hierarchy Document DAMON sysfs interface usage for DAMON sampling and aggregation intervals auto-tuning. Link: https://lkml.kernel.org/r/20250303221726.484227-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 25 ++++++++++++++++++++ Documentation/mm/damon/design.rst | 4 ++++ 2 files changed, 29 insertions(+) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index dc37bba96273..de549dd18107 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -64,6 +64,7 @@ comma (","). │ │ │ │ :ref:`0 `/avail_operations,operations │ │ │ │ │ :ref:`monitoring_attrs `/ │ │ │ │ │ │ intervals/sample_us,aggr_us,update_us + │ │ │ │ │ │ │ intervals_goal/access_bp,aggrs,min_sample_us,max_sample_us │ │ │ │ │ │ nr_regions/min,max │ │ │ │ │ :ref:`targets `/nr_targets │ │ │ │ │ │ :ref:`0 `/pid_target @@ -132,6 +133,11 @@ Users can write below commands for the kdamond to the ``state`` file. - ``off``: Stop running. - ``commit``: Read the user inputs in the sysfs files except ``state`` file again. +- ``update_tuned_intervals``: Update the contents of ``sample_us`` and + ``aggr_us`` files of the kdamond with the auto-tuning applied ``sampling + interval`` and ``aggregation interval`` for the files. Please refer to + :ref:`intervals_goal section ` + for more details. - ``commit_schemes_quota_goals``: Read the DAMON-based operation schemes' :ref:`quota goals `. - ``update_schemes_stats``: Update the contents of stats files for each @@ -213,6 +219,25 @@ writing to and rading from the files. For more details about the intervals and monitoring regions range, please refer to the Design document (:doc:`/mm/damon/design`). +.. _damon_usage_sysfs_monitoring_intervals_goal: + +contexts//monitoring_attrs/intervals/intervals_goal/ +------------------------------------------------------- + +Under the ``intervals`` directory, one directory for automated tuning of +``sample_us`` and ``aggr_us``, namely ``intervals_goal`` directory also exists. +Under the directory, four files for the auto-tuning control, namely +``access_bp``, ``aggrs``, ``min_sample_us`` and ``max_sample_us`` exist. +Please refer to the :ref:`design document of the feature +` for the internal of the tuning +mechanism. Reading and writing the four files under ``intervals_goal`` +directory shows and updates the tuning parameters that described in the +:ref:design doc ` with the same +names. The tuning starts with the user-set ``sample_us`` and ``aggr_us``. The +tuning-applied current values of the two intervals can be read from the +``sample_us`` and ``aggr_us`` files after writing ``update_tuned_intervals`` to +the ``state`` file. + .. 
_sysfs_targets:

contexts//targets/

diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 5a8c1752dc8a..e6fd3b604e70 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -366,6 +366,10 @@ is recommended. Note that the Pareto principle (80/20 rule) is applied twice.
 That is, it assumes a 4% (20% of 20%) DAMON-observed access events ratio (source)
 to capture 64% (80% multiplied by 80%) of the real access events (outcomes).
 
+To know how user-space can use this feature via :ref:`DAMON sysfs interface
+`, refer to :ref:`intervals_goal ` part of
+the documentation.
+
 .. _damon_design_damos:

From 691ee97e1a9de0cdb3efb893c1f180e3f4a35e32 Mon Sep 17 00:00:00 2001
From: Ryan Roberts
Date: Mon, 3 Mar 2025 14:15:35 +0000
Subject: [PATCH 236/431] mm: fix lazy mmu docs and usage

Patch series "Fix lazy mmu mode", v2.

I'm planning to implement lazy mmu mode for arm64 to optimize vmalloc. As
part of that, I will extend lazy mmu mode to cover kernel mappings in
vmalloc table walkers. While lazy mmu mode is already used for kernel
mappings in a few places, this will extend its use significantly.

Having reviewed the existing lazy mmu implementations in powerpc, sparc
and x86, it looks like there are a bunch of bugs, some of which may be
more likely to trigger once I extend the use of lazy mmu. So this series
attempts to clarify the requirements and fix all the bugs in advance of
that series. See patch #1 commit log for all the details.

This patch (of 5):

The docs, implementations and use of arch_[enter|leave]_lazy_mmu_mode()
are a bit of a mess (to put it politely). There are a number of issues
related to nesting of lazy mmu regions and confusion over whether the
task, when in a lazy mmu region, is preemptible or not. Fix all the
issues relating to the core-mm. Follow up commits will fix the
arch-specific implementations. 3 arches implement lazy mmu: powerpc,
sparc and x86.

When arch_[enter|leave]_lazy_mmu_mode() was first introduced by commit
6606c3e0da53 ("[PATCH] paravirt: lazy mmu mode hooks.patch"), it was
expected that lazy mmu regions would never nest and that the appropriate
page table lock(s) would be held while in the region, thus ensuring the
region is non-preemptible. Additionally lazy mmu regions were only used
during manipulation of user mappings.

Commit 38e0edb15bd0 ("mm/apply_to_range: call pte function with lazy
updates") started invoking the lazy mmu mode in apply_to_pte_range(),
which is used for both user and kernel mappings. For kernel mappings the
region is no longer protected by any lock so there is no longer any
guarantee about non-preemptibility. Additionally, for RT configs, holding
the PTL only implies no CPU migration; it doesn't prevent preemption.

Commit bcc6cc832573 ("mm: add default definition of set_ptes()") added
arch_[enter|leave]_lazy_mmu_mode() to the default implementation of
set_ptes(), used by x86. So after this commit, lazy mmu regions can be
nested. Additionally commit 1a10a44dfc1d ("sparc64: implement the new
page table range API") and commit 9fee28baa601 ("powerpc: implement the
new page table range API") did the same for the sparc and powerpc
set_ptes() overrides.

powerpc couldn't deal with preemption so avoids it in commit b9ef323ea168
("powerpc/64s: Disable preemption in hash lazy mmu mode"), which
explicitly disables preemption for the whole region in its implementation.
x86 can support preemption (or at least it could until it tried to add
support for nesting; more on this below).
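
To make the nesting hazard concrete, consider a minimal sketch of a
hypothetical arch backend that tracks lazy mode with a single per-CPU
flag (illustrative only; this is not the powerpc, sparc or x86 code):

  #include <linux/percpu.h>

  /* Hypothetical backend: one per-CPU flag, no awareness of nesting. */
  static DEFINE_PER_CPU(bool, lazy_mmu_active);

  void arch_enter_lazy_mmu_mode(void)
  {
          __this_cpu_write(lazy_mmu_active, true);
  }

  void arch_leave_lazy_mmu_mode(void)
  {
          /* ...flush any queued page table updates here... */
          __this_cpu_write(lazy_mmu_active, false);
  }

  /*
   * With the pre-fix generic set_ptes(), the hooks nest and the inner
   * leave turns batching off too early:
   *
   *   arch_enter_lazy_mmu_mode()        <- outer region
   *     set_ptes()
   *       arch_enter_lazy_mmu_mode()    <- nested enter (no-op here)
   *       arch_leave_lazy_mmu_mode()    <- clears the flag
   *     ...                             <- rest of the outer region runs
   *   arch_leave_lazy_mmu_mode()           with batching disabled
   */

A real implementation also has to queue and flush the deferred updates,
but the flag handling alone shows why the generic set_ptes() must not
re-enter the mode.
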
Sparc looks to be totally broken in the face of preemption, as far as I can tell. powerpc can't deal with nesting, so avoids it in commit 47b8def9358c ("powerpc/mm: Avoid calling arch_enter/leave_lazy_mmu() in set_ptes"), which removes the lazy mmu calls from its implementation of set_ptes(). x86 attempted to support nesting in commit 49147beb0ccb ("x86/xen: allow nesting of same lazy mode") but as far as I can tell, this breaks its support for preemption. In short, it's all a mess; the semantics for arch_[enter|leave]_lazy_mmu_mode() are not clearly defined and as a result the implementations all have different expectations, sticking plasters and bugs. arm64 is aiming to start using these hooks, so let's clean everything up before adding an arm64 implementation. Update the documentation to state that lazy mmu regions can never be nested, must not be called in interrupt context and preemption may or may not be enabled for the duration of the region. And fix the generic implementation of set_ptes() to avoid nesting. arch-specific fixes to conform to the new spec will proceed this one. These issues were spotted by code review and I have no evidence of issues being reported in the wild. Link: https://lkml.kernel.org/r/20250303141542.3371656-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20250303141542.3371656-2-ryan.roberts@arm.com Fixes: bcc6cc832573 ("mm: add default definition of set_ptes()") Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Juergen Gross Cc: Andreas Larsson Cc: Borislav Betkov Cc: Boris Ostrovsky Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Juegren Gross Cc: Matthew Wilcow (Oracle) Cc: Thomas Gleinxer Cc: Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 94d267d02372..787c632ee2c9 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -222,10 +222,14 @@ static inline int pmd_dirty(pmd_t pmd) * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be - * up to date. This mode can only be entered and left under the protection of - * the page table locks for all page tables which may be modified. In the UP - * case, this is required so that preemption is disabled, and in the SMP case, - * it must synchronize the delayed page table writes properly on other CPUs. + * up to date. + * + * In the general case, no lock is guaranteed to be held between entry and exit + * of the lazy mode. So the implementation must assume preemption may be enabled + * and cpu migration is possible; it must take steps to be robust against this. + * (In practice, for user PTE updates, the appropriate page table lock(s) are + * held, but for kernel PTE updates, no lock is held). Nesting is not permitted + * and the mode cannot be used in interrupt context. 
*/ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) @@ -287,7 +291,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, { page_table_check_ptes_set(mm, ptep, pte, nr); - arch_enter_lazy_mmu_mode(); for (;;) { set_pte(ptep, pte); if (--nr == 0) @@ -295,7 +298,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, ptep++; pte = pte_next_pfn(pte); } - arch_leave_lazy_mmu_mode(); } #endif #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) From ad449d856bd7e7461ac740abb9b5d10a824e0166 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 3 Mar 2025 14:15:36 +0000 Subject: [PATCH 237/431] fs/proc/task_mmu: reduce scope of lazy mmu region Update the way arch_[enter|leave]_lazy_mmu_mode() is called in pagemap_scan_pmd_entry() to follow the normal pattern of holding the ptl for user space mappings. As a result the scope is reduced to only the pte table, but that's where most of the performance win is. While I believe there wasn't technically a bug here, the original scope made it easier to accidentally nest or, worse, accidentally call something like kmap() which would expect an immediate mode pte modification but it would end up deferred. Link: https://lkml.kernel.org/r/20250303141542.3371656-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Juergen Gross Cc: Andreas Larsson Cc: Borislav Betkov Cc: Boris Ostrovsky Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Juegren Gross Cc: Matthew Wilcow (Oracle) Cc: Thomas Gleinxer Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c17615e21a5d..b0f189815512 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -2459,22 +2459,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, spinlock_t *ptl; int ret; - arch_enter_lazy_mmu_mode(); - ret = pagemap_scan_thp_entry(pmd, start, end, walk); - if (ret != -ENOENT) { - arch_leave_lazy_mmu_mode(); + if (ret != -ENOENT) return ret; - } ret = 0; start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); if (!pte) { - arch_leave_lazy_mmu_mode(); walk->action = ACTION_AGAIN; return 0; } + arch_enter_lazy_mmu_mode(); + if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { /* Fast path for performing exclusive WP */ for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { @@ -2543,8 +2540,8 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, if (flush_end) flush_tlb_range(vma, start, addr); - pte_unmap_unlock(start_pte, ptl); arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); cond_resched(); return ret; From a1d416bf9faf4f4871cb5a943614a07f80a7d70f Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 3 Mar 2025 14:15:37 +0000 Subject: [PATCH 238/431] sparc/mm: disable preemption in lazy mmu mode Since commit 38e0edb15bd0 ("mm/apply_to_range: call pte function with lazy updates") it's been possible for arch_[enter|leave]_lazy_mmu_mode() to be called without holding a page table lock (for the kernel mappings case), and therefore it is possible that preemption may occur while in the lazy mmu mode. The Sparc lazy mmu implementation is not robust to preemption since it stores the lazy mode state in a per-cpu structure and does not attempt to manage that state on task switch. 
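
To spell out the failure mode, here is a sketch of an interleaving that
becomes possible once preemption is allowed inside the region (an
assumed timeline for illustration, not an observed trace):

  /*
   * Task T enters the lazy region on CPU0, is preempted, and resumes
   * on CPU1:
   *
   *   CPU0, T: arch_enter_lazy_mmu_mode()
   *              tb = this_cpu_ptr(&tlb_batch);   // CPU0's batch
   *              tb->active = 1;
   *   -- T is preempted and migrated to CPU1 --
   *   CPU1, T: queues PTE updates against CPU1's tlb_batch, whose
   *            active flag was never set
   *   CPU1, T: arch_leave_lazy_mmu_mode()
   *              flushes and clears CPU1's state, while CPU0's batch
   *              is left with active == 1
   *
   * Keeping preemption disabled for the whole region removes the
   * migration step, so enter and leave always run on the same CPU.
   */
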
Powerpc had the same issue and fixed it by explicitly disabling preemption in arch_enter_lazy_mmu_mode() and re-enabling in arch_leave_lazy_mmu_mode(). See commit b9ef323ea168 ("powerpc/64s: Disable preemption in hash lazy mmu mode"). Given Sparc's lazy mmu mode is based on powerpc's, let's fix it in the same way here. Link: https://lkml.kernel.org/r/20250303141542.3371656-4-ryan.roberts@arm.com Fixes: 38e0edb15bd0 ("mm/apply_to_range: call pte function with lazy updates") Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Andreas Larsson Acked-by: Juergen Gross Cc: Borislav Betkov Cc: Boris Ostrovsky Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Juegren Gross Cc: Matthew Wilcow (Oracle) Cc: Thomas Gleinxer Cc: Signed-off-by: Andrew Morton --- arch/sparc/mm/tlb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 8648a50afe88..a35ddcca5e76 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -52,8 +52,10 @@ void flush_tlb_pending(void) void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + struct tlb_batch *tb; + preempt_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + preempt_enable(); } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, From eb61ad14c459b54f71f76331ca35d12fa3eb8f98 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 3 Mar 2025 14:15:38 +0000 Subject: [PATCH 239/431] sparc/mm: avoid calling arch_enter/leave_lazy_mmu() in set_ptes With commit 1a10a44dfc1d ("sparc64: implement the new page table range API") set_ptes was added to the sparc architecture. The implementation included calling arch_enter/leave_lazy_mmu() calls. The patch removes the usage of arch_enter/leave_lazy_mmu() since this implies nesting of lazy mmu regions which is not supported. Without this fix, lazy mmu mode is effectively disabled because we exit the mode after the first set_ptes: remap_pte_range() -> arch_enter_lazy_mmu() -> set_ptes() -> arch_enter_lazy_mmu() -> arch_leave_lazy_mmu() -> arch_leave_lazy_mmu() Powerpc suffered the same problem and fixed it in a corresponding way with commit 47b8def9358c ("powerpc/mm: Avoid calling arch_enter/leave_lazy_mmu() in set_ptes"). Link: https://lkml.kernel.org/r/20250303141542.3371656-5-ryan.roberts@arm.com Fixes: 1a10a44dfc1d ("sparc64: implement the new page table range API") Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Andreas Larsson Acked-by: Juergen Gross Cc: Borislav Betkov Cc: Boris Ostrovsky Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Juegren Gross Cc: Matthew Wilcow (Oracle) Cc: Thomas Gleinxer Cc: Signed-off-by: Andrew Morton --- arch/sparc/include/asm/pgtable_64.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2b7f358762c1..dc28f2c4eee3 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -936,7 +936,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { - arch_enter_lazy_mmu_mode(); for (;;) { __set_pte_at(mm, addr, ptep, pte, 0); if (--nr == 0) @@ -945,7 +944,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_val(pte) += PAGE_SIZE; addr += PAGE_SIZE; } - arch_leave_lazy_mmu_mode(); } #define set_ptes set_ptes From c36549ff8d8423fbf1a0c5bbd72be4f902d74700 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 3 Mar 2025 14:15:39 +0000 Subject: [PATCH 240/431] Revert "x86/xen: allow nesting of same lazy mode" Commit 49147beb0ccb ("x86/xen: allow nesting of same lazy mode") was added as a solution for a core-mm code change where arch_[enter|leave]_lazy_mmu_mode() started to be called in a nested manner; see commit bcc6cc832573 ("mm: add default definition of set_ptes()"). However, now that we have fixed the API to avoid nesting, we no longer need this capability in the x86 implementation. Additionally, from code review, I don't believe the fix was ever robust in the case of preemption occurring while in the nested lazy mode. The implementation usually deals with preemption by calling arch_leave_lazy_mmu_mode() from xen_start_context_switch() for the outgoing task if we are in the lazy mmu mode. Then in xen_end_context_switch(), it restarts the lazy mode by calling arch_enter_lazy_mmu_mode() for an incoming task that was in the lazy mode when it was switched out. But arch_leave_lazy_mmu_mode() will only unwind a single level of nesting. If we are in the double nest, then it's not fully unwound and per-cpu variables are left in a bad state. So the correct solution is to remove the possibility of nesting from the higher level (which has now been done) and remove this x86-specific solution. Link: https://lkml.kernel.org/r/20250303141542.3371656-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Juergen Gross Cc: Andreas Larsson Cc: Borislav Betkov Cc: Boris Ostrovsky Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Juegren Gross Cc: Matthew Wilcow (Oracle) Cc: Thomas Gleinxer Signed-off-by: Andrew Morton --- arch/x86/include/asm/xen/hypervisor.h | 15 ++------------- arch/x86/xen/enlighten_pv.c | 1 - 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index a9088250770f..bd0fc69a10a7 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -72,18 +72,10 @@ enum xen_lazy_mode { }; DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode); -DECLARE_PER_CPU(unsigned int, xen_lazy_nesting); static inline void enter_lazy(enum xen_lazy_mode mode) { - enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode); - - if (mode == old_mode) { - this_cpu_inc(xen_lazy_nesting); - return; - } - - BUG_ON(old_mode != XEN_LAZY_NONE); + BUG_ON(this_cpu_read(xen_lazy_mode) != XEN_LAZY_NONE); this_cpu_write(xen_lazy_mode, mode); } @@ -92,10 +84,7 @@ static inline void leave_lazy(enum xen_lazy_mode mode) { BUG_ON(this_cpu_read(xen_lazy_mode) != mode); - if (this_cpu_read(xen_lazy_nesting) == 0) - this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE); - else - this_cpu_dec(xen_lazy_nesting); + this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE); } enum xen_lazy_mode xen_get_lazy_mode(void); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 5e57835e999d..919e4df9380b 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -99,7 +99,6 @@ struct tls_descs { }; DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE; -DEFINE_PER_CPU(unsigned int, xen_lazy_nesting); enum xen_lazy_mode xen_get_lazy_mode(void) { From e47f1f56dd82cc6d91f5c4d914a534aa03cd12ca Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 28 Feb 2025 09:52:17 +0000 Subject: [PATCH 241/431] mm/page_alloc: clarify terminology in migratetype fallback code Patch series "mm/page_alloc: Some clarifications for migratetype fallback", v4. A couple of patches to try and make the code easier to follow. This patch (of 2): This code is rather confusing because: 1. "Steal" is sometimes used to refer to the general concept of allocating from a from a block of a fallback migratetype (steal_suitable_fallback()) but sometimes it refers specifically to converting a whole block's migratetype (can_steal_fallback()). 2. can_steal_fallback() sounds as though it's answering the question "am I functionally permitted to allocate from that other type" but in fact it is encoding a heuristic preference. 3. The same piece of data has different names in different places: can_steal vs whole_block. This reinforces point 2 because it looks like the different names reflect a shift in intent from "am I allowed to steal" to "do I want to steal", but no such shift exists. Fix 1. by avoiding the term "steal" in ambiguous contexts. Start using the term "claim" to refer to the special case of stealing the entire block. Fix 2. by using "should" instead of "can", and also rename its parameters and add some commentary to make it more explicit what they mean. Fix 3. by adopting the new "claim" terminology universally for this set of variables. 
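
As a quick orientation, the caller-side shape after the rename looks
roughly like this (condensed from the __rmqueue_fallback() hunk below;
not a literal copy of it):

  bool claim_block;
  int fallback_mt;

  fallback_mt = find_suitable_fallback(area, current_order,
                                       start_migratetype, false,
                                       &claim_block);
  if (fallback_mt != -1 && claim_block) {
          /* Preferred: convert the whole pageblock to start_migratetype. */
          page = get_page_from_free_area(area, fallback_mt);
          page = try_to_claim_block(zone, page, current_order, order,
                                    start_migratetype, fallback_mt,
                                    alloc_flags);
  }
  /*
   * If no whole-block claim is possible, a second pass steals the
   * smallest suitable single page from a fallback list instead.
   */
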
Link: https://lkml.kernel.org/r/20250228-clarify-steal-v4-0-cb2ef1a4e610@google.com Link: https://lkml.kernel.org/r/20250228-clarify-steal-v4-1-cb2ef1a4e610@google.com Signed-off-by: Brendan Jackman Reviewed-by: Vlastimil Babka Cc: Johannes Weiner Cc: Mel Gorman Cc: Michal Hocko Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/compaction.c | 4 +-- mm/internal.h | 2 +- mm/page_alloc.c | 72 ++++++++++++++++++++++++------------------------- 3 files changed, 39 insertions(+), 39 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index a3203d97123e..2e2d4db33e68 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2332,7 +2332,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &cc->zone->free_area[order]; - bool can_steal; + bool claim_block; /* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) @@ -2349,7 +2349,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) * other migratetype buddy lists. */ if (find_suitable_fallback(area, order, migratetype, - true, &can_steal) != -1) + true, &claim_block) != -1) /* * Movable pages are OK in any pageblock. If we are * stealing for a non-movable allocation, make sure diff --git a/mm/internal.h b/mm/internal.h index 31c626130883..70fa96e61c76 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -865,7 +865,7 @@ static inline void init_cma_pageblock(struct page *page) int find_suitable_fallback(struct free_area *area, unsigned int order, - int migratetype, bool only_stealable, bool *can_steal); + int migratetype, bool claim_only, bool *claim_block); static inline bool free_area_empty(struct free_area *area, int migratetype) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3c5624380b6c..0f0ecfe82f5a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1942,22 +1942,22 @@ static inline bool boost_watermark(struct zone *zone) /* * When we are falling back to another migratetype during allocation, try to - * steal extra free pages from the same pageblocks to satisfy further - * allocations, instead of polluting multiple pageblocks. + * claim entire blocks to satisfy further allocations, instead of polluting + * multiple pageblocks. * - * If we are stealing a relatively large buddy page, it is likely there will - * be more free pages in the pageblock, so try to steal them all. For - * reclaimable and unmovable allocations, we steal regardless of page size, - * as fragmentation caused by those allocations polluting movable pageblocks - * is worse than movable allocations stealing from unmovable and reclaimable - * pageblocks. + * If we are stealing a relatively large buddy page, it is likely there will be + * more free pages in the pageblock, so try to claim the whole block. For + * reclaimable and unmovable allocations, we try to claim the whole block + * regardless of page size, as fragmentation caused by those allocations + * polluting movable pageblocks is worse than movable allocations stealing from + * unmovable and reclaimable pageblocks. */ -static bool can_steal_fallback(unsigned int order, int start_mt) +static bool should_try_claim_block(unsigned int order, int start_mt) { /* * Leaving this order check is intended, although there is * relaxed order check in next check. 
The reason is that - * we can actually steal whole pageblock if this condition met, + * we can actually claim the whole pageblock if this condition met, * but, below check doesn't guarantee it and that is just heuristic * so could be changed anytime. */ @@ -1970,7 +1970,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt) * reclaimable pages that are closest to the request size. After a * while, memory compaction may occur to form large contiguous pages, * and the next movable allocation may not need to steal. Unmovable and - * reclaimable allocations need to actually steal pages. + * reclaimable allocations need to actually claim the whole block. */ if (order >= pageblock_order / 2 || start_mt == MIGRATE_RECLAIMABLE || @@ -1983,12 +1983,14 @@ static bool can_steal_fallback(unsigned int order, int start_mt) /* * Check whether there is a suitable fallback freepage with requested order. - * If only_stealable is true, this function returns fallback_mt only if - * we can steal other freepages all together. This would help to reduce + * Sets *claim_block to instruct the caller whether it should convert a whole + * pageblock to the returned migratetype. + * If only_claim is true, this function returns fallback_mt only if + * we would do this whole-block claiming. This would help to reduce * fragmentation due to mixed migratetype pages in one pageblock. */ int find_suitable_fallback(struct free_area *area, unsigned int order, - int migratetype, bool only_stealable, bool *can_steal) + int migratetype, bool only_claim, bool *claim_block) { int i; int fallback_mt; @@ -1996,19 +1998,16 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, if (area->nr_free == 0) return -1; - *can_steal = false; + *claim_block = false; for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { fallback_mt = fallbacks[migratetype][i]; if (free_area_empty(area, fallback_mt)) continue; - if (can_steal_fallback(order, migratetype)) - *can_steal = true; + if (should_try_claim_block(order, migratetype)) + *claim_block = true; - if (!only_stealable) - return fallback_mt; - - if (*can_steal) + if (*claim_block || !only_claim) return fallback_mt; } @@ -2016,14 +2015,14 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, } /* - * This function implements actual steal behaviour. If order is large enough, we - * can claim the whole pageblock for the requested migratetype. If not, we check - * the pageblock for constituent pages; if at least half of the pages are free - * or compatible, we can still claim the whole block, so pages freed in the - * future will be put on the correct free list. + * This function implements actual block claiming behaviour. If order is large + * enough, we can claim the whole pageblock for the requested migratetype. If + * not, we check the pageblock for constituent pages; if at least half of the + * pages are free or compatible, we can still claim the whole block, so pages + * freed in the future will be put on the correct free list. */ static struct page * -try_to_steal_block(struct zone *zone, struct page *page, +try_to_claim_block(struct zone *zone, struct page *page, int current_order, int order, int start_type, int block_type, unsigned int alloc_flags) { @@ -2091,11 +2090,12 @@ try_to_steal_block(struct zone *zone, struct page *page, /* * Try finding a free buddy page on the fallback list. 
* - * This will attempt to steal a whole pageblock for the requested type + * This will attempt to claim a whole pageblock for the requested type * to ensure grouping of such requests in the future. * - * If a whole block cannot be stolen, regress to __rmqueue_smallest() - * logic to at least break up as little contiguity as possible. + * If a whole block cannot be claimed, steal an individual page, regressing to + * __rmqueue_smallest() logic to at least break up as little contiguity as + * possible. * * The use of signed ints for order and current_order is a deliberate * deviation from the rest of this file, to make the for loop @@ -2112,7 +2112,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, int min_order = order; struct page *page; int fallback_mt; - bool can_steal; + bool claim_block; /* * Do not steal pages from freelists belonging to other pageblocks @@ -2131,15 +2131,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); + start_migratetype, false, &claim_block); if (fallback_mt == -1) continue; - if (!can_steal) + if (!claim_block) break; page = get_page_from_free_area(area, fallback_mt); - page = try_to_steal_block(zone, page, current_order, order, + page = try_to_claim_block(zone, page, current_order, order, start_migratetype, fallback_mt, alloc_flags); if (page) @@ -2149,11 +2149,11 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, if (alloc_flags & ALLOC_NOFRAGMENT) return NULL; - /* No luck stealing blocks. Find the smallest fallback page */ + /* No luck claiming pageblock. Find the smallest fallback page */ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); + start_migratetype, false, &claim_block); if (fallback_mt == -1) continue; From a14efee04796dd3f614eaf5348ca1ac099c21349 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Fri, 28 Feb 2025 09:52:18 +0000 Subject: [PATCH 242/431] mm/page_alloc: clarify should_claim_block() commentary There's lots of text here but it's a little hard to follow, this is an attempt to break it up and align its structure more closely with the code. Reword the top-level function comment to just explain what question the function answers from the point of view of the caller. Break up the internal logic into different sections that can have their own commentary describing why that part of the rationale is present. Note the page_group_by_mobility_disabled logic is not explained in the commentary, that is outside the scope of this patch... 
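
To read the restructured checks with concrete numbers, assume 4 KiB base
pages and 2 MiB pageblocks, i.e. pageblock_order == 9 (a common
configuration, used here only for illustration):

  /*
   *   order >= pageblock_order / 2, i.e. order >= 4 (64 KiB and larger):
   *       always worth trying to claim the whole block.
   *
   *   smaller MIGRATE_UNMOVABLE / MIGRATE_RECLAIMABLE requests:
   *       claim anyway, so they do not pollute a movable block one
   *       page at a time.
   *
   *   smaller MIGRATE_MOVABLE requests:
   *       do not claim; temporarily stealing single unmovable or
   *       reclaimable pages is enough, and compaction can undo it later.
   */
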
Link: https://lkml.kernel.org/r/20250228-clarify-steal-v4-2-cb2ef1a4e610@google.com Signed-off-by: Brendan Jackman Reviewed-by: Vlastimil Babka Cc: Johannes Weiner Cc: Mel Gorman Cc: Michal Hocko Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/page_alloc.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0f0ecfe82f5a..57f959af79c5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1941,16 +1941,9 @@ static inline bool boost_watermark(struct zone *zone) } /* - * When we are falling back to another migratetype during allocation, try to - * claim entire blocks to satisfy further allocations, instead of polluting - * multiple pageblocks. - * - * If we are stealing a relatively large buddy page, it is likely there will be - * more free pages in the pageblock, so try to claim the whole block. For - * reclaimable and unmovable allocations, we try to claim the whole block - * regardless of page size, as fragmentation caused by those allocations - * polluting movable pageblocks is worse than movable allocations stealing from - * unmovable and reclaimable pageblocks. + * When we are falling back to another migratetype during allocation, should we + * try to claim an entire block to satisfy further allocations, instead of + * polluting multiple pageblocks? */ static bool should_try_claim_block(unsigned int order, int start_mt) { @@ -1965,19 +1958,32 @@ static bool should_try_claim_block(unsigned int order, int start_mt) return true; /* - * Movable pages won't cause permanent fragmentation, so when you alloc - * small pages, you just need to temporarily steal unmovable or - * reclaimable pages that are closest to the request size. After a - * while, memory compaction may occur to form large contiguous pages, - * and the next movable allocation may not need to steal. Unmovable and - * reclaimable allocations need to actually claim the whole block. + * Above a certain threshold, always try to claim, as it's likely there + * will be more free pages in the pageblock. */ - if (order >= pageblock_order / 2 || - start_mt == MIGRATE_RECLAIMABLE || - start_mt == MIGRATE_UNMOVABLE || - page_group_by_mobility_disabled) + if (order >= pageblock_order / 2) return true; + /* + * Unmovable/reclaimable allocations would cause permanent + * fragmentations if they fell back to allocating from a movable block + * (polluting it), so we try to claim the whole block regardless of the + * allocation size. Later movable allocations can always steal from this + * block, which is less problematic. + */ + if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE) + return true; + + if (page_group_by_mobility_disabled) + return true; + + /* + * Movable pages won't cause permanent fragmentation, so when you alloc + * small pages, we just need to temporarily steal unmovable or + * reclaimable pages that are closest to the request size. After a + * while, memory compaction may occur to form large contiguous pages, + * and the next movable allocation may not need to steal. + */ return false; } From 645207a670a96ebd7b3cc9b85699a3a03ad35483 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 27 Feb 2025 23:58:06 -0800 Subject: [PATCH 243/431] memcg: don't call propagate_protected_usage() for v1 Patch series "page_counter cleanup and size reduction". Commit c6f53ed8f213a ("mm, memcg: cg2 memory{.swap,}.peak write handlers") accidently increased the size of struct page_counter. 
This series rearrange the fields to reduce its size and also has some cleanups. This patch (of 3): Memcg-v1 does not support memory protection (min/low) and thus there is no need to track protected memory usage for it. Link: https://lkml.kernel.org/r/20250228075808.207484-1-shakeel.butt@linux.dev Link: https://lkml.kernel.org/r/20250228075808.207484-2-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin (Cruise) Signed-off-by: Andrew Morton --- mm/memcontrol.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 04973c084c63..9e0e00a2c941 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3610,6 +3610,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct mem_cgroup *parent = mem_cgroup_from_css(parent_css); struct mem_cgroup *memcg, *old_memcg; + bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys); old_memcg = set_active_memcg(parent); memcg = mem_cgroup_alloc(parent); @@ -3627,7 +3628,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); - page_counter_init(&memcg->memory, &parent->memory, true); + page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); page_counter_init(&memcg->swap, &parent->swap, false); #ifdef CONFIG_MEMCG_V1 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); @@ -3647,7 +3648,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) return &memcg->css; } - if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) + if (memcg_on_dfl && !cgroup_memory_nosocket) static_branch_inc(&memcg_sockets_enabled_key); if (!cgroup_memory_nobpf) From 0e2759afcaf9bff25a63201856fa89b64181749f Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 27 Feb 2025 23:58:07 -0800 Subject: [PATCH 244/431] page_counter: track failcnt only for legacy cgroups Currently page_counter tracks failcnt for counters used by v1 and v2 controllers. However failcnt is only exported for v1 deployment and thus there is no need to maintain it in v2. The oom report does expose failcnt for memory and swap in v2 but v2 already maintains MEMCG_MAX and MEMCG_SWAP_MAX event counters which can be used. Link: https://lkml.kernel.org/r/20250228075808.207484-3-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin (Cruise) Signed-off-by: Andrew Morton --- include/linux/page_counter.h | 4 +++- mm/hugetlb_cgroup.c | 31 ++++++++++++++----------------- mm/memcontrol.c | 12 ++++++++++-- mm/page_counter.c | 4 +++- 4 files changed, 30 insertions(+), 21 deletions(-) diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 46406f3fe34d..e4bd8fd427be 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -28,12 +28,13 @@ struct page_counter { unsigned long watermark; /* Latest cg2 reset watermark */ unsigned long local_watermark; - unsigned long failcnt; + unsigned long failcnt; /* v1-only field */ /* Keep all the read most fields in a separete cacheline. 
*/ CACHELINE_PADDING(_pad2_); bool protection_support; + bool track_failcnt; unsigned long min; unsigned long low; unsigned long high; @@ -58,6 +59,7 @@ static inline void page_counter_init(struct page_counter *counter, counter->max = PAGE_COUNTER_MAX; counter->parent = parent; counter->protection_support = protection_support; + counter->track_failcnt = false; } static inline unsigned long page_counter_read(struct page_counter *counter) diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index bb9578bd99f9..58e895f3899a 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -101,10 +101,9 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup, int idx; for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) { - struct page_counter *fault_parent = NULL; - struct page_counter *rsvd_parent = NULL; + struct page_counter *fault, *fault_parent = NULL; + struct page_counter *rsvd, *rsvd_parent = NULL; unsigned long limit; - int ret; if (parent_h_cgroup) { fault_parent = hugetlb_cgroup_counter_from_cgroup( @@ -112,24 +111,22 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup, rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd( parent_h_cgroup, idx); } - page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup, - idx), - fault_parent, false); - page_counter_init( - hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), - rsvd_parent, false); + fault = hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx); + rsvd = hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx); + + page_counter_init(fault, fault_parent, false); + page_counter_init(rsvd, rsvd_parent, false); + + if (!cgroup_subsys_on_dfl(hugetlb_cgrp_subsys)) { + fault->track_failcnt = true; + rsvd->track_failcnt = true; + } limit = round_down(PAGE_COUNTER_MAX, pages_per_huge_page(&hstates[idx])); - ret = page_counter_set_max( - hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx), - limit); - VM_BUG_ON(ret); - ret = page_counter_set_max( - hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), - limit); - VM_BUG_ON(ret); + VM_BUG_ON(page_counter_set_max(fault, limit)); + VM_BUG_ON(page_counter_set_max(rsvd, limit)); } } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9e0e00a2c941..1c496b79de97 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1572,16 +1572,23 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) /* Use static buffer, for the caller is holding oom_lock. 
*/ static char buf[SEQ_BUF_SIZE]; struct seq_buf s; + unsigned long memory_failcnt; lockdep_assert_held(&oom_lock); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]); + else + memory_failcnt = memcg->memory.failcnt; + pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", K((u64)page_counter_read(&memcg->memory)), - K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); + K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt); if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n", K((u64)page_counter_read(&memcg->swap)), - K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); + K((u64)READ_ONCE(memcg->swap.max)), + atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); #ifdef CONFIG_MEMCG_V1 else { pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", @@ -3631,6 +3638,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); page_counter_init(&memcg->swap, &parent->swap, false); #ifdef CONFIG_MEMCG_V1 + memcg->memory.track_failcnt = !memcg_on_dfl; WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); page_counter_init(&memcg->kmem, &parent->kmem, false); page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); diff --git a/mm/page_counter.c b/mm/page_counter.c index af23f927611b..661e0f2a5127 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -121,6 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, { struct page_counter *c; bool protection = track_protection(counter); + bool track_failcnt = counter->track_failcnt; for (c = counter; c; c = c->parent) { long new; @@ -146,7 +147,8 @@ bool page_counter_try_charge(struct page_counter *counter, * inaccuracy in the failcnt which is only used * to report stats. */ - data_race(c->failcnt++); + if (track_failcnt) + data_race(c->failcnt++); *fail = c; goto failed; } From f7b0797d36e75d2a622580b56b9bfd3130d5d0e9 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Thu, 27 Feb 2025 23:58:08 -0800 Subject: [PATCH 245/431] page_counter: reduce struct page_counter size The struct page_counter has explicit padding for better cache alignment. The commit c6f53ed8f213a ("mm, memcg: cg2 memory{.swap,}.peak write handlers") added a field to the struct page_counter and accidently increased its size. Let's move the failcnt field which is v1-only field to the same cacheline of usage to reduce the size of struct page_counter. Link: https://lkml.kernel.org/r/20250228075808.207484-4-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin (Cruise) Signed-off-by: Andrew Morton --- include/linux/page_counter.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index e4bd8fd427be..d649b6bbbc87 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -9,10 +9,12 @@ struct page_counter { /* - * Make sure 'usage' does not share cacheline with any other field. The - * memcg->memory.usage is a hot member of struct mem_cgroup. + * Make sure 'usage' does not share cacheline with any other field in + * v2. The memcg->memory.usage is a hot member of struct mem_cgroup. 
 */
 	atomic_long_t usage;
+	unsigned long failcnt; /* v1-only field */
+
 	CACHELINE_PADDING(_pad1_);
 
 	/* effective memory.min and memory.min usage tracking */
@@ -28,7 +30,6 @@ struct page_counter {
 	unsigned long watermark;
 	/* Latest cg2 reset watermark */
 	unsigned long local_watermark;
-	unsigned long failcnt; /* v1-only field */
 
 	/* Keep all the read most fields in a separete cacheline. */
 	CACHELINE_PADDING(_pad2_);

From 3dc30ef64ba6b0f4c2a38aec7e87691f2d859b84 Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Thu, 27 Feb 2025 18:23:54 -0800
Subject: [PATCH 246/431] memcg: bypass root memcg check for skmem charging

The root memcg is never associated with a socket in mem_cgroup_sk_alloc,
so there is no need to check if the given memcg is root for the skmem
charging code path.

Link: https://lkml.kernel.org/r/20250228022354.2624249-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Muchun Song
Cc: Roman Gushchin (Cruise)
Signed-off-by: Andrew Morton
---
 mm/memcontrol.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1c496b79de97..b07eff78414b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4881,7 +4881,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
 
-	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
+	if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
 		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 		return true;
 	}

From c34b3eceeac64d59f8b475046501faa5d8daa5a4 Mon Sep 17 00:00:00 2001
From: Thomas Prescher
Date: Thu, 27 Feb 2025 23:45:05 +0100
Subject: [PATCH 247/431] mm: hugetlb: improve parallel huge page allocation time

Patch series "Add a command line option that enables control of how many
threads should be used to allocate huge pages", v2.

Allocating huge pages can take a very long time on servers with terabytes
of memory, even when they are allocated at boot time, where the
allocation happens in parallel.

Before this series, the kernel used a hard-coded value of 2 threads per
NUMA node for these allocations. This value might have been good enough
in the past but it is not sufficient to fully utilize newer systems.

This series changes the default so the kernel uses 25% of the available
hardware threads for these allocations. In addition, we allow users that
wish to micro-optimize the allocation time to override this value via a
new kernel parameter.

We tested this on 2 generations of Xeon CPUs and the results show a big
improvement of the overall allocation time.

+-----------------------+-------+-------+-------+-------+-------+
| threads               | 8     | 16    | 32    | 64    | 128   |
+-----------------------+-------+-------+-------+-------+-------+
| skylake 144 cpus      | 44s   | 22s   | 16s   | 19s   | 20s   |
| cascade lake 192 cpus | 39s   | 20s   | 11s   | 10s   | 9s    |
+-----------------------+-------+-------+-------+-------+-------+

On skylake, we see an improvement of 2.75x when using 32 threads; on
cascade lake we can get even better, at 4.3x, when we use 128 threads.

This speedup is quite significant, and users of large machines like these
should have the option to make the machines boot as fast as possible.

This patch (of 3):

Before this patch, the kernel used a hard-coded value of 2 threads per
NUMA node for these allocations. This patch changes that policy: the
kernel now uses 25% of the available hardware threads for the
allocations.
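
Working through the new default with the larger machine from the table
above (numbers are illustrative, derived from the figures already
quoted):

  /*
   *   num_allocation_threads = max(num_online_cpus() / 4, 1)
   *                          = max(192 / 4, 1) = 48
   *
   *   for 1 TiB of 2 MiB pages (h->max_huge_pages == 524288):
   *     job.max_threads = 48
   *     job.min_chunk   = 524288 / 48 ~= 10922 pages per work item
   *
   * versus the previous policy of num_node_state(N_MEMORY) * 2 threads,
   * which on a machine with, say, two memory nodes would have used only
   * 4 threads regardless of its 192 CPUs.
   */
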
Link: https://lkml.kernel.org/r/20250227-hugepage-parameter-v2-0-7db8c6dc0453@cyberus-technology.de Link: https://lkml.kernel.org/r/20250227-hugepage-parameter-v2-1-7db8c6dc0453@cyberus-technology.de Signed-off-by: Thomas Prescher Cc: Jonathan Corbet Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7a96c6edeaef..edb846452791 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -14,9 +14,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -3605,31 +3607,31 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) .numa_aware = true }; + unsigned int num_allocation_threads = max(num_online_cpus() / 4, 1); + job.thread_fn = hugetlb_pages_alloc_boot_node; job.start = 0; job.size = h->max_huge_pages; /* - * job.max_threads is twice the num_node_state(N_MEMORY), + * job.max_threads is 25% of the available cpu threads by default. * - * Tests below indicate that a multiplier of 2 significantly improves - * performance, and although larger values also provide improvements, - * the gains are marginal. + * On large servers with terabytes of memory, huge page allocation + * can consume a considerably amount of time. * - * Therefore, choosing 2 as the multiplier strikes a good balance between - * enhancing parallel processing capabilities and maintaining efficient - * resource management. + * Tests below show how long it takes to allocate 1 TiB of memory with 2MiB huge pages. + * 2MiB huge pages. Using more threads can significantly improve allocation time. * - * +------------+-------+-------+-------+-------+-------+ - * | multiplier | 1 | 2 | 3 | 4 | 5 | - * +------------+-------+-------+-------+-------+-------+ - * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms | - * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms | - * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms | - * +------------+-------+-------+-------+-------+-------+ + * +-----------------------+-------+-------+-------+-------+-------+ + * | threads | 8 | 16 | 32 | 64 | 128 | + * +-----------------------+-------+-------+-------+-------+-------+ + * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s | + * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s | + * +-----------------------+-------+-------+-------+-------+-------+ */ - job.max_threads = num_node_state(N_MEMORY) * 2; - job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2; + + job.max_threads = num_allocation_threads; + job.min_chunk = h->max_huge_pages / num_allocation_threads; padata_do_multithreaded(&job); return h->nr_huge_pages; From 71f745688985c59eee41ffe88497a9e62774a9bf Mon Sep 17 00:00:00 2001 From: Thomas Prescher Date: Thu, 27 Feb 2025 23:45:06 +0100 Subject: [PATCH 248/431] mm: hugetlb: add hugetlb_alloc_threads cmdline option Add a command line option that enables control of how many threads should be used to allocate huge pages. 
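
As a usage illustration (the sizes are made up), the new option sits
next to the existing hugepage parameters on the kernel command line:

  default_hugepagesz=2M hugepages=524288 hugepage_alloc_threads=32

This would request 1 TiB of 2 MiB pages at boot and cap the boot-time
allocator at 32 threads instead of the 25%-of-CPUs default.
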
[akpm@linux-foundation.org: tidy up a comment] Link: https://lkml.kernel.org/r/20250227-hugepage-parameter-v2-2-7db8c6dc0453@cyberus-technology.de Signed-off-by: Thomas Prescher Cc: Jonathan Corbet Cc: Muchun Song Signed-off-by: Andrew Morton --- .../admin-guide/kernel-parameters.txt | 9 +++++ Documentation/admin-guide/mm/hugetlbpage.rst | 10 ++++++ mm/hugetlb.c | 33 ++++++++++++++++--- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 491628ac071a..2758bc124f16 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1882,6 +1882,15 @@ Documentation/admin-guide/mm/hugetlbpage.rst. Format: size[KMG] + hugepage_alloc_threads= + [HW] The number of threads that should be used to + allocate hugepages during boot. This option can be + used to improve system bootup time when allocating + a large amount of huge pages. + The default value is 25% of the available hardware threads. + + Note that this parameter only applies to non-gigantic huge pages. + hugetlb_cma= [HW,CMA,EARLY] The size of a CMA area used for allocation of gigantic hugepages. Or using node format, the size of a CMA area per node can be specified. diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst index f34a0d798d5b..67a941903fd2 100644 --- a/Documentation/admin-guide/mm/hugetlbpage.rst +++ b/Documentation/admin-guide/mm/hugetlbpage.rst @@ -145,7 +145,17 @@ hugepages It will allocate 1 2M hugepage on node0 and 2 2M hugepages on node1. If the node number is invalid, the parameter will be ignored. +hugepage_alloc_threads + Specify the number of threads that should be used to allocate hugepages + during boot. This parameter can be used to improve system bootup time + when allocating a large amount of huge pages. + The default value is 25% of the available hardware threads. + Example to use 8 allocation threads:: + + hugepage_alloc_threads=8 + + Note that this parameter only applies to non-gigantic huge pages. default_hugepagesz Specify the default huge page size. This parameter can only be specified once on the command line. 
default_hugepagesz can

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index edb846452791..486365cb2ece 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -85,6 +85,7 @@ static unsigned long __initdata default_hstate_max_huge_pages;
 static bool __initdata parsed_valid_hugepagesz = true;
 static bool __initdata parsed_default_hugepagesz;
 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
+static unsigned long hugepage_allocation_threads __initdata;
 
 static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
 static int hstate_cmdline_index __initdata;
@@ -3607,8 +3608,6 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 		.numa_aware	= true
 	};
 
-	unsigned int num_allocation_threads = max(num_online_cpus() / 4, 1);
-
 	job.thread_fn	= hugetlb_pages_alloc_boot_node;
 	job.start	= 0;
 	job.size	= h->max_huge_pages;
@@ -3629,9 +3628,13 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 	 * | cascade lake 192 cpus | 39s   | 20s   | 11s   | 10s   | 9s    |
 	 * +-----------------------+-------+-------+-------+-------+-------+
 	 */
+	if (hugepage_allocation_threads == 0) {
+		hugepage_allocation_threads = num_online_cpus() / 4;
+		hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
+	}
 
-	job.max_threads	= num_allocation_threads;
-	job.min_chunk	= h->max_huge_pages / num_allocation_threads;
+	job.max_threads	= hugepage_allocation_threads;
+	job.min_chunk	= h->max_huge_pages / hugepage_allocation_threads;
 	padata_do_multithreaded(&job);
 
 	return h->nr_huge_pages;
@@ -5000,6 +5003,28 @@ void __init hugetlb_bootmem_alloc(void)
 	__hugetlb_bootmem_allocated = true;
 }
 
+/*
+ * hugepage_alloc_threads command line parsing.
+ *
+ * When set, use this specific number of threads for the boot
+ * allocation of hugepages.
+ */
+static int __init hugepage_alloc_threads_setup(char *s)
+{
+	unsigned long allocation_threads;
+
+	if (kstrtoul(s, 0, &allocation_threads) != 0)
+		return 1;
+
+	if (allocation_threads == 0)
+		return 1;
+
+	hugepage_allocation_threads = allocation_threads;
+
+	return 1;
+}
+__setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
+
 static unsigned int allowed_mems_nr(struct hstate *h)
 {
 	int node;

From 70478a5534af757f9bbc65bbb25b607bd0e69890 Mon Sep 17 00:00:00 2001
From: Thomas Prescher
Date: Thu, 27 Feb 2025 23:45:07 +0100
Subject: [PATCH 249/431] mm: hugetlb: log time needed to allocate hugepages

Having this information allows users to easily tune the
hugepage_alloc_threads parameter.
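
For reference, with the format string added below, the resulting boot
log line would look like this (the numbers are invented to match the
earlier benchmark table):

  HugeTLB: allocation took 9000ms with hugepage_allocation_threads=128

i.e. the 9 s Cascade Lake result at 128 threads, which is exactly the
kind of data point this message is meant to surface.
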
Link: https://lkml.kernel.org/r/20250227-hugepage-parameter-v2-3-7db8c6dc0453@cyberus-technology.de Signed-off-by: Thomas Prescher Cc: Jonathan Corbet Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 486365cb2ece..7a47a08e8526 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3608,6 +3608,9 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) .numa_aware = true }; + unsigned long jiffies_start; + unsigned long jiffies_end; + job.thread_fn = hugetlb_pages_alloc_boot_node; job.start = 0; job.size = h->max_huge_pages; @@ -3635,7 +3638,14 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h) job.max_threads = hugepage_allocation_threads; job.min_chunk = h->max_huge_pages / hugepage_allocation_threads; + + jiffies_start = jiffies; padata_do_multithreaded(&job); + jiffies_end = jiffies; + + pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n", + jiffies_to_msecs(jiffies_end - jiffies_start), + hugepage_allocation_threads); return h->nr_huge_pages; } From f1ab2831e2a4312046bca79256b2efc41d373eaf Mon Sep 17 00:00:00 2001 From: Tang Yizhou Date: Tue, 4 Mar 2025 19:03:16 +0800 Subject: [PATCH 250/431] writeback: let trace_balance_dirty_pages() take struct dtc as parameter Patch series "Fix calculations in trace_balance_dirty_pages() for cgwb", v2. In my experiment, I found that the output of trace_balance_dirty_pages() in the cgroup writeback scenario was strange because trace_balance_dirty_pages() always uses global_wb_domain.dirty_limit for related calculations instead of the dirty_limit of the corresponding memcg's wb_domain. The basic idea of the fix is to store the hard dirty limit value computed in wb_position_ratio() into struct dirty_throttle_control and use it for calculations in trace_balance_dirty_pages(). This patch (of 3): Currently, trace_balance_dirty_pages() already has 12 parameters. In the patch #3, I initially attempted to introduce an additional parameter. However, in include/linux/trace_events.h, bpf_trace_run12() only supports up to 12 parameters and bpf_trace_run13() does not exist. To reduce the number of parameters in trace_balance_dirty_pages(), we can make it accept a pointer to struct dirty_throttle_control as a parameter. To achieve this, we need to move the definition of struct dirty_throttle_control from mm/page-writeback.c to include/linux/writeback.h. 
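
For clarity, the call sites shrink from twelve scalar arguments to eight
once the struct pointer is passed (shapes taken from the hunks below;
the bpf_trace_run12() limit is the one noted above):

  /* before: 12 arguments, the maximum the BPF tracepoint glue supports */
  trace_balance_dirty_pages(wb, sdtc->thresh, sdtc->bg_thresh,
                            sdtc->dirty, sdtc->wb_thresh, sdtc->wb_dirty,
                            dirty_ratelimit, task_ratelimit,
                            pages_dirtied, period, pause, start_time);

  /* after: 8 arguments, with room left for future additions */
  trace_balance_dirty_pages(wb, sdtc, dirty_ratelimit, task_ratelimit,
                            pages_dirtied, period, pause, start_time);
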
Link: https://lkml.kernel.org/r/20250304110318.159567-1-yizhou.tang@shopee.com Link: https://lkml.kernel.org/r/20250304110318.159567-2-yizhou.tang@shopee.com Signed-off-by: Tang Yizhou Cc: Alexei Starovoitov Cc: Christian Brauner Cc: Steven Rostedt Cc: Jan Kara Cc: "Masami Hiramatsu (Google)" Cc: Matthew Wilcow (Oracle) Cc: Tang Yizhou Cc: Tejun Heo Signed-off-by: Andrew Morton --- include/linux/writeback.h | 23 +++++++++++++++++++++ include/trace/events/writeback.h | 16 ++++++--------- mm/page-writeback.c | 35 ++------------------------------ 3 files changed, 31 insertions(+), 43 deletions(-) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d11b903c2edb..32095928365c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -313,6 +313,29 @@ static inline void cgroup_writeback_umount(struct super_block *sb) /* * mm/page-writeback.c */ +/* consolidated parameters for balance_dirty_pages() and its subroutines */ +struct dirty_throttle_control { +#ifdef CONFIG_CGROUP_WRITEBACK + struct wb_domain *dom; + struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ +#endif + struct bdi_writeback *wb; + struct fprop_local_percpu *wb_completions; + + unsigned long avail; /* dirtyable */ + unsigned long dirty; /* file_dirty + write + nfs */ + unsigned long thresh; /* dirty threshold */ + unsigned long bg_thresh; /* dirty background threshold */ + + unsigned long wb_dirty; /* per-wb counterparts */ + unsigned long wb_thresh; + unsigned long wb_bg_thresh; + + unsigned long pos_ratio; + bool freerun; + bool dirty_exceeded; +}; + void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); void laptop_mode_timer_fn(struct timer_list *t); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index a261e86e61fa..3213b9023794 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -629,11 +629,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, TRACE_EVENT(balance_dirty_pages, TP_PROTO(struct bdi_writeback *wb, - unsigned long thresh, - unsigned long bg_thresh, - unsigned long dirty, - unsigned long bdi_thresh, - unsigned long bdi_dirty, + struct dirty_throttle_control *dtc, unsigned long dirty_ratelimit, unsigned long task_ratelimit, unsigned long dirtied, @@ -641,7 +637,7 @@ TRACE_EVENT(balance_dirty_pages, long pause, unsigned long start_time), - TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, + TP_ARGS(wb, dtc, dirty_ratelimit, task_ratelimit, dirtied, period, pause, start_time), @@ -664,16 +660,16 @@ TRACE_EVENT(balance_dirty_pages, ), TP_fast_assign( - unsigned long freerun = (thresh + bg_thresh) / 2; + unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->limit = global_wb_domain.dirty_limit; __entry->setpoint = (global_wb_domain.dirty_limit + freerun) / 2; - __entry->dirty = dirty; + __entry->dirty = dtc->dirty; __entry->bdi_setpoint = __entry->setpoint * - bdi_thresh / (thresh + 1); - __entry->bdi_dirty = bdi_dirty; + dtc->wb_thresh / (dtc->thresh + 1); + __entry->bdi_dirty = dtc->wb_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 8b325aa525eb..149f8b815904 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -120,29 +120,6 @@ EXPORT_SYMBOL(laptop_mode); struct wb_domain global_wb_domain; -/* consolidated parameters for balance_dirty_pages() 
and its subroutines */ -struct dirty_throttle_control { -#ifdef CONFIG_CGROUP_WRITEBACK - struct wb_domain *dom; - struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ -#endif - struct bdi_writeback *wb; - struct fprop_local_percpu *wb_completions; - - unsigned long avail; /* dirtyable */ - unsigned long dirty; /* file_dirty + write + nfs */ - unsigned long thresh; /* dirty threshold */ - unsigned long bg_thresh; /* dirty background threshold */ - - unsigned long wb_dirty; /* per-wb counterparts */ - unsigned long wb_thresh; - unsigned long wb_bg_thresh; - - unsigned long pos_ratio; - bool freerun; - bool dirty_exceeded; -}; - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will @@ -1962,11 +1939,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb, */ if (pause < min_pause) { trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, @@ -1991,11 +1964,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb, pause: trace_balance_dirty_pages(wb, - sdtc->thresh, - sdtc->bg_thresh, - sdtc->dirty, - sdtc->wb_thresh, - sdtc->wb_dirty, + sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, From 28c24ef9e04f95672b72b1297eff7dae91cceea8 Mon Sep 17 00:00:00 2001 From: Tang Yizhou Date: Tue, 4 Mar 2025 19:03:17 +0800 Subject: [PATCH 251/431] writeback: rename variables in trace_balance_dirty_pages() Rename bdi_setpoint and bdi_dirty in the tracepoint to wb_setpoint and wb_dirty, respectively. These changes were omitted by Tejun in the cgroup writeback patchset. Link: https://lkml.kernel.org/r/20250304110318.159567-3-yizhou.tang@shopee.com Signed-off-by: Tang Yizhou Cc: Alexei Starovoitov Cc: Christian Brauner Cc: Jan Kara Cc: "Masami Hiramatsu (Google)" Cc: Matthew Wilcow (Oracle) Cc: Steven Rostedt Cc: Tejun Heo Signed-off-by: Andrew Morton --- include/trace/events/writeback.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 3213b9023794..3046ca6b08ea 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -646,8 +646,8 @@ TRACE_EVENT(balance_dirty_pages, __field(unsigned long, limit) __field(unsigned long, setpoint) __field(unsigned long, dirty) - __field(unsigned long, bdi_setpoint) - __field(unsigned long, bdi_dirty) + __field(unsigned long, wb_setpoint) + __field(unsigned long, wb_dirty) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned int, dirtied) @@ -667,9 +667,9 @@ TRACE_EVENT(balance_dirty_pages, __entry->setpoint = (global_wb_domain.dirty_limit + freerun) / 2; __entry->dirty = dtc->dirty; - __entry->bdi_setpoint = __entry->setpoint * + __entry->wb_setpoint = __entry->setpoint * dtc->wb_thresh / (dtc->thresh + 1); - __entry->bdi_dirty = dtc->wb_dirty; + __entry->wb_dirty = dtc->wb_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; @@ -685,7 +685,7 @@ TRACE_EVENT(balance_dirty_pages, TP_printk("bdi %s: " "limit=%lu setpoint=%lu dirty=%lu " - "bdi_setpoint=%lu bdi_dirty=%lu " + "wb_setpoint=%lu wb_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu", @@ -693,8 +693,8 @@ TRACE_EVENT(balance_dirty_pages, 
__entry->limit, __entry->setpoint, __entry->dirty, - __entry->bdi_setpoint, - __entry->bdi_dirty, + __entry->wb_setpoint, + __entry->wb_dirty, __entry->dirty_ratelimit, __entry->task_ratelimit, __entry->dirtied, From 6cc4c3aa714bc58ec5d20f3054ca5f23534984d1 Mon Sep 17 00:00:00 2001 From: Tang Yizhou Date: Tue, 4 Mar 2025 19:03:18 +0800 Subject: [PATCH 252/431] writeback: fix calculations in trace_balance_dirty_pages() for cgwb In the commit dcc25ae76eb7 ("writeback: move global_dirty_limit into wb_domain") of the cgroup writeback backpressure propagation patchset, Tejun made some adaptations to trace_balance_dirty_pages() for cgroup writeback. However, this adaptation was incomplete and Tejun missed further adaptation in the subsequent patches. In the cgroup writeback scenario, if sdtc in balance_dirty_pages() is assigned to mdtc, then upon entering trace_balance_dirty_pages(), __entry->limit should be assigned based on the dirty_limit of the corresponding memcg's wb_domain, rather than global_wb_domain. To address this issue and simplify the implementation, introduce a 'limit' field in struct dirty_throttle_control to store the hard_limit value computed in wb_position_ratio() by calling hard_dirty_limit(). This field will then be used in trace_balance_dirty_pages() to assign the value to __entry->limit. Link: https://lkml.kernel.org/r/20250304110318.159567-4-yizhou.tang@shopee.com Fixes: dcc25ae76eb7 ("writeback: move global_dirty_limit into wb_domain") Signed-off-by: Tang Yizhou Acked-by: Tejun Heo Cc: Alexei Starovoitov Cc: Christian Brauner Cc: Jan Kara Cc: "Masami Hiramatsu (Google)" Cc: Matthew Wilcow (Oracle) Cc: Steven Rostedt Signed-off-by: Andrew Morton --- include/linux/writeback.h | 1 + include/trace/events/writeback.h | 5 ++--- mm/page-writeback.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 32095928365c..58bda3347914 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -326,6 +326,7 @@ struct dirty_throttle_control { unsigned long dirty; /* file_dirty + write + nfs */ unsigned long thresh; /* dirty threshold */ unsigned long bg_thresh; /* dirty background threshold */ + unsigned long limit; /* hard dirty limit */ unsigned long wb_dirty; /* per-wb counterparts */ unsigned long wb_thresh; diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 3046ca6b08ea..0ff388131fc9 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -663,9 +663,8 @@ TRACE_EVENT(balance_dirty_pages, unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); - __entry->limit = global_wb_domain.dirty_limit; - __entry->setpoint = (global_wb_domain.dirty_limit + - freerun) / 2; + __entry->limit = dtc->limit; + __entry->setpoint = (dtc->limit + freerun) / 2; __entry->dirty = dtc->dirty; __entry->wb_setpoint = __entry->setpoint * dtc->wb_thresh / (dtc->thresh + 1); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 149f8b815904..18456ddd463b 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1072,7 +1072,7 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc) struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); - unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); + unsigned long limit = dtc->limit = 
hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long wb_thresh = dtc->wb_thresh; unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ From ab82e57981d0e4cb46d2817e7b65b9d5fdcf3832 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:05 -0800 Subject: [PATCH 253/431] mm/damon/core: introduce damos->ops_filters Patch series "mm/damon: make allow filters after reject filters useful and intuitive". DAMOS filters do allow or reject elements of memory for given DAMOS scheme only if those match the filter criterias. For elements that don't match any DAMOS filter, 'allowing' is the default behavior. This makes allow-filters that don't have any reject-filter after them meaningless sources of overhead. The decision was made to keep the behavior consistent with that before the introduction of allow-filters. This, however, makes usage of DAMOS filters confusing and inefficient. It is more intuitive and still consistent behavior to reject by default unless there is no filter at all or the last filter is a reject filter. Update the filtering logic in the way and update documents to clarify the behavior. Note that this is changing the old behavior. But the old behavior for the problematic filter combination was definitely confusing, inefficient and anyway useless. Also, the behavior has relatively recently introduced. It is difficult to anticipate any user that depends on the behavior. Hence this is not a user-breaking behavior change but an obvious improvement. This patch (of 9): DAMOS filters can be categorized into two groups depending on which layer they are handled, namely core layer and ops layer. The groups are important because the filtering behavior depends on evaluation sequence of filters, and core layer-handled filters are evaluated before operations layer-handled ones. The behavior is clearly documented, but the implementation is bit inefficient and complicated. All filters are maintained in a single list (damos->filters) in mix. Filters evaluation logics in core layer and operations layer iterates all the filters on the list, while skipping filters that should be not handled by the layer of the logic. It is inefficient. Making future extensions having differentiations for filters of different handling layers will also be complicated. Add a new list that will be used for having all operations layer-handled DAMOS filters to DAMOS scheme data structure. Also add the support of its initialization and basic traversal functions. Link: https://lkml.kernel.org/r/20250304211913.53574-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250304211913.53574-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Cc: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 8 ++++++++ mm/damon/core.c | 1 + 2 files changed, 9 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index b3e2c793c1f4..7f76e2e99f37 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -448,6 +448,7 @@ struct damos_access_pattern { * @wmarks: Watermarks for automated (in)activation of this scheme. * @target_nid: Destination node if @action is "migrate_{hot,cold}". * @filters: Additional set of &struct damos_filter for &action. + * @ops_filters: ops layer handling &struct damos_filter objects list. * @last_applied: Last @action applied ops-managing entity. * @stat: Statistics of this scheme. * @list: List head for siblings. 
@@ -508,6 +509,7 @@ struct damos { int target_nid; }; struct list_head filters; + struct list_head ops_filters; void *last_applied; struct damos_stat stat; struct list_head list; @@ -858,6 +860,12 @@ static inline unsigned long damon_sz_region(struct damon_region *r) #define damos_for_each_filter_safe(f, next, scheme) \ list_for_each_entry_safe(f, next, &(scheme)->filters, list) +#define damos_for_each_ops_filter(f, scheme) \ + list_for_each_entry(f, &(scheme)->ops_filters, list) + +#define damos_for_each_ops_filter_safe(f, next, scheme) \ + list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list) + #ifdef CONFIG_DAMON struct damon_region *damon_new_region(unsigned long start, unsigned long end); diff --git a/mm/damon/core.c b/mm/damon/core.c index 9d37d3664030..5415b7603d01 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -375,6 +375,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, scheme->next_apply_sis = 0; scheme->walk_completed = false; INIT_LIST_HEAD(&scheme->filters); + INIT_LIST_HEAD(&scheme->ops_filters); scheme->stat = (struct damos_stat){}; INIT_LIST_HEAD(&scheme->list); From ac7b094bf4d6bd34cea84d1f97f4fe5c45984b6a Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:06 -0800 Subject: [PATCH 254/431] mm/damon/paddr: support ops_filters DAMON keeps all DAMOS filters in damos->filters. Upcoming changes will make it to use damos->ops_filters for all operations layer handled DAMOS filters, though. DAMON physical address space operations set implementation (paddr) is not ready for the changes, since it handles only damos->filters. To avoid any breakage during the upcoming changes, make paddr to handle both lists. After the change is made, ->filters support on paddr can be safely removed. Link: https://lkml.kernel.org/r/20250304211913.53574-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index d5db313ca717..2b1ea568a431 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -260,6 +260,10 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) if (damos_pa_filter_match(filter, folio)) return !filter->allow; } + damos_for_each_ops_filter(filter, scheme) { + if (damos_pa_filter_match(filter, folio)) + return !filter->allow; + } return false; } @@ -290,6 +294,12 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, break; } } + damos_for_each_ops_filter(filter, s) { + if (filter->type == DAMOS_FILTER_TYPE_YOUNG) { + install_young_filter = false; + break; + } + } if (install_young_filter) { filter = damos_new_filter( DAMOS_FILTER_TYPE_YOUNG, true, false); @@ -538,6 +548,8 @@ static bool damon_pa_scheme_has_filter(struct damos *s) damos_for_each_filter(f, s) return true; + damos_for_each_ops_filter(f, s) + return true; return false; } From 3607cc590f183179dd804faac27ee7284f6b6bf8 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:07 -0800 Subject: [PATCH 255/431] mm/damon/core: support committing ops_filters DAMON kernel API callers should use damon_commit_ctx() to install DAMON parameters including DAMOS filters. But damos_commit_ops_filters(), which is called by damon_commit_ctx() for filters installing, is not handling damos->ops_filters. Hence, no DAMON kernel API caller can use damos->ops_filters. Do the committing of the ops_filters to make it usable. 
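For context, a minimal sketch of the caller-side flow this change is meant to serve. It is not part of the patch; the wrapper function name is made up, the staging-context pattern is assumed, and damon_new_ctx()/damon_commit_ctx()/damon_destroy_ctx() are taken to be the existing DAMON kernel API entry points:

    /*
     * Sketch: push staged DAMON parameters (including DAMOS ops-layer
     * filters) into a running context.  With this patch,
     * damos_commit_filters() also carries over scheme->ops_filters;
     * before it, only scheme->filters was committed.
     */
    static int example_commit(struct damon_ctx *running_ctx)
    {
    	struct damon_ctx *staged = damon_new_ctx();
    	int err;

    	if (!staged)
    		return -ENOMEM;
    	/* ... populate staged->schemes, including ops layer handled filters ... */
    	err = damon_commit_ctx(running_ctx, staged);
    	damon_destroy_ctx(staged);
    	return err;
    }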
Link: https://lkml.kernel.org/r/20250304211913.53574-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/core.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 5415b7603d01..1daccccb5d67 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -820,7 +820,7 @@ static void damos_commit_filter( damos_commit_filter_arg(dst, src); } -static int damos_commit_filters(struct damos *dst, struct damos *src) +static int damos_commit_core_filters(struct damos *dst, struct damos *src) { struct damos_filter *dst_filter, *next, *src_filter, *new_filter; int i = 0, j = 0; @@ -848,6 +848,44 @@ static int damos_commit_filters(struct damos *dst, struct damos *src) return 0; } +static int damos_commit_ops_filters(struct damos *dst, struct damos *src) +{ + struct damos_filter *dst_filter, *next, *src_filter, *new_filter; + int i = 0, j = 0; + + damos_for_each_ops_filter_safe(dst_filter, next, dst) { + src_filter = damos_nth_filter(i++, src); + if (src_filter) + damos_commit_filter(dst_filter, src_filter); + else + damos_destroy_filter(dst_filter); + } + + damos_for_each_ops_filter_safe(src_filter, next, src) { + if (j++ < i) + continue; + + new_filter = damos_new_filter( + src_filter->type, src_filter->matching, + src_filter->allow); + if (!new_filter) + return -ENOMEM; + damos_commit_filter_arg(new_filter, src_filter); + damos_add_filter(dst, new_filter); + } + return 0; +} + +static int damos_commit_filters(struct damos *dst, struct damos *src) +{ + int err; + + err = damos_commit_core_filters(dst, src); + if (err) + return err; + return damos_commit_ops_filters(dst, src); +} + static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) { struct damos *s; From 2a689e4e83bdc90cd00ca21aa28d337d202f4950 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:08 -0800 Subject: [PATCH 256/431] mm/damon/core: put ops-handled filters to damos->ops_filters damos->ops_filters has introduced to be used for all operations layer handled filters. But DAMON kernel API callers can put any type of DAMOS filters to any of damos->filters and damos->ops_filters. DAMON user-space ABI users have no way to use ->ops_filters at all. Update damos_add_filter(), which should be used by API callers to install DAMOS filters, to add filters to ->filters and ->ops_filters depending on their handling layer. The change forces both API callers and ABI users to use proper lists since ABI users use the API internally. 
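As a rough illustration of the new routing (a sketch, not taken from the patch; 'scheme' is an already-allocated struct damos, the address-range setup for the ADDR filter is omitted, and error handling is trimmed):

    struct damos_filter *addr_f, *young_f;

    /* Core layer handled type: damos_add_filter() keeps it on scheme->filters. */
    addr_f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
    damos_add_filter(scheme, addr_f);

    /* Ops layer handled type: now queued on scheme->ops_filters instead. */
    young_f = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, false);
    damos_add_filter(scheme, young_f);

Either way the caller keeps using damos_add_filter(); only the backing list changes, which is why ABI users get the proper placement for free.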
Link: https://lkml.kernel.org/r/20250304211913.53574-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/core.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 1daccccb5d67..3fbc31d17239 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -281,9 +281,24 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return filter; } +static bool damos_filter_for_ops(enum damos_filter_type type) +{ + switch (type) { + case DAMOS_FILTER_TYPE_ADDR: + case DAMOS_FILTER_TYPE_TARGET: + return false; + default: + break; + } + return true; +} + void damos_add_filter(struct damos *s, struct damos_filter *f) { - list_add_tail(&f->list, &s->filters); + if (damos_filter_for_ops(f->type)) + list_add_tail(&f->list, &s->ops_filters); + else + list_add_tail(&f->list, &s->filters); } static void damos_del_filter(struct damos_filter *f) From 627983a55221d429db4fe9ecb75c4ef2f04acd15 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:09 -0800 Subject: [PATCH 257/431] mm/damon/paddr: support only damos->ops_filters DAMON physical address space operation set implementation (paddr) started handling both damos->filters and damos->ops_filters to avoid breakage during the change for the ->ops_filters setup. Now the change is done, so paddr's support of ->filters is only a waste that can safely be dropped. Remove it. Link: https://lkml.kernel.org/r/20250304211913.53574-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 2b1ea568a431..dded659bb110 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -256,10 +256,6 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) if (scheme->core_filters_allowed) return false; - damos_for_each_filter(filter, scheme) { - if (damos_pa_filter_match(filter, folio)) - return !filter->allow; - } damos_for_each_ops_filter(filter, scheme) { if (damos_pa_filter_match(filter, folio)) return !filter->allow; @@ -288,12 +284,6 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, struct folio *folio; /* check access in page level again by default */ - damos_for_each_filter(filter, s) { - if (filter->type == DAMOS_FILTER_TYPE_YOUNG) { - install_young_filter = false; - break; - } - } damos_for_each_ops_filter(filter, s) { if (filter->type == DAMOS_FILTER_TYPE_YOUNG) { install_young_filter = false; @@ -546,8 +536,6 @@ static bool damon_pa_scheme_has_filter(struct damos *s) { struct damos_filter *f; - damos_for_each_filter(f, s) - return true; damos_for_each_ops_filter(f, s) return true; return false; From dd038b728c8a2a0e1a632b767a50f09f076dab79 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:10 -0800 Subject: [PATCH 258/431] mm/damon: add default allow/reject behavior fields to struct damos Current default allow/reject behavior of filters handling stage has made before introduction of the allow behavior. For allow-filters usage, it is confusing and inefficient. It is more intuitive to decide the default filtering stage allow/reject behavior as opposite to the last filter's behavior. The decision should be made separately for core and operations layers' filtering stages, since last core layer-handled filter is not really a last filter if there are operations layer handling filters. 
Keeping separate decisions for the two categories can make the logic simpler. Add fields for storing the two decisions. Link: https://lkml.kernel.org/r/20250304211913.53574-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 7f76e2e99f37..52559475dbe7 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -502,6 +502,9 @@ struct damos { * layer-handled filters. If true, operations layer allows it, too. */ bool core_filters_allowed; + /* whether to reject core/ops filters umatched regions */ + bool core_filters_default_reject; + bool ops_filters_default_reject; /* public: */ struct damos_quota quota; struct damos_watermarks wmarks; From 961df88e4688bf94cfa49d644e49b74d34806d3d Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:11 -0800 Subject: [PATCH 259/431] mm/damon/core: set damos_filter default allowance behavior based on installed filters Decide whether to allow or reject by default on core and opertions layer handled filters evaluation stages. It is decided as the opposite of the last installed filter's behavior. If there is no filter at all, allow by default. If there is any operations layer handled filters, core layer's filtering stage sets allowing as the default behavior regardless of the last filter of core layer-handling ones, since the last filter of core layer handled filters in the case is not really the last filter of the entire filtering stage. Also, make the core layer's DAMOS filters handling stage uses the newly set behavior field. [sj@kernel.org: setup damos->{core,ops}_filters_default_reject for initial start] Link: https://lkml.kernel.org/r/20250315222610.35245-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250304211913.53574-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/core.c | 41 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 3fbc31d17239..511c464adcc5 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -518,7 +518,7 @@ struct damon_ctx *damon_new_ctx(void) ctx->attrs.ops_update_interval = 60 * 1000 * 1000; ctx->passed_sample_intervals = 0; - /* These will be set from kdamond_init_intervals_sis() */ + /* These will be set from kdamond_init_ctx() */ ctx->next_aggregation_sis = 0; ctx->next_ops_update_sis = 0; @@ -891,6 +891,32 @@ static int damos_commit_ops_filters(struct damos *dst, struct damos *src) return 0; } +/** + * damos_filters_default_reject() - decide whether to reject memory that didn't + * match with any given filter. + * @filters: Given DAMOS filters of a group. 
+ */ +static bool damos_filters_default_reject(struct list_head *filters) +{ + struct damos_filter *last_filter; + + if (list_empty(filters)) + return false; + last_filter = list_last_entry(filters, struct damos_filter, list); + return last_filter->allow; +} + +static void damos_set_filters_default_reject(struct damos *s) +{ + if (!list_empty(&s->ops_filters)) + s->core_filters_default_reject = false; + else + s->core_filters_default_reject = + damos_filters_default_reject(&s->filters); + s->ops_filters_default_reject = + damos_filters_default_reject(&s->ops_filters); +} + static int damos_commit_filters(struct damos *dst, struct damos *src) { int err; @@ -898,7 +924,11 @@ static int damos_commit_filters(struct damos *dst, struct damos *src) err = damos_commit_core_filters(dst, src); if (err) return err; - return damos_commit_ops_filters(dst, src); + err = damos_commit_ops_filters(dst, src); + if (err) + return err; + damos_set_filters_default_reject(dst); + return 0; } static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) @@ -1580,7 +1610,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, return !filter->allow; } } - return false; + return s->core_filters_default_reject; } /* @@ -2315,7 +2345,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) return -EBUSY; } -static void kdamond_init_intervals_sis(struct damon_ctx *ctx) +static void kdamond_init_ctx(struct damon_ctx *ctx) { unsigned long sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; @@ -2333,6 +2363,7 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx) apply_interval = scheme->apply_interval_us ? scheme->apply_interval_us : ctx->attrs.aggr_interval; scheme->next_apply_sis = apply_interval / sample_interval; + damos_set_filters_default_reject(scheme); } } @@ -2350,7 +2381,7 @@ static int kdamond_fn(void *data) pr_debug("kdamond (%d) starts\n", current->pid); complete(&ctx->kdamond_started); - kdamond_init_intervals_sis(ctx); + kdamond_init_ctx(ctx); if (ctx->ops.init) ctx->ops.init(ctx); From a54c42f6873d0fc9d7667433112e34a732c3b228 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:12 -0800 Subject: [PATCH 260/431] mm/damon/paddr: respect ops_filters_default_reject Use damos->ops_filters_default_reject, which is set based on the installed filters' behaviors, from physical address space DAMON operations set. Link: https://lkml.kernel.org/r/20250304211913.53574-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index dded659bb110..fba8b3c8ba30 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -260,7 +260,7 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) if (damos_pa_filter_match(filter, folio)) return !filter->allow; } - return false; + return scheme->ops_filters_default_reject; } static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s) From 9ea705a54badbc3f33daf60c2da989c24c467e77 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Mar 2025 13:19:13 -0800 Subject: [PATCH 261/431] Docs/mm/damon/design: update for changed filter-default behavior Update the design documentation for changed DAMOS filters default allowance behaviors. 
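A short worked example of the changed default (a sketch using the DAMOS kernel API names from this series; 'scheme' is assumed to be an installed struct damos):

    /* Install a single allow-filter: allow young pages. */
    struct damos_filter *f = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, true);

    damos_add_filter(scheme, f);

    /*
     * Old behavior: pages matching no filter still get the action applied,
     * so a trailing allow-filter was pure checking overhead.
     * New behavior: the last installed filter is an allow-filter, so pages
     * matching no filter are rejected by default.
     */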
Link: https://lkml.kernel.org/r/20250304211913.53574-10-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index e6fd3b604e70..aae3a691ee69 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -631,9 +631,10 @@ When multiple filters are installed, the group of filters that handled by the core layer are evaluated first. After that, the group of filters that handled by the operations layer are evaluated. Filters in each of the groups are evaluated in the installed order. If a part of memory is matched to one of the -filter, next filters are ignored. If the memory passes through the filters +filter, next filters are ignored. If the part passes through the filters evaluation stage because it is not matched to any of the filters, applying the -scheme's action to it is allowed, same to the behavior when no filter exists. +scheme's action to it depends on the last filter's allowance type. If the last +filter was for allowing, the part of memory will be rejected, and vice versa. For example, let's assume 1) a filter for allowing anonymous pages and 2) another filter for rejecting young pages are installed in the order. If a page @@ -645,11 +646,6 @@ second reject-filter blocks it. If the page is neither anonymous nor young, the page will pass through the filters evaluation stage since there is no matching filter, and the action will be applied to the page. -Note that the action can equally be applied to memory that either explicitly -filter-allowed or filters evaluation stage passed. It means that installing -allow-filters at the end of the list makes no practical change but only -filters-checking overhead. - Below ``type`` of filters are currently supported. - Core layer handled From ac55b38fe2f9b486031439c5c4ed7fce07d0d838 Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Wed, 5 Mar 2025 15:17:59 +0800 Subject: [PATCH 262/431] mm/shrinker: fix name consistency issue in shrinker_debugfs_rename() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After calling debugfs_change_name function, the return value should be checked and the old name restored. If debugfs_change_name fails, the new name memory should be freed. The effect is that the shrinker->name is not consistent with the name displayed in debugfs. Link: https://lkml.kernel.org/r/20250305071759.661055-1-liuye@kylinos.cn Signed-off-by: Liu Ye Reviewed-by: Muchun Song Reviewed-by:Qi Zheng Cc: Dave Chinner Cc: Muchun Song Cc: Qi Zheng Signed-off-by: Andrew Morton --- mm/shrinker_debug.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c index 794bd433cce0..20eaee3e97f7 100644 --- a/mm/shrinker_debug.c +++ b/mm/shrinker_debug.c @@ -214,10 +214,14 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) 
ret = debugfs_change_name(shrinker->debugfs_entry, "%s-%d", shrinker->name, shrinker->debugfs_id); + if (ret) { + shrinker->name = old; + kfree_const(new); + } else { + kfree_const(old); + } mutex_unlock(&shrinker_mutex); - kfree_const(old); - return ret; } EXPORT_SYMBOL(shrinker_debugfs_rename); From 9bbe033c75a56d72fc35e7c8ca6f3258d9782fa5 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 5 Mar 2025 06:11:29 +0000 Subject: [PATCH 263/431] mm: zpool: add interfaces for object read/write APIs Patch series "Switch zswap to object read/write APIs". This patch series updates zswap to use the new object read/write APIs defined by zsmalloc in [1], and remove the old object mapping APIs and the related code from zpool and zsmalloc. This patch (of 5): Zsmalloc introduced new APIs to read/write objects besides mapping them. Add the necessary zpool interfaces. Link: https://lkml.kernel.org/r/20250305061134.4105762-1-yosry.ahmed@linux.dev Link: https://lkml.kernel.org/r/20250305061134.4105762-2-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Reviewed-by: Sergey Senozhatsky Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Chengming Zhou Cc: Herbert Xu Cc: Minchan Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/zpool.h | 17 +++++++++++++++ mm/zpool.c | 48 +++++++++++++++++++++++++++++++++++++++++++ mm/zsmalloc.c | 21 +++++++++++++++++++ 3 files changed, 86 insertions(+) diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 5e6dc46b8cc4..1784e735ee04 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -52,6 +52,16 @@ void *zpool_map_handle(struct zpool *pool, unsigned long handle, void zpool_unmap_handle(struct zpool *pool, unsigned long handle); + +void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle, + void *local_copy); + +void zpool_obj_read_end(struct zpool *zpool, unsigned long handle, + void *handle_mem); + +void zpool_obj_write(struct zpool *zpool, unsigned long handle, + void *handle_mem, size_t mem_len); + u64 zpool_get_total_pages(struct zpool *pool); @@ -90,6 +100,13 @@ struct zpool_driver { enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); + void *(*obj_read_begin)(void *pool, unsigned long handle, + void *local_copy); + void (*obj_read_end)(void *pool, unsigned long handle, + void *handle_mem); + void (*obj_write)(void *pool, unsigned long handle, + void *handle_mem, size_t mem_len); + u64 (*total_pages)(void *pool); }; diff --git a/mm/zpool.c b/mm/zpool.c index 4bbd12d4b659..378c2d1e5638 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -320,6 +320,54 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) zpool->driver->unmap(zpool->pool, handle); } +/** + * zpool_obj_read_begin() - Start reading from a previously allocated handle. + * @zpool: The zpool that the handle was allocated from + * @handle: The handle to read from + * @local_copy: A local buffer to use if needed. + * + * This starts a read operation of a previously allocated handle. The passed + * @local_copy buffer may be used if needed by copying the memory into. + * zpool_obj_read_end() MUST be called after the read is completed to undo any + * actions taken (e.g. release locks). + * + * Returns: A pointer to the handle memory to be read, if @local_copy is used, + * the returned pointer is @local_copy. 
+ */ +void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle, + void *local_copy) +{ + return zpool->driver->obj_read_begin(zpool->pool, handle, local_copy); +} + +/** + * zpool_obj_read_end() - Finish reading from a previously allocated handle. + * @zpool: The zpool that the handle was allocated from + * @handle: The handle to read from + * @handle_mem: The pointer returned by zpool_obj_read_begin() + * + * Finishes a read operation previously started by zpool_obj_read_begin(). + */ +void zpool_obj_read_end(struct zpool *zpool, unsigned long handle, + void *handle_mem) +{ + zpool->driver->obj_read_end(zpool->pool, handle, handle_mem); +} + +/** + * zpool_obj_write() - Write to a previously allocated handle. + * @zpool: The zpool that the handle was allocated from + * @handle: The handle to read from + * @handle_mem: The memory to copy from into the handle. + * @mem_len: The length of memory to be written. + * + */ +void zpool_obj_write(struct zpool *zpool, unsigned long handle, + void *handle_mem, size_t mem_len) +{ + zpool->driver->obj_write(zpool->pool, handle, handle_mem, mem_len); +} + /** * zpool_get_total_pages() - The total size of the pool * @zpool: The zpool to check diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 63c99db71dc1..d84b300db64e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -507,6 +507,24 @@ static void zs_zpool_unmap(void *pool, unsigned long handle) zs_unmap_object(pool, handle); } +static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle, + void *local_copy) +{ + return zs_obj_read_begin(pool, handle, local_copy); +} + +static void zs_zpool_obj_read_end(void *pool, unsigned long handle, + void *handle_mem) +{ + zs_obj_read_end(pool, handle, handle_mem); +} + +static void zs_zpool_obj_write(void *pool, unsigned long handle, + void *handle_mem, size_t mem_len) +{ + zs_obj_write(pool, handle, handle_mem, mem_len); +} + static u64 zs_zpool_total_pages(void *pool) { return zs_get_total_pages(pool); @@ -522,6 +540,9 @@ static struct zpool_driver zs_zpool_driver = { .free = zs_zpool_free, .map = zs_zpool_map, .unmap = zs_zpool_unmap, + .obj_read_begin = zs_zpool_obj_read_begin, + .obj_read_end = zs_zpool_obj_read_end, + .obj_write = zs_zpool_obj_write, .total_pages = zs_zpool_total_pages, }; From 7d4c9629b74ff7ad3b58e57324e235d710e55c21 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 5 Mar 2025 06:11:30 +0000 Subject: [PATCH 264/431] mm: zswap: use object read/write APIs instead of object mapping APIs Use the new object read/write APIs instead of mapping APIs. On compress side, zpool_obj_write() is more concise and provides exactly what zswap needs to write the compressed object to the zpool, instead of map->copy->unmap. On the decompress side, zpool_obj_read_begin() is sleepable, which allows avoiding the memcpy() for zsmalloc and slightly simplifying the code by: - Avoiding checking if the zpool driver is sleepable, reducing special cases and shrinking the huge comment. - Having a single zpool_obj_read_end() call rather than multiple conditional zpool_unmap_handle() calls. The !virt_addr_valid() case can be removed in the future if the crypto API supports kmap addresses or by using kmap_to_page(), completely eliminating the memcpy() path in zswap_decompress(). This a step toward that. In that spirit, opportunistically make the comment more specific about the kmap case instead of generic non-linear addresses. 
This is the only case that needs to be handled in practice, and the generic comment makes it seem like a bigger problem that it actually is. Link: https://lkml.kernel.org/r/20250305061134.4105762-3-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Chengming Zhou Cc: Herbert Xu Cc: Minchan Kim Cc: Peter Zijlstra Cc: Sergey Senozhatsky Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- mm/zswap.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/mm/zswap.c b/mm/zswap.c index 8a1ded8fa973..7de54f105d04 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -930,7 +930,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, unsigned int dlen = PAGE_SIZE; unsigned long handle; struct zpool *zpool; - char *buf; gfp_t gfp; u8 *dst; @@ -972,10 +971,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, if (alloc_ret) goto unlock; - buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO); - memcpy(buf, dst, dlen); - zpool_unmap_handle(zpool, handle); - + zpool_obj_write(zpool, handle, dst, dlen); entry->handle = handle; entry->length = dlen; @@ -996,24 +992,22 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio) struct zpool *zpool = entry->pool->zpool; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; - u8 *src; + u8 *src, *obj; acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool); - src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO); + obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer); + /* - * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer - * to do crypto_acomp_decompress() which might sleep. In such cases, we must - * resort to copying the buffer to a temporary one. - * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer, - * such as a kmap address of high memory or even ever a vmap address. - * However, sg_init_one is only equipped to handle linearly mapped low memory. - * In such cases, we also must copy the buffer to a temporary and lowmem one. + * zpool_obj_read_begin() might return a kmap address of highmem when + * acomp_ctx->buffer is not used. However, sg_init_one() does not + * handle highmem addresses, so copy the object to acomp_ctx->buffer. */ - if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) || - !virt_addr_valid(src)) { - memcpy(acomp_ctx->buffer, src, entry->length); + if (virt_addr_valid(obj)) { + src = obj; + } else { + WARN_ON_ONCE(obj == acomp_ctx->buffer); + memcpy(acomp_ctx->buffer, obj, entry->length); src = acomp_ctx->buffer; - zpool_unmap_handle(zpool, entry->handle); } sg_init_one(&input, src, entry->length); @@ -1023,8 +1017,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio) BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait)); BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE); - if (src != acomp_ctx->buffer) - zpool_unmap_handle(zpool, entry->handle); + zpool_obj_read_end(zpool, entry->handle, obj); acomp_ctx_put_unlock(acomp_ctx); } From fcbea574754c63f7035d0c4ef7dfb161b60b5bde Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 5 Mar 2025 06:11:31 +0000 Subject: [PATCH 265/431] mm: zpool: remove object mapping APIs zpool_map_handle(), zpool_unmap_handle(), and zpool_can_sleep_mapped() are no longer used. Remove them with the underlying driver callbacks. 
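For reference, the replacement pattern former zpool_map_handle()/zpool_unmap_handle() users follow with the APIs added earlier in this series (a sketch; 'buffer' is a caller-provided scratch area at least as large as the object, and 'len' is the stored object length):

    void *obj;

    /* Read: returns either the in-place object or 'buffer' if a copy was needed. */
    obj = zpool_obj_read_begin(zpool, handle, buffer);
    /* ... consume up to 'len' bytes from obj; sleeping is allowed here ... */
    zpool_obj_read_end(zpool, handle, obj);

    /* Write: copies 'len' bytes from 'src' into the object behind 'handle'. */
    zpool_obj_write(zpool, handle, src, len);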
Link: https://lkml.kernel.org/r/20250305061134.4105762-4-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Reviewed-by: Sergey Senozhatsky Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Chengming Zhou Cc: Herbert Xu Cc: Minchan Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/zpool.h | 30 --------------------- mm/zpool.c | 61 ------------------------------------------- mm/zsmalloc.c | 27 ------------------- 3 files changed, 118 deletions(-) diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 1784e735ee04..2c8a9d2654f6 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -13,25 +13,6 @@ struct zpool; -/* - * Control how a handle is mapped. It will be ignored if the - * implementation does not support it. Its use is optional. - * Note that this does not refer to memory protection, it - * refers to how the memory will be copied in/out if copying - * is necessary during mapping; read-write is the safest as - * it copies the existing memory in on map, and copies the - * changed memory back out on unmap. Write-only does not copy - * in the memory and should only be used for initialization. - * If in doubt, use ZPOOL_MM_DEFAULT which is read-write. - */ -enum zpool_mapmode { - ZPOOL_MM_RW, /* normal read-write mapping */ - ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */ - ZPOOL_MM_WO, /* write-only (no copy-in at map time) */ - - ZPOOL_MM_DEFAULT = ZPOOL_MM_RW -}; - bool zpool_has_pool(char *type); struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp); @@ -47,12 +28,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, void zpool_free(struct zpool *pool, unsigned long handle); -void *zpool_map_handle(struct zpool *pool, unsigned long handle, - enum zpool_mapmode mm); - -void zpool_unmap_handle(struct zpool *pool, unsigned long handle); - - void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle, void *local_copy); @@ -95,11 +70,6 @@ struct zpool_driver { unsigned long *handle); void (*free)(void *pool, unsigned long handle); - bool sleep_mapped; - void *(*map)(void *pool, unsigned long handle, - enum zpool_mapmode mm); - void (*unmap)(void *pool, unsigned long handle); - void *(*obj_read_begin)(void *pool, unsigned long handle, void *local_copy); void (*obj_read_end)(void *pool, unsigned long handle, diff --git a/mm/zpool.c b/mm/zpool.c index 378c2d1e5638..4fc665b42f5e 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -277,49 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle) zpool->driver->free(zpool->pool, handle); } -/** - * zpool_map_handle() - Map a previously allocated handle into memory - * @zpool: The zpool that the handle was allocated from - * @handle: The handle to map - * @mapmode: How the memory should be mapped - * - * This maps a previously allocated handle into memory. The @mapmode - * param indicates to the implementation how the memory will be - * used, i.e. read-only, write-only, read-write. If the - * implementation does not support it, the memory will be treated - * as read-write. - * - * This may hold locks, disable interrupts, and/or preemption, - * and the zpool_unmap_handle() must be called to undo those - * actions. The code that uses the mapped handle should complete - * its operations on the mapped handle memory quickly and unmap - * as soon as possible. As the implementation may use per-cpu - * data, multiple handles should not be mapped concurrently on - * any cpu. - * - * Returns: A pointer to the handle's mapped memory area. 
- */ -void *zpool_map_handle(struct zpool *zpool, unsigned long handle, - enum zpool_mapmode mapmode) -{ - return zpool->driver->map(zpool->pool, handle, mapmode); -} - -/** - * zpool_unmap_handle() - Unmap a previously mapped handle - * @zpool: The zpool that the handle was allocated from - * @handle: The handle to unmap - * - * This unmaps a previously mapped handle. Any locks or other - * actions that the implementation took in zpool_map_handle() - * will be undone here. The memory area returned from - * zpool_map_handle() should no longer be used after this. - */ -void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) -{ - zpool->driver->unmap(zpool->pool, handle); -} - /** * zpool_obj_read_begin() - Start reading from a previously allocated handle. * @zpool: The zpool that the handle was allocated from @@ -381,23 +338,5 @@ u64 zpool_get_total_pages(struct zpool *zpool) return zpool->driver->total_pages(zpool->pool); } -/** - * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. - * @zpool: The zpool to test - * - * Some allocators enter non-preemptible context in ->map() callback (e.g. - * disable pagefaults) and exit that context in ->unmap(), which limits what - * we can do with the mapped object. For instance, we cannot wait for - * asynchronous crypto API to decompress such an object or take mutexes - * since those will call into the scheduler. This function tells us whether - * we use such an allocator. - * - * Returns: true if zpool can sleep; false otherwise. - */ -bool zpool_can_sleep_mapped(struct zpool *zpool) -{ - return zpool->driver->sleep_mapped; -} - MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("Common API for compressed memory storage"); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index d84b300db64e..56d6ed5c675b 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -482,31 +482,6 @@ static void zs_zpool_free(void *pool, unsigned long handle) zs_free(pool, handle); } -static void *zs_zpool_map(void *pool, unsigned long handle, - enum zpool_mapmode mm) -{ - enum zs_mapmode zs_mm; - - switch (mm) { - case ZPOOL_MM_RO: - zs_mm = ZS_MM_RO; - break; - case ZPOOL_MM_WO: - zs_mm = ZS_MM_WO; - break; - case ZPOOL_MM_RW: - default: - zs_mm = ZS_MM_RW; - break; - } - - return zs_map_object(pool, handle, zs_mm); -} -static void zs_zpool_unmap(void *pool, unsigned long handle) -{ - zs_unmap_object(pool, handle); -} - static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle, void *local_copy) { @@ -538,8 +513,6 @@ static struct zpool_driver zs_zpool_driver = { .malloc_support_movable = true, .malloc = zs_zpool_malloc, .free = zs_zpool_free, - .map = zs_zpool_map, - .unmap = zs_zpool_unmap, .obj_read_begin = zs_zpool_obj_read_begin, .obj_read_end = zs_zpool_obj_read_end, .obj_write = zs_zpool_obj_write, From 07864f1a57fb1f798a7d21f13e4929c9cb52daf7 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 5 Mar 2025 06:11:32 +0000 Subject: [PATCH 266/431] mm: zsmalloc: remove object mapping APIs and per-CPU map areas zs_map_object() and zs_unmap_object() are no longer used, remove them. Since these are the only users of per-CPU mapping_areas, remove them and the associated CPU hotplug callbacks too. 
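For zsmalloc users, the old and new access patterns side by side (a sketch based on the zs_obj_read_begin()/zs_obj_read_end()/zs_obj_write() signatures used in this series; 'local_copy' is a caller-owned bounce buffer sized for the object):

    /* Old, now removed:
     *	src = zs_map_object(pool, handle, ZS_MM_RO);
     *	... use src with page faults/preemption disabled ...
     *	zs_unmap_object(pool, handle);
     */

    /* New: */
    void *mem = zs_obj_read_begin(pool, handle, local_copy);
    /* ... read the object; 'mem' may point into 'local_copy' ... */
    zs_obj_read_end(pool, handle, mem);

    zs_obj_write(pool, handle, src_buf, len);	/* write the whole object */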
[yosry.ahmed@linux.dev: update the docs] Link: https://lkml.kernel.org/r/Z8ier-ZZp8T6MOTH@google.com Link: https://lkml.kernel.org/r/20250305061134.4105762-5-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Acked-by: Sergey Senozhatsky Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Chengming Zhou Cc: Herbert Xu Cc: Minchan Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- Documentation/mm/zsmalloc.rst | 5 +- include/linux/cpuhotplug.h | 1 - include/linux/zsmalloc.h | 21 ---- mm/zsmalloc.c | 226 +--------------------------------- 4 files changed, 3 insertions(+), 250 deletions(-) diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst index 76902835e68e..d2bbecd78e14 100644 --- a/Documentation/mm/zsmalloc.rst +++ b/Documentation/mm/zsmalloc.rst @@ -27,9 +27,8 @@ Instead, it returns an opaque handle (unsigned long) which encodes actual location of the allocated object. The reason for this indirection is that zsmalloc does not keep zspages permanently mapped since that would cause issues on 32-bit systems where the VA region for kernel space mappings -is very small. So, before using the allocating memory, the object has to -be mapped using zs_map_object() to get a usable pointer and subsequently -unmapped using zs_unmap_object(). +is very small. So, using the allocated memory should be done through the +proper handle-based APIs. stat ==== diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6cc5e484547c..1987400000b4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -116,7 +116,6 @@ enum cpuhp_state { CPUHP_NET_IUCV_PREPARE, CPUHP_ARM_BL_PREPARE, CPUHP_TRACE_RB_PREPARE, - CPUHP_MM_ZS_PREPARE, CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 7d70983cf398..c26baf9fb331 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -16,23 +16,6 @@ #include -/* - * zsmalloc mapping modes - * - * NOTE: These only make a difference when a mapped object spans pages. - */ -enum zs_mapmode { - ZS_MM_RW, /* normal read-write mapping */ - ZS_MM_RO, /* read-only (no copy-out at unmap time) */ - ZS_MM_WO /* write-only (no copy-in at map time) */ - /* - * NOTE: ZS_MM_WO should only be used for initializing new - * (uninitialized) allocations. Partial writes to already - * initialized allocations should use ZS_MM_RW to preserve the - * existing data. 
- */ -}; - struct zs_pool_stats { /* How many pages were migrated (freed) */ atomic_long_t pages_compacted; @@ -48,10 +31,6 @@ void zs_free(struct zs_pool *pool, unsigned long obj); size_t zs_huge_class_size(struct zs_pool *pool); -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm); -void zs_unmap_object(struct zs_pool *pool, unsigned long handle); - unsigned long zs_get_total_pages(struct zs_pool *pool); unsigned long zs_compact(struct zs_pool *pool); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 56d6ed5c675b..cd1c2a8ffef0 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -281,13 +281,6 @@ struct zspage { struct zspage_lock zsl; }; -struct mapping_area { - local_lock_t lock; - char *vm_buf; /* copy buffer for objects that span pages */ - char *vm_addr; /* address of kmap_local_page()'ed pages */ - enum zs_mapmode vm_mm; /* mapping mode */ -}; - static void zspage_lock_init(struct zspage *zspage) { static struct lock_class_key __key; @@ -522,11 +515,6 @@ static struct zpool_driver zs_zpool_driver = { MODULE_ALIAS("zpool-zsmalloc"); #endif /* CONFIG_ZPOOL */ -/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ -static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = { - .lock = INIT_LOCAL_LOCK(lock), -}; - static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc) { return PagePrivate(zpdesc_page(zpdesc)); @@ -1111,93 +1099,6 @@ static struct zspage *find_get_zspage(struct size_class *class) return zspage; } -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm_buf) - return 0; - area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); - if (!area->vm_buf) - return -ENOMEM; - return 0; -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - kfree(area->vm_buf); - area->vm_buf = NULL; -} - -static void *__zs_map_object(struct mapping_area *area, - struct zpdesc *zpdescs[2], int off, int size) -{ - size_t sizes[2]; - char *buf = area->vm_buf; - - /* disable page faults to match kmap_local_page() return conditions */ - pagefault_disable(); - - /* no read fastpath */ - if (area->vm_mm == ZS_MM_WO) - goto out; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy object to per-cpu buffer */ - memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]); - memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]); -out: - return area->vm_buf; -} - -static void __zs_unmap_object(struct mapping_area *area, - struct zpdesc *zpdescs[2], int off, int size) -{ - size_t sizes[2]; - char *buf; - - /* no write fastpath */ - if (area->vm_mm == ZS_MM_RO) - goto out; - - buf = area->vm_buf; - buf = buf + ZS_HANDLE_SIZE; - size -= ZS_HANDLE_SIZE; - off += ZS_HANDLE_SIZE; - - sizes[0] = PAGE_SIZE - off; - sizes[1] = size - sizes[0]; - - /* copy per-cpu buffer to object */ - memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]); - memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]); - -out: - /* enable page faults to match kunmap_local() return conditions */ - pagefault_enable(); -} - -static int zs_cpu_prepare(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - return __zs_cpu_up(area); -} - -static int zs_cpu_dead(unsigned int cpu) -{ - struct mapping_area *area; - - area = &per_cpu(zs_map_area, cpu); - __zs_cpu_down(area); - return 0; -} - static bool can_merge(struct 
size_class *prev, int pages_per_zspage, int objs_per_zspage) { @@ -1245,117 +1146,6 @@ unsigned long zs_get_total_pages(struct zs_pool *pool) } EXPORT_SYMBOL_GPL(zs_get_total_pages); -/** - * zs_map_object - get address of allocated object from handle. - * @pool: pool from which the object was allocated - * @handle: handle returned from zs_malloc - * @mm: mapping mode to use - * - * Before using an object allocated from zs_malloc, it must be mapped using - * this function. When done with the object, it must be unmapped using - * zs_unmap_object. - * - * Only one object can be mapped per cpu at a time. There is no protection - * against nested mappings. - * - * This function returns with preemption and page faults disabled. - */ -void *zs_map_object(struct zs_pool *pool, unsigned long handle, - enum zs_mapmode mm) -{ - struct zspage *zspage; - struct zpdesc *zpdesc; - unsigned long obj, off; - unsigned int obj_idx; - - struct size_class *class; - struct mapping_area *area; - struct zpdesc *zpdescs[2]; - void *ret; - - /* - * Because we use per-cpu mapping areas shared among the - * pools/users, we can't allow mapping in interrupt context - * because it can corrupt another users mappings. - */ - BUG_ON(in_interrupt()); - - /* It guarantees it can get zspage from handle safely */ - read_lock(&pool->lock); - obj = handle_to_obj(handle); - obj_to_location(obj, &zpdesc, &obj_idx); - zspage = get_zspage(zpdesc); - - /* - * migration cannot move any zpages in this zspage. Here, class->lock - * is too heavy since callers would take some time until they calls - * zs_unmap_object API so delegate the locking from class to zspage - * which is smaller granularity. - */ - zspage_read_lock(zspage); - read_unlock(&pool->lock); - - class = zspage_class(pool, zspage); - off = offset_in_page(class->size * obj_idx); - - local_lock(&zs_map_area.lock); - area = this_cpu_ptr(&zs_map_area); - area->vm_mm = mm; - if (off + class->size <= PAGE_SIZE) { - /* this object is contained entirely within a page */ - area->vm_addr = kmap_local_zpdesc(zpdesc); - ret = area->vm_addr + off; - goto out; - } - - /* this object spans two pages */ - zpdescs[0] = zpdesc; - zpdescs[1] = get_next_zpdesc(zpdesc); - BUG_ON(!zpdescs[1]); - - ret = __zs_map_object(area, zpdescs, off, class->size); -out: - if (likely(!ZsHugePage(zspage))) - ret += ZS_HANDLE_SIZE; - - return ret; -} -EXPORT_SYMBOL_GPL(zs_map_object); - -void zs_unmap_object(struct zs_pool *pool, unsigned long handle) -{ - struct zspage *zspage; - struct zpdesc *zpdesc; - unsigned long obj, off; - unsigned int obj_idx; - - struct size_class *class; - struct mapping_area *area; - - obj = handle_to_obj(handle); - obj_to_location(obj, &zpdesc, &obj_idx); - zspage = get_zspage(zpdesc); - class = zspage_class(pool, zspage); - off = offset_in_page(class->size * obj_idx); - - area = this_cpu_ptr(&zs_map_area); - if (off + class->size <= PAGE_SIZE) - kunmap_local(area->vm_addr); - else { - struct zpdesc *zpdescs[2]; - - zpdescs[0] = zpdesc; - zpdescs[1] = get_next_zpdesc(zpdesc); - BUG_ON(!zpdescs[1]); - - __zs_unmap_object(area, zpdescs, off, class->size); - } - local_unlock(&zs_map_area.lock); - - zspage_read_unlock(zspage); -} -EXPORT_SYMBOL_GPL(zs_unmap_object); - void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle, void *local_copy) { @@ -1975,7 +1765,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, * the class lock protects zpage alloc/free in the zspage. 
*/ spin_lock(&class->lock); - /* the zspage write_lock protects zpage access via zs_map_object */ + /* the zspage write_lock protects zpage access via zs_obj_read/write() */ if (!zspage_write_trylock(zspage)) { spin_unlock(&class->lock); write_unlock(&pool->lock); @@ -2459,23 +2249,11 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool); static int __init zs_init(void) { - int ret; - - ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare", - zs_cpu_prepare, zs_cpu_dead); - if (ret) - goto out; - #ifdef CONFIG_ZPOOL zpool_register_driver(&zs_zpool_driver); #endif - zs_stat_init(); - return 0; - -out: - return ret; } static void __exit zs_exit(void) @@ -2483,8 +2261,6 @@ static void __exit zs_exit(void) #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zs_zpool_driver); #endif - cpuhp_remove_state(CPUHP_MM_ZS_PREPARE); - zs_stat_exit(); } From 7b60041156079f256a7df3f7de469de7db618580 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 5 Mar 2025 06:11:33 +0000 Subject: [PATCH 267/431] mm: zpool: remove zpool_malloc_support_movable() zpool_malloc_support_movable() always returns true for zsmalloc, the only remaining zpool driver. Remove it and set the gfp flags in zswap_compress() accordingly. Opportunistically use GFP_NOWAIT instead of __GFP_NOWARN | __GFP_KSWAPD_RECLAIM for conciseness as they are equivalent. Link: https://lkml.kernel.org/r/20250305061134.4105762-6-yosry.ahmed@linux.dev Signed-off-by: Yosry Ahmed Reviewed-by: Sergey Senozhatsky Acked-by: Johannes Weiner Acked-by: Nhat Pham Cc: Thomas Gleixner Cc: Chengming Zhou Cc: Herbert Xu Cc: Minchan Kim Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- include/linux/zpool.h | 3 --- mm/zpool.c | 16 ---------------- mm/zsmalloc.c | 1 - mm/zswap.c | 4 +--- 4 files changed, 1 insertion(+), 23 deletions(-) diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 2c8a9d2654f6..52f30e526607 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -21,8 +21,6 @@ const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); -bool zpool_malloc_support_movable(struct zpool *pool); - int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, unsigned long *handle); @@ -65,7 +63,6 @@ struct zpool_driver { void *(*create)(const char *name, gfp_t gfp); void (*destroy)(void *pool); - bool malloc_support_movable; int (*malloc)(void *pool, size_t size, gfp_t gfp, unsigned long *handle); void (*free)(void *pool, unsigned long handle); diff --git a/mm/zpool.c b/mm/zpool.c index 4fc665b42f5e..6d6d88930932 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -220,22 +220,6 @@ const char *zpool_get_type(struct zpool *zpool) return zpool->driver->type; } -/** - * zpool_malloc_support_movable() - Check if the zpool supports - * allocating movable memory - * @zpool: The zpool to check - * - * This returns if the zpool supports allocating movable memory. - * - * Implementations must guarantee this to be thread-safe. - * - * Returns: true if the zpool supports allocating movable memory, false if not - */ -bool zpool_malloc_support_movable(struct zpool *zpool) -{ - return zpool->driver->malloc_support_movable; -} - /** * zpool_malloc() - Allocate memory * @zpool: The zpool to allocate from. 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index cd1c2a8ffef0..961b270f023c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -503,7 +503,6 @@ static struct zpool_driver zs_zpool_driver = { .owner = THIS_MODULE, .create = zs_zpool_create, .destroy = zs_zpool_destroy, - .malloc_support_movable = true, .malloc = zs_zpool_malloc, .free = zs_zpool_free, .obj_read_begin = zs_zpool_obj_read_begin, diff --git a/mm/zswap.c b/mm/zswap.c index 7de54f105d04..5f0e62289444 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -964,9 +964,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, goto unlock; zpool = pool->zpool; - gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; - if (zpool_malloc_support_movable(zpool)) - gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; + gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE; alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle); if (alloc_ret) goto unlock; From c0d017896b72d7dd251dabf64765196ff9a46a0f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:17 +0800 Subject: [PATCH 268/431] mm: shmem: drop the unused macro Patch series "Some trivial cleanups for shmem". Patch 1 - Patch 5 do some trivial cleanups and refactoring for shmem. Patch 6 adds myself as shmem reviewer. This patch (of 6): Drop the unused 'BLOCKS_PER_PAGE' macro. Link: https://lkml.kernel.org/r/cover.1738918357.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/69264cee1d938442477e657004e4924f8a5c4dd4.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index b276ae233dfa..6d6d5fce2120 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -86,7 +86,6 @@ static struct vfsmount *shm_mnt __ro_after_init; #include "internal.h" -#define BLOCKS_PER_PAGE (PAGE_SIZE/512) #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) /* Pretend that each entry is of this size in directory's i_size */ From 6d26a149f5483acdc8a9a7a8fcf5e737a324a2b6 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:18 +0800 Subject: [PATCH 269/431] mm: shmem: remove 'fadvise()' comments Similar to commit 255ff62d1586 ("docs: tmpfs: drop 'fadvise()' from the documentation"), fadvise() has no HUGEPAGE advise currently. Remove the confusing fadvise() comments. 
Link: https://lkml.kernel.org/r/fae702b9775f58b55b45be5eaad22d8586d0290a.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 6d6d5fce2120..c63fd18cea50 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -525,9 +525,9 @@ static bool shmem_confirm_swap(struct address_space *mapping, * enables huge pages for the mount; * SHMEM_HUGE_WITHIN_SIZE: * only allocate huge pages if the page will be fully within i_size, - * also respect fadvise()/madvise() hints; + * also respect madvise() hints; * SHMEM_HUGE_ADVISE: - * only allocate huge pages if requested with fadvise()/madvise(); + * only allocate huge pages if requested with madvise(); */ #define SHMEM_HUGE_NEVER 0 From d5e4e147c0f59259cd527d58295ba1bfefbad481 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:19 +0800 Subject: [PATCH 270/431] mm: shmem: remove duplicate error validation Remove duplicate error code checks for 'start' and 'end', as the get_order_from_str() will only return -EINVAL if the cmdline string is configured incorrectly. Link: https://lkml.kernel.org/r/dfadaba4c8b24c5ae1467fe8b6744b654c65ec91.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index c63fd18cea50..51bdeea828a0 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5664,19 +5664,19 @@ static int __init setup_thp_shmem(char *str) THP_ORDERS_ALL_FILE_DEFAULT); } - if (start == -EINVAL) { + if (start < 0) { pr_err("invalid size %s in thp_shmem boot parameter\n", start_size); goto err; } - if (end == -EINVAL) { + if (end < 0) { pr_err("invalid size %s in thp_shmem boot parameter\n", end_size); goto err; } - if (start < 0 || end < 0 || start > end) + if (start > end) goto err; nr = end - start + 1; From cd81c424b53fa174df1e18b021806bc978c8fb7f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:20 +0800 Subject: [PATCH 271/431] mm: shmem: change the return value of shmem_find_swap_entries() The shmem_find_swap_entries() originally returned the index corresponding to the swap entry, but no callers used this return value. It should return the number of entries that were found like other functions, which can be used by the callers. No functional changes. 
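(Illustrative sketch, not part of the patch: with the count returned, a caller such as shmem_unuse_inode() can drive its loop directly off the result; variable setup, error handling and the actual swap-in step are elided.)

	do {
		folio_batch_init(&fbatch);
		if (!shmem_find_swap_entries(mapping, start, &fbatch,
					     indices, type))
			break;		/* no swap entries left in the range */

		/* ... swap in the folios collected in fbatch ... */

		start = indices[folio_batch_count(&fbatch) - 1];
	} while (true);
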
Link: https://lkml.kernel.org/r/070489b5946b8379b2a2d25f78115cef167cd145.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 51bdeea828a0..f5a081563022 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1379,9 +1379,9 @@ static void shmem_evict_inode(struct inode *inode) #endif } -static int shmem_find_swap_entries(struct address_space *mapping, - pgoff_t start, struct folio_batch *fbatch, - pgoff_t *indices, unsigned int type) +static unsigned int shmem_find_swap_entries(struct address_space *mapping, + pgoff_t start, struct folio_batch *fbatch, + pgoff_t *indices, unsigned int type) { XA_STATE(xas, &mapping->i_pages, start); struct folio *folio; @@ -1414,7 +1414,7 @@ static int shmem_find_swap_entries(struct address_space *mapping, } rcu_read_unlock(); - return xas.xa_index; + return folio_batch_count(fbatch); } /* @@ -1461,8 +1461,8 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type) do { folio_batch_init(&fbatch); - shmem_find_swap_entries(mapping, start, &fbatch, indices, type); - if (folio_batch_count(&fbatch) == 0) { + if (!shmem_find_swap_entries(mapping, start, &fbatch, + indices, type)) { ret = 0; break; } From 086e66b690ae41c1c8c0ef0366cadeb089dff990 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:21 +0800 Subject: [PATCH 272/431] mm: shmem: factor out the within_size logic into a new helper Factor out the within_size logic into a new helper to remove duplicate code. Link: https://lkml.kernel.org/r/527dea9d7e32fe6b94c7fe00df2c126203017911.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 53 +++++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index f5a081563022..8de9b4a07a8a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -590,6 +590,28 @@ shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t w return order > 0 ? BIT(order + 1) - 1 : 0; } +static unsigned int shmem_get_orders_within_size(struct inode *inode, + unsigned long within_size_orders, pgoff_t index, + loff_t write_end) +{ + pgoff_t aligned_index; + unsigned long order; + loff_t i_size; + + order = highest_order(within_size_orders); + while (within_size_orders) { + aligned_index = round_up(index + 1, 1 << order); + i_size = max(write_end, i_size_read(inode)); + i_size = round_up(i_size, PAGE_SIZE); + if (i_size >> PAGE_SHIFT >= aligned_index) + return within_size_orders; + + order = next_order(&within_size_orders, order); + } + + return 0; +} + static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, loff_t write_end, bool shmem_huge_force, struct vm_area_struct *vma, @@ -598,9 +620,6 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? 
0 : BIT(HPAGE_PMD_ORDER); unsigned long within_size_orders; - unsigned int order; - pgoff_t aligned_index; - loff_t i_size; if (!S_ISREG(inode->i_mode)) return 0; @@ -634,16 +653,11 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index within_size_orders = shmem_mapping_size_orders(inode->i_mapping, index, write_end); - order = highest_order(within_size_orders); - while (within_size_orders) { - aligned_index = round_up(index + 1, 1 << order); - i_size = max(write_end, i_size_read(inode)); - i_size = round_up(i_size, PAGE_SIZE); - if (i_size >> PAGE_SHIFT >= aligned_index) - return within_size_orders; + within_size_orders = shmem_get_orders_within_size(inode, within_size_orders, + index, write_end); + if (within_size_orders > 0) + return within_size_orders; - order = next_order(&within_size_orders, order); - } fallthrough; case SHMEM_HUGE_ADVISE: if (vm_flags & VM_HUGEPAGE) @@ -1747,10 +1761,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, unsigned long mask = READ_ONCE(huge_shmem_orders_always); unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); unsigned long vm_flags = vma ? vma->vm_flags : 0; - pgoff_t aligned_index; unsigned int global_orders; - loff_t i_size; - int order; if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags))) return 0; @@ -1776,17 +1787,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, return READ_ONCE(huge_shmem_orders_inherit); /* Allow mTHP that will be fully within i_size. */ - order = highest_order(within_size_orders); - while (within_size_orders) { - aligned_index = round_up(index + 1, 1 << order); - i_size = round_up(i_size_read(inode), PAGE_SIZE); - if (i_size >> PAGE_SHIFT >= aligned_index) { - mask |= within_size_orders; - break; - } - - order = next_order(&within_size_orders, order); - } + mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0); if (vm_flags & VM_HUGEPAGE) mask |= READ_ONCE(huge_shmem_orders_madvise); From a91aaf8dd549dcee9caab227ecaa6cbc243bbc5a Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 7 Feb 2025 17:44:22 +0800 Subject: [PATCH 273/431] MAINTAINERS: add Baolin as shmem reviewer In the past year, I've primarily focused on shmem and added several features to it, such as support for mTHP, large folio swap-out and swap-in support, mTHP collapse support, skipping swapcache, and tmpfs support for large folios, and so on. Meanwhile I've also been helping with testing and reviewing shmem related patches. So I am willing to continue assisting with testing and reviewing shmem related patches. Let me be Cc'd on patches related to shmem. 
Link: https://lkml.kernel.org/r/bcefbba9b2b44d4e661e6cc2c4187292a5beb467.1738918357.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Signed-off-by: Andrew Morton --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index c13201979633..fb408698086e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23952,6 +23952,7 @@ F: drivers/hwmon/tmp513.c TMPFS (SHMEM FILESYSTEM) M: Hugh Dickins +R: Baolin Wang L: linux-mm@kvack.org S: Maintained F: include/linux/shmem_fs.h From 995abaaadd30e2f9e49127694d41403839d3e1bd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 16 Dec 2024 15:53:55 +0000 Subject: [PATCH 274/431] dax: remove access to page->index This looks like a complete mess (why are we setting page->index at page fault time?), but I no longer care about DAX, and there's no reason to let DAX hold us back from removing page->index. Link: https://lkml.kernel.org/r/20241216155408.8102-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jane Chu Reviewed-by: Dan Williams Cc: Alistair Popple Cc: Dave Jiang Cc: Vishal Verma Signed-off-by: Andrew Morton --- drivers/dax/device.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6d74e62bbee0..bc871a34b9cd 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -89,14 +89,13 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn, ALIGN_DOWN(vmf->address, fault_size)); for (i = 0; i < nr_pages; i++) { - struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i); + struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i); - page = compound_head(page); - if (page->mapping) + if (folio->mapping) continue; - page->mapping = filp->f_mapping; - page->index = pgoff + i; + folio->mapping = filp->f_mapping; + folio->index = pgoff + i; } } From 37cd93fc61037889461d6437a1e1a5caa730ef38 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 16 Dec 2024 15:53:56 +0000 Subject: [PATCH 275/431] dax: use folios more widely within DAX Convert from pfn to folio instead of page and use those folios throughout to avoid accesses to page->index and page->mapping. Link: https://lkml.kernel.org/r/20241216155408.8102-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jane Chu Cc: Dan Willaims Cc: Dave Jiang Cc: Vishal Verma Cc: Alistair Popple Signed-off-by: Andrew Morton --- fs/dax.c | 53 +++++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 21b47402b3dc..972febc6fb9d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -320,38 +320,39 @@ static unsigned long dax_end_pfn(void *entry) for (pfn = dax_to_pfn(entry); \ pfn < dax_end_pfn(entry); pfn++) -static inline bool dax_page_is_shared(struct page *page) +static inline bool dax_folio_is_shared(struct folio *folio) { - return page->mapping == PAGE_MAPPING_DAX_SHARED; + return folio->mapping == PAGE_MAPPING_DAX_SHARED; } /* - * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the + * Set the folio->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the * refcount. */ -static inline void dax_page_share_get(struct page *page) +static inline void dax_folio_share_get(struct folio *folio) { - if (page->mapping != PAGE_MAPPING_DAX_SHARED) { + if (folio->mapping != PAGE_MAPPING_DAX_SHARED) { /* * Reset the index if the page was already mapped * regularly before. 
*/ - if (page->mapping) - page->share = 1; - page->mapping = PAGE_MAPPING_DAX_SHARED; + if (folio->mapping) + folio->page.share = 1; + folio->mapping = PAGE_MAPPING_DAX_SHARED; } - page->share++; + folio->page.share++; } -static inline unsigned long dax_page_share_put(struct page *page) +static inline unsigned long dax_folio_share_put(struct folio *folio) { - return --page->share; + return --folio->page.share; } /* - * When it is called in dax_insert_entry(), the shared flag will indicate that - * whether this entry is shared by multiple files. If so, set the page->mapping - * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount. + * When it is called in dax_insert_entry(), the shared flag will indicate + * that whether this entry is shared by multiple files. If so, set + * the folio->mapping PAGE_MAPPING_DAX_SHARED, and use page->share + * as refcount. */ static void dax_associate_entry(void *entry, struct address_space *mapping, struct vm_area_struct *vma, unsigned long address, bool shared) @@ -364,14 +365,14 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, index = linear_page_index(vma, address & ~(size - 1)); for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + struct folio *folio = pfn_folio(pfn); if (shared) { - dax_page_share_get(page); + dax_folio_share_get(folio); } else { - WARN_ON_ONCE(page->mapping); - page->mapping = mapping; - page->index = index + i++; + WARN_ON_ONCE(folio->mapping); + folio->mapping = mapping; + folio->index = index + i++; } } } @@ -385,17 +386,17 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping, return; for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + struct folio *folio = pfn_folio(pfn); - WARN_ON_ONCE(trunc && page_ref_count(page) > 1); - if (dax_page_is_shared(page)) { + WARN_ON_ONCE(trunc && folio_ref_count(folio) > 1); + if (dax_folio_is_shared(folio)) { /* keep the shared flag if this page is still shared */ - if (dax_page_share_put(page) > 0) + if (dax_folio_share_put(folio) > 0) continue; } else - WARN_ON_ONCE(page->mapping && page->mapping != mapping); - page->mapping = NULL; - page->index = 0; + WARN_ON_ONCE(folio->mapping && folio->mapping != mapping); + folio->mapping = NULL; + folio->index = 0; } } From 7851bf649d423edd7286b292739f2eefded3d35c Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:30:56 +1100 Subject: [PATCH 276/431] fuse: fix dax truncate/punch_hole fault path Patch series "fs/dax: Fix ZONE_DEVICE page reference counts", v9. Device and FS DAX pages have always maintained their own page reference counts without following the normal rules for page reference counting. In particular pages are considered free when the refcount hits one rather than zero and refcounts are not added when mapping the page. Tracking this requires special PTE bits (PTE_DEVMAP) and a secondary mechanism for allowing GUP to hold references on the page (see get_dev_pagemap). However there doesn't seem to be any reason why FS DAX pages need their own reference counting scheme. By treating the refcounts on these pages the same way as normal pages we can remove a lot of special checks. In particular pXd_trans_huge() becomes the same as pXd_leaf(), although I haven't made that change here. It also frees up a valuable SW define PTE bit on architectures that have devmap PTE bits defined. It also almost certainly allows further clean-up of the devmap managed functions, but I have left that as a future improvment. 
It also enables support for compound ZONE_DEVICE pages which is one of my primary motivators for doing this work. This patch (of 20): FS DAX requires file systems to call into the DAX layout prior to unlinking inodes to ensure there is no ongoing DMA or other remote access to the direct mapped page. The fuse file system implements fuse_dax_break_layouts() to do this which includes a comment indicating that passing dmap_end == 0 leads to unmapping of the whole file. However this is not true - passing dmap_end == 0 will not unmap anything before dmap_start, and further more dax_layout_busy_page_range() will not scan any of the range to see if there maybe ongoing DMA access to the range. Fix this by passing -1 for dmap_end to fuse_dax_break_layouts() which will invalidate the entire file range to dax_layout_busy_page_range(). Link: https://lkml.kernel.org/r/cover.8068ad144a7eea4a813670301f4d2a86a8e68ec4.1740713401.git-series.apopple@nvidia.com Link: https://lkml.kernel.org/r/f09a34b6c40032022e4ddee6fadb7cc676f08867.1740713401.git-series.apopple@nvidia.com Fixes: 6ae330cad6ef ("virtiofs: serialize truncate/punch_hole and dax fault path") Signed-off-by: Alistair Popple Co-developed-by: Dan Williams Signed-off-by: Dan Williams Reviewed-by: Balbir Singh Tested-by: Alison Schofield Cc: Vivek Goyal Cc: Alexander Gordeev Cc: Asahi Lina Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/fuse/dax.c | 1 - fs/fuse/dir.c | 2 +- fs/fuse/file.c | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 0b6ee6dd1fd6..b7f805d2a14f 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -682,7 +682,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, 0, 0, fuse_wait_dax_page(inode)); } -/* dmap_end == 0 leads to unmapping of whole file */ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 3805f9b06c9d..3b031d24d369 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1940,7 +1940,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index d63e56fd3dd2..754378dd9f71 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file) if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3205,7 +3205,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = 
fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } From cee91fa13a8e95f49d0ee6be020e68a71a209117 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:30:57 +1100 Subject: [PATCH 277/431] fs/dax: return unmapped busy pages from dax_layout_busy_page_range() dax_layout_busy_page_range() is used by file systems to scan the DAX page-cache to unmap mapping pages from user-space and to determine if any pages in the given range are busy, either due to ongoing DMA or other get_user_pages() usage. Currently it checks to see the file mapping is mapped into user-space with mapping_mapped() and returns early if not, skipping the check for DMA busy pages. This is wrong as pages may still be undergoing DMA access even if they have subsequently been unmapped from user-space. Fix this by dropping the check for mapping_mapped(). Link: https://lkml.kernel.org/r/d85ce6c2d1400ff111ed7302d9eef223d0243c57.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Suggested-by: Dan Williams Reviewed-by: Dan Williams Reviewed-by: Balbir Singh Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/dax.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/dax.c b/fs/dax.c index 972febc6fb9d..b35f538c4330 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -691,7 +691,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return NULL; - if (!dax_mapping(mapping) || !mapping_mapped(mapping)) + if (!dax_mapping(mapping)) return NULL; /* If end == LLONG_MAX, all pages from start to till end of file */ From 6be3e21d25ca2dbb7ca4f3f7db808a3e1a944bd1 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:30:58 +1100 Subject: [PATCH 278/431] fs/dax: don't skip locked entries when scanning entries Several functions internal to FS DAX use the following pattern when trying to obtain an unlocked entry: xas_for_each(&xas, entry, end_idx) { if (dax_is_locked(entry)) entry = get_unlocked_entry(&xas, 0); This is problematic because get_unlocked_entry() will get the next present entry in the range, and the next entry may not be locked. Therefore any processing of the original locked entry will be skipped. This can cause dax_layout_busy_page_range() to miss DMA-busy pages in the range, leading file systems to free blocks whilst DMA operations are ongoing which can lead to file system corruption. Instead callers from within a xas_for_each() loop should be waiting for the current entry to be unlocked without advancing the XArray state so a new function is introduced to wait. Also while we are here rename get_unlocked_entry() to get_next_unlocked_entry() to make it clear that it may advance the iterator state. 
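(Illustrative sketch, not part of the patch, contrasting the two loop shapes; locking, wakeups and error handling are elided.)

	/* Before: the helper may advance the iterator, skipping the locked entry. */
	xas_for_each(&xas, entry, end_idx) {
		if (dax_is_locked(entry))
			entry = get_unlocked_entry(&xas, 0);
		/* "entry" may now belong to a later index in the range */
	}

	/* After: wait on the current entry without advancing the XArray state. */
	xas_for_each(&xas, entry, end_idx) {
		entry = wait_entry_unlocked_exclusive(&xas, entry);
		if (!entry)
			continue;
		/* process "entry", then put_unlocked_entry(&xas, entry, WAKE_NEXT) */
	}
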
Link: https://lkml.kernel.org/r/b11b2baed7157dc900bf07a4571bf71b7cd82d97.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/dax.c | 50 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 9 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index b35f538c4330..f5fdb43f5de3 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -206,7 +206,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, * * Must be called with the i_pages lock held. */ -static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) +static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order) { void *entry; struct wait_exceptional_entry_queue ewait; @@ -235,6 +235,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) } } +/* + * Wait for the given entry to become unlocked. Caller must hold the i_pages + * lock and call either put_unlocked_entry() if it did not lock the entry or + * dax_unlock_entry() if it did. Returns an unlocked entry if still present. + */ +static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry) +{ + struct wait_exceptional_entry_queue ewait; + wait_queue_head_t *wq; + + init_wait(&ewait.wait); + ewait.wait.func = wake_exceptional_entry_func; + + while (unlikely(dax_is_locked(entry))) { + wq = dax_entry_waitqueue(xas, entry, &ewait.key); + prepare_to_wait_exclusive(wq, &ewait.wait, + TASK_UNINTERRUPTIBLE); + xas_pause(xas); + xas_unlock_irq(xas); + schedule(); + finish_wait(wq, &ewait.wait); + xas_lock_irq(xas); + entry = xas_load(xas); + } + + if (xa_is_internal(entry)) + return NULL; + + return entry; +} + /* * The only thing keeping the address space around is the i_pages lock * (it's cycled in clear_inode() after removing the entries from i_pages) @@ -250,7 +281,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) wq = dax_entry_waitqueue(xas, entry, &ewait.key); /* - * Unlike get_unlocked_entry() there is no guarantee that this + * Unlike get_next_unlocked_entry() there is no guarantee that this * path ever successfully retrieves an unlocked entry before an * inode dies. Perform a non-exclusive wait in case this path * never successfully performs its own wake up. 
@@ -581,7 +612,7 @@ static void *grab_mapping_entry(struct xa_state *xas, retry: pmd_downgrade = false; xas_lock_irq(xas); - entry = get_unlocked_entry(xas, order); + entry = get_next_unlocked_entry(xas, order); if (entry) { if (dax_is_conflict(entry)) @@ -717,8 +748,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, xas_for_each(&xas, entry, end_idx) { if (WARN_ON_ONCE(!xa_is_value(entry))) continue; - if (unlikely(dax_is_locked(entry))) - entry = get_unlocked_entry(&xas, 0); + entry = wait_entry_unlocked_exclusive(&xas, entry); if (entry) page = dax_busy_page(entry); put_unlocked_entry(&xas, entry, WAKE_NEXT); @@ -751,7 +781,7 @@ static int __dax_invalidate_entry(struct address_space *mapping, void *entry; xas_lock_irq(&xas); - entry = get_unlocked_entry(&xas, 0); + entry = get_next_unlocked_entry(&xas, 0); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto out; if (!trunc && @@ -777,7 +807,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping, xas_lock_irq(&xas); xas_for_each(&xas, entry, end) { - entry = get_unlocked_entry(&xas, 0); + entry = wait_entry_unlocked_exclusive(&xas, entry); + if (!entry) + continue; xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); put_unlocked_entry(&xas, entry, WAKE_NEXT); @@ -941,7 +973,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, if (unlikely(dax_is_locked(entry))) { void *old_entry = entry; - entry = get_unlocked_entry(xas, 0); + entry = get_next_unlocked_entry(xas, 0); /* Entry got punched out / reallocated? */ if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) @@ -1950,7 +1982,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) vm_fault_t ret; xas_lock_irq(&xas); - entry = get_unlocked_entry(&xas, order); + entry = get_next_unlocked_entry(&xas, order); /* Did we race with someone splitting entry or so? */ if (!entry || dax_is_conflict(entry) || (order == 0 && !dax_is_pte_entry(entry))) { From e6fa3963a30d53427d33a577c5ba4bfd3373bc93 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:30:59 +1100 Subject: [PATCH 279/431] fs/dax: refactor wait for dax idle page A FS DAX page is considered idle when its refcount drops to one. This is currently open-coded in all file systems supporting FS DAX. Move the idle detection to a common function to make future changes easier. Link: https://lkml.kernel.org/r/c2c9d269110b90224eeb1dc661ffbc1d82aa20c9.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Reviewed-by: Dan Williams Acked-by: Theodore Ts'o Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/ext4/inode.c | 5 +---- fs/fuse/dax.c | 4 +--- fs/xfs/xfs_inode.c | 4 +--- include/linux/dax.h | 8 ++++++++ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7c54ae5fcbd4..cc1acb1fdec6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3922,10 +3922,7 @@ int ext4_break_layouts(struct inode *inode) if (!page) return 0; - error = ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, - TASK_INTERRUPTIBLE, 0, 0, - ext4_wait_dax_page(inode)); + error = dax_wait_page_idle(page, ext4_wait_dax_page, inode); } while (error == 0); return error; diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index b7f805d2a14f..bf6faa3536a4 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -677,9 +677,7 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, return 0; *retry = true; - return ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, - 0, 0, fuse_wait_dax_page(inode)); + return dax_wait_page_idle(page, fuse_wait_dax_page, inode); } int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b1f9f156ec88..1b5613dfed7f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3020,9 +3020,7 @@ xfs_break_dax_layouts( return 0; *retry = true; - return ___wait_var_event(&page->_refcount, - atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, - 0, 0, xfs_wait_dax_page(inode)); + return dax_wait_page_idle(page, xfs_wait_dax_page, inode); } int diff --git a/include/linux/dax.h b/include/linux/dax.h index df41a0017b31..9b1ce984d410 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -207,6 +207,14 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); +static inline int dax_wait_page_idle(struct page *page, + void (cb)(struct inode *), + struct inode *inode) +{ + return ___wait_var_event(page, page_ref_count(page) == 1, + TASK_INTERRUPTIBLE, 0, 0, cb(inode)); +} + #if IS_ENABLED(CONFIG_DAX) int dax_read_lock(void); void dax_read_unlock(int id); From d5b3afea22a52517e6bc835432e6dfd079b8bf7c Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:00 +1100 Subject: [PATCH 280/431] fs/dax: create a common implementation to break DAX layouts Prior to freeing a block file systems supporting FS DAX must check that the associated pages are both unmapped from user-space and not undergoing DMA or other access from eg. get_user_pages(). This is achieved by unmapping the file range and scanning the FS DAX page-cache to see if any pages within the mapping have an elevated refcount. This is done using two functions - dax_layout_busy_page_range() which returns a page to wait for the refcount to become idle on. Rather than open-code this introduce a common implementation to both unmap and wait for the page to become idle. 
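(Illustrative sketch, not part of the patch: a hypothetical filesystem caller of the new helper. myfs_wait_dax_page() and myfs_break_layouts() are assumed names; the callback body mirrors what the existing ext4/fuse/xfs wait callbacks converted below already do.)

	/* Hypothetical callback: cycle the invalidate lock while waiting for DMA. */
	static void myfs_wait_dax_page(struct inode *inode)
	{
		filemap_invalidate_unlock(inode->i_mapping);
		schedule();
		filemap_invalidate_lock(inode->i_mapping);
	}

	/* Called with mapping->invalidate_lock held exclusively. */
	static int myfs_break_layouts(struct inode *inode, loff_t start, loff_t end)
	{
		return dax_break_layout(inode, start, end, myfs_wait_dax_page);
	}
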
Link: https://lkml.kernel.org/r/c4d381e41fc618296cee2820403c166d80599d5c.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/dax.c | 33 +++++++++++++++++++++++++++++++++ fs/ext4/inode.c | 13 +------------ fs/fuse/dax.c | 27 +++------------------------ fs/xfs/xfs_inode.c | 26 +++++++------------------- fs/xfs/xfs_inode.h | 2 +- include/linux/dax.h | 23 ++++++++++++++++++----- 6 files changed, 63 insertions(+), 61 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index f5fdb43f5de3..f1945aa65eb0 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -846,6 +846,39 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) return ret; } +static int wait_page_idle(struct page *page, + void (cb)(struct inode *), + struct inode *inode) +{ + return ___wait_var_event(page, dax_page_is_idle(page), + TASK_INTERRUPTIBLE, 0, 0, cb(inode)); +} + +/* + * Unmaps the inode and waits for any DMA to complete prior to deleting the + * DAX mapping entries for the range. + */ +int dax_break_layout(struct inode *inode, loff_t start, loff_t end, + void (cb)(struct inode *)) +{ + struct page *page; + int error = 0; + + if (!dax_mapping(inode->i_mapping)) + return 0; + + do { + page = dax_layout_busy_page_range(inode->i_mapping, start, end); + if (!page) + break; + + error = wait_page_idle(page, cb, inode); + } while (error == 0); + + return error; +} +EXPORT_SYMBOL_GPL(dax_break_layout); + /* * Invalidate DAX entry if it is clean. */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index cc1acb1fdec6..2342bac14a9e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3911,21 +3911,10 @@ static void ext4_wait_dax_page(struct inode *inode) int ext4_break_layouts(struct inode *inode) { - struct page *page; - int error; - if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock))) return -EINVAL; - do { - page = dax_layout_busy_page(inode->i_mapping); - if (!page) - return 0; - - error = dax_wait_page_idle(page, ext4_wait_dax_page, inode); - } while (error == 0); - - return error; + return dax_break_layout_inode(inode, ext4_wait_dax_page); } /* diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index bf6faa3536a4..0502bf3cdf6a 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -666,33 +666,12 @@ static void fuse_wait_dax_page(struct inode *inode) filemap_invalidate_lock(inode->i_mapping); } -/* Should be called with mapping->invalidate_lock held exclusively */ -static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, - loff_t start, loff_t end) -{ - struct page *page; - - page = dax_layout_busy_page_range(inode->i_mapping, start, end); - if (!page) - return 0; - - *retry = true; - return dax_wait_page_idle(page, fuse_wait_dax_page, inode); -} - +/* Should be called with mapping->invalidate_lock held exclusively. 
*/ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { - bool retry; - int ret; - - do { - retry = false; - ret = __fuse_dax_break_layouts(inode, &retry, dmap_start, - dmap_end); - } while (ret == 0 && retry); - - return ret; + return dax_break_layout(inode, dmap_start, dmap_end, + fuse_wait_dax_page); } ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1b5613dfed7f..d4f07e02b28b 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2735,21 +2735,17 @@ xfs_mmaplock_two_inodes_and_break_dax_layout( struct xfs_inode *ip2) { int error; - bool retry; struct page *page; if (ip1->i_ino > ip2->i_ino) swap(ip1, ip2); again: - retry = false; /* Lock the first inode */ xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); - error = xfs_break_dax_layouts(VFS_I(ip1), &retry); - if (error || retry) { + error = xfs_break_dax_layouts(VFS_I(ip1)); + if (error) { xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); - if (error == 0 && retry) - goto again; return error; } @@ -2764,7 +2760,7 @@ xfs_mmaplock_two_inodes_and_break_dax_layout( * for this nested lock case. */ page = dax_layout_busy_page(VFS_I(ip2)->i_mapping); - if (page && page_ref_count(page) != 1) { + if (!dax_page_is_idle(page)) { xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); goto again; @@ -3008,19 +3004,11 @@ xfs_wait_dax_page( int xfs_break_dax_layouts( - struct inode *inode, - bool *retry) + struct inode *inode) { - struct page *page; - xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL); - page = dax_layout_busy_page(inode->i_mapping); - if (!page) - return 0; - - *retry = true; - return dax_wait_page_idle(page, xfs_wait_dax_page, inode); + return dax_break_layout_inode(inode, xfs_wait_dax_page); } int @@ -3038,8 +3026,8 @@ xfs_break_layouts( retry = false; switch (reason) { case BREAK_UNMAP: - error = xfs_break_dax_layouts(inode, &retry); - if (error || retry) + error = xfs_break_dax_layouts(inode); + if (error) break; fallthrough; case BREAK_WRITE: diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index c08093a65352..123dfa965c6e 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -603,7 +603,7 @@ xfs_itruncate_extents( return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0); } -int xfs_break_dax_layouts(struct inode *inode, bool *retry); +int xfs_break_dax_layouts(struct inode *inode); int xfs_break_layouts(struct inode *inode, uint *iolock, enum layout_break_reason reason); diff --git a/include/linux/dax.h b/include/linux/dax.h index 9b1ce984d410..a6b277f1e13a 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -207,12 +207,9 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); -static inline int dax_wait_page_idle(struct page *page, - void (cb)(struct inode *), - struct inode *inode) +static inline bool dax_page_is_idle(struct page *page) { - return ___wait_var_event(page, page_ref_count(page) == 1, - TASK_INTERRUPTIBLE, 0, 0, cb(inode)); + return page && page_ref_count(page) == 1; } #if IS_ENABLED(CONFIG_DAX) @@ -228,6 +225,15 @@ static inline void dax_read_unlock(int id) { } #endif /* CONFIG_DAX */ + +#if !IS_ENABLED(CONFIG_FS_DAX) +static inline int __must_check dax_break_layout(struct inode *inode, + loff_t start, loff_t end, void (cb)(struct inode *)) +{ + return 0; +} +#endif + bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device 
*dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, @@ -251,6 +257,13 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int __must_check dax_break_layout(struct inode *inode, loff_t start, + loff_t end, void (cb)(struct inode *)); +static inline int __must_check dax_break_layout_inode(struct inode *inode, + void (cb)(struct inode *)) +{ + return dax_break_layout(inode, 0, LLONG_MAX, cb); +} int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t destoff, loff_t len, bool *is_same, From bde708f1a65d025c45575bfe1e7bf7bdf7e71e87 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:01 +1100 Subject: [PATCH 281/431] fs/dax: always remove DAX page-cache entries when breaking layouts Prior to any truncation operations file systems call dax_break_mapping() to ensure pages in the range are not under going DMA. Later DAX page-cache entries will be removed by truncate_folio_batch_exceptionals() in the generic page-cache code. However this makes it possible for folios to be removed from the page-cache even though they are still DMA busy if the file-system hasn't called dax_break_mapping(). It also means they can never be waited on in future because FS DAX will lose track of them once the page-cache entry has been deleted. Instead it is better to delete the FS DAX entry when the file-system calls dax_break_mapping() as part of it's truncate operation. This ensures only idle pages can be removed from the FS DAX page-cache and makes it easy to detect if a file-system hasn't called dax_break_mapping() prior to a truncate operation. Link: https://lkml.kernel.org/r/3be6115eaaa8d28fee37fcba3287be4f226a7d24.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/dax.c | 40 ++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_inode.c | 5 ++--- include/linux/dax.h | 2 ++ mm/truncate.c | 16 +++++++++++++++- 4 files changed, 59 insertions(+), 4 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index f1945aa65eb0..14fbe5163037 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -846,6 +846,36 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) return ret; } +void dax_delete_mapping_range(struct address_space *mapping, + loff_t start, loff_t end) +{ + void *entry; + pgoff_t start_idx = start >> PAGE_SHIFT; + pgoff_t end_idx; + XA_STATE(xas, &mapping->i_pages, start_idx); + + /* If end == LLONG_MAX, all pages from start to till end of file */ + if (end == LLONG_MAX) + end_idx = ULONG_MAX; + else + end_idx = end >> PAGE_SHIFT; + + xas_lock_irq(&xas); + xas_for_each(&xas, entry, end_idx) { + if (!xa_is_value(entry)) + continue; + entry = wait_entry_unlocked_exclusive(&xas, entry); + if (!entry) + continue; + dax_disassociate_entry(entry, mapping, true); + xas_store(&xas, NULL); + mapping->nrpages -= 1UL << dax_entry_order(entry); + put_unlocked_entry(&xas, entry, WAKE_ALL); + } + xas_unlock_irq(&xas); +} +EXPORT_SYMBOL_GPL(dax_delete_mapping_range); + static int wait_page_idle(struct page *page, void (cb)(struct inode *), struct inode *inode) @@ -857,6 +887,9 @@ static int wait_page_idle(struct page *page, /* * Unmaps the inode and waits for any DMA to complete prior to deleting the * DAX mapping entries for the range. + * + * For NOWAIT behavior, pass @cb as NULL to early-exit on first found + * busy page */ int dax_break_layout(struct inode *inode, loff_t start, loff_t end, void (cb)(struct inode *)) @@ -871,10 +904,17 @@ int dax_break_layout(struct inode *inode, loff_t start, loff_t end, page = dax_layout_busy_page_range(inode->i_mapping, start, end); if (!page) break; + if (!cb) { + error = -ERESTARTSYS; + break; + } error = wait_page_idle(page, cb, inode); } while (error == 0); + if (!page) + dax_delete_mapping_range(inode->i_mapping, start, end); + return error; } EXPORT_SYMBOL_GPL(dax_break_layout); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d4f07e02b28b..80083376a1d0 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2735,7 +2735,6 @@ xfs_mmaplock_two_inodes_and_break_dax_layout( struct xfs_inode *ip2) { int error; - struct page *page; if (ip1->i_ino > ip2->i_ino) swap(ip1, ip2); @@ -2759,8 +2758,8 @@ xfs_mmaplock_two_inodes_and_break_dax_layout( * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable * for this nested lock case. 
*/ - page = dax_layout_busy_page(VFS_I(ip2)->i_mapping); - if (!dax_page_is_idle(page)) { + error = dax_break_layout(VFS_I(ip2), 0, -1, NULL); + if (error) { xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); goto again; diff --git a/include/linux/dax.h b/include/linux/dax.h index a6b277f1e13a..2fbb262092ca 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -255,6 +255,8 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, pfn_t pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +void dax_delete_mapping_range(struct address_space *mapping, + loff_t start, loff_t end); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); int __must_check dax_break_layout(struct inode *inode, loff_t start, diff --git a/mm/truncate.c b/mm/truncate.c index 76d8fcd89bd0..79570045071c 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -78,8 +78,22 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping, if (dax_mapping(mapping)) { for (i = j; i < nr; i++) { - if (xa_is_value(fbatch->folios[i])) + if (xa_is_value(fbatch->folios[i])) { + /* + * File systems should already have called + * dax_break_layout_entry() to remove all DAX + * entries while holding a lock to prevent + * establishing new entries. Therefore we + * shouldn't find any here. + */ + WARN_ON_ONCE(1); + + /* + * Delete the mapping so truncate_pagecache() + * doesn't loop forever. + */ dax_delete_mapping_entry(mapping, indices[i]); + } } goto out; } From 0e2f80afcfa699ce722c01afc9286a942bd57211 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:02 +1100 Subject: [PATCH 282/431] fs/dax: ensure all pages are idle prior to filesystem unmount File systems call dax_break_mapping() prior to reallocating file system blocks to ensure the page is not undergoing any DMA or other accesses. Generally this is needed when a file is truncated to ensure that if a block is reallocated nothing is writing to it. However filesystems currently don't call this when an FS DAX inode is evicted. This can cause problems when the file system is unmounted as a page can continue to be under going DMA or other remote access after unmount. This means if the file system is remounted any truncate or other operation which requires the underlying file system block to be freed will not wait for the remote access to complete. Therefore a busy block may be reallocated to a new file leading to corruption. Link: https://lkml.kernel.org/r/2d3cf575bbd095084993154be2f0aa7442e5cd28.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- fs/dax.c | 27 +++++++++++++++++++++++++++ fs/ext4/inode.c | 2 ++ fs/xfs/xfs_super.c | 12 ++++++++++++ include/linux/dax.h | 5 +++++ 4 files changed, 46 insertions(+) diff --git a/fs/dax.c b/fs/dax.c index 14fbe5163037..bc538ba56058 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -884,6 +884,13 @@ static int wait_page_idle(struct page *page, TASK_INTERRUPTIBLE, 0, 0, cb(inode)); } +static void wait_page_idle_uninterruptible(struct page *page, + struct inode *inode) +{ + ___wait_var_event(page, dax_page_is_idle(page), + TASK_UNINTERRUPTIBLE, 0, 0, schedule()); +} + /* * Unmaps the inode and waits for any DMA to complete prior to deleting the * DAX mapping entries for the range. @@ -919,6 +926,26 @@ int dax_break_layout(struct inode *inode, loff_t start, loff_t end, } EXPORT_SYMBOL_GPL(dax_break_layout); +void dax_break_layout_final(struct inode *inode) +{ + struct page *page; + + if (!dax_mapping(inode->i_mapping)) + return; + + do { + page = dax_layout_busy_page_range(inode->i_mapping, 0, + LLONG_MAX); + if (!page) + break; + + wait_page_idle_uninterruptible(page, inode); + } while (true); + + dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); +} +EXPORT_SYMBOL_GPL(dax_break_layout_final); + /* * Invalidate DAX entry if it is clean. */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2342bac14a9e..3cc8da6357aa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -181,6 +181,8 @@ void ext4_evict_inode(struct inode *inode) trace_ext4_evict_inode(inode); + dax_break_layout_final(inode); + if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL) ext4_evict_ea_inode(inode); if (inode->i_nlink) { diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 0055066fb1d9..37898f89b3ea 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -751,6 +751,17 @@ xfs_fs_drop_inode( return generic_drop_inode(inode); } +STATIC void +xfs_fs_evict_inode( + struct inode *inode) +{ + if (IS_DAX(inode)) + dax_break_layout_final(inode); + + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); +} + static void xfs_mount_free( struct xfs_mount *mp) @@ -1215,6 +1226,7 @@ static const struct super_operations xfs_super_operations = { .destroy_inode = xfs_fs_destroy_inode, .dirty_inode = xfs_fs_dirty_inode, .drop_inode = xfs_fs_drop_inode, + .evict_inode = xfs_fs_evict_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, diff --git a/include/linux/dax.h b/include/linux/dax.h index 2fbb262092ca..2333c30f6d36 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -232,6 +232,10 @@ static inline int __must_check dax_break_layout(struct inode *inode, { return 0; } + +static inline void dax_break_layout_final(struct inode *inode) +{ +} #endif bool dax_alive(struct dax_device *dax_dev); @@ -266,6 +270,7 @@ static inline int __must_check dax_break_layout_inode(struct inode *inode, { return dax_break_layout(inode, 0, LLONG_MAX, cb); } +void dax_break_layout_final(struct inode *inode); int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t 
destoff, loff_t len, bool *is_same, From cbe298d82cf70aa6bb16cfc8ae4db522f39a0034 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:03 +1100 Subject: [PATCH 283/431] fs/dax: remove PAGE_MAPPING_DAX_SHARED mapping flag The page ->mapping pointer can have magic values like PAGE_MAPPING_DAX_SHARED and PAGE_MAPPING_ANON for page owner specific usage. Currently PAGE_MAPPING_DAX_SHARED and PAGE_MAPPING_ANON alias to the same value. This isn't a problem because FS DAX pages are never seen by the anonymous mapping code and vice versa. However a future change will make FS DAX pages more like normal pages, so folio_test_anon() must not return true for a FS DAX page. We could explicitly test for a FS DAX page in folio_test_anon(), etc. however the PAGE_MAPPING_DAX_SHARED flag isn't actually needed. Instead we can use the page->mapping field to implicitly track the first mapping of a page. If page->mapping is non-NULL it implies the page is associated with a single mapping at page->index. If the page is associated with a second mapping clear page->mapping and set page->share to 1. This is possible because a shared mapping implies the file-system implements dax_holder_operations which makes the ->mapping and ->index, which is a union with ->share, unused. The page is considered shared when page->mapping == NULL and page->share > 0 or page->mapping != NULL, implying it is present in at least one address space. This also makes it easier for a future change to detect when a page is first mapped into an address space which requires special handling. Link: https://lkml.kernel.org/r/c22f699202db0acee2f7039eb026e68261ce42d6.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Tested-by: Alison Schofield Cc: Asahi Lina Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Ted Ts'o Cc: Vishal Verma Cc: WANG Xuerui Cc: Will Deacon Cc: Alexander Gordeev Cc: Balbir Singh Cc: Christian Borntraeger Cc: Heiko Carstens Cc: Jason Gunthorpe Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Vivek Goyal Signed-off-by: Andrew Morton --- fs/dax.c | 55 +++++++++++++++++++++++--------------- include/linux/page-flags.h | 6 ----- 2 files changed, 33 insertions(+), 28 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index bc538ba56058..6674540363e8 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -351,27 +351,40 @@ static unsigned long dax_end_pfn(void *entry) for (pfn = dax_to_pfn(entry); \ pfn < dax_end_pfn(entry); pfn++) +/* + * A DAX folio is considered shared if it has no mapping set and ->share (which + * shares the ->index field) is non-zero. Note this may return false even if the + * page is shared between multiple files but has not yet actually been mapped + * into multiple address spaces. + */ static inline bool dax_folio_is_shared(struct folio *folio) { - return folio->mapping == PAGE_MAPPING_DAX_SHARED; + return !folio->mapping && folio->page.share; } /* - * Set the folio->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the - * refcount. + * When it is called by dax_insert_entry(), the shared flag will indicate + * whether this entry is shared by multiple files. 
If the page has not + * previously been associated with any mappings the ->mapping and ->index + * fields will be set. If it has already been associated with a mapping + * the mapping will be cleared and the share count set. It's then up to + * reverse map users like memory_failure() to call back into the filesystem to + * recover ->mapping and ->index information. For example by implementing + * dax_holder_operations. */ -static inline void dax_folio_share_get(struct folio *folio) +static void dax_folio_make_shared(struct folio *folio) { - if (folio->mapping != PAGE_MAPPING_DAX_SHARED) { - /* - * Reset the index if the page was already mapped - * regularly before. - */ - if (folio->mapping) - folio->page.share = 1; - folio->mapping = PAGE_MAPPING_DAX_SHARED; - } - folio->page.share++; + /* + * folio is not currently shared so mark it as shared by clearing + * folio->mapping. + */ + folio->mapping = NULL; + + /* + * folio has previously been mapped into one address space so set the + * share count. + */ + folio->page.share = 1; } static inline unsigned long dax_folio_share_put(struct folio *folio) @@ -379,12 +392,6 @@ static inline unsigned long dax_folio_share_put(struct folio *folio) return --folio->page.share; } -/* - * When it is called in dax_insert_entry(), the shared flag will indicate - * that whether this entry is shared by multiple files. If so, set - * the folio->mapping PAGE_MAPPING_DAX_SHARED, and use page->share - * as refcount. - */ static void dax_associate_entry(void *entry, struct address_space *mapping, struct vm_area_struct *vma, unsigned long address, bool shared) { @@ -398,8 +405,12 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, for_each_mapped_pfn(entry, pfn) { struct folio *folio = pfn_folio(pfn); - if (shared) { - dax_folio_share_get(folio); + if (shared && (folio->mapping || folio->page.share)) { + if (folio->mapping) + dax_folio_make_shared(folio); + + WARN_ON_ONCE(!folio->page.share); + folio->page.share++; } else { WARN_ON_ONCE(folio->mapping); folio->mapping = mapping; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 36d283552f80..cab382bd965e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -673,12 +673,6 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) -/* - * Different with flags above, this flag is used only for fsdax mode. It - * indicates that this page->mapping is now under reflink case. - */ -#define PAGE_MAPPING_DAX_SHARED ((void *)0x1) - static __always_inline bool folio_mapping_flags(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; From a58c6fb6623d8aef36c0c4e0b8a48770deef3213 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:04 +1100 Subject: [PATCH 284/431] mm/gup: remove redundant check for PCI P2PDMA page PCI P2PDMA pages are not mapped with pXX_devmap PTEs therefore the check in __gup_device_huge() is redundant. Remove it Link: https://lkml.kernel.org/r/260e3dcfaf05ff1c734a49698ed4332b5dae04c2.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Jason Gunthorpe Reviewed-by: Dan Wiliams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/gup.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index e42e4fdaf765..e5d6454df41d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3013,11 +3013,6 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, break; } - if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { - gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); - break; - } - folio = try_grab_folio_fast(page, 1, flags); if (!folio) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); From b7e2823787735ca009e63f35f164b46df0ef096c Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:05 +1100 Subject: [PATCH 285/431] mm/mm_init: move p2pdma page refcount initialisation to p2pdma Currently ZONE_DEVICE page reference counts are initialised by core memory management code in __init_zone_device_page() as part of the memremap() call which driver modules make to obtain ZONE_DEVICE pages. This initialises page refcounts to 1 before returning them to the driver. This was presumably done because it drivers had a reference of sorts on the page. It also ensured the page could always be mapped with vm_insert_page() for example and would never get freed (ie. have a zero refcount), freeing drivers of manipulating page reference counts. However it complicates figuring out whether or not a page is free from the mm perspective because it is no longer possible to just look at the refcount. Instead the page type must be known and if GUP is used a secondary pgmap reference is also sometimes needed. To simplify this it is desirable to remove the page reference count for the driver, so core mm can just use the refcount without having to account for page type or do other types of tracking. This is possible because drivers can always assume the page is valid as core kernel will never offline or remove the struct page. This means it is now up to drivers to initialise the page refcount as required. P2PDMA uses vm_insert_page() to map the page, and that requires a non-zero reference count when initialising the page so set that when the page is first mapped. Link: https://lkml.kernel.org/r/6aedb0ac2886dcc4503cb705273db5b3863a0b66.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/pci/p2pdma.c | 13 +++++++++++-- mm/memremap.c | 17 +++++++++++++---- mm/mm_init.c | 22 ++++++++++++++++++---- 3 files changed, 42 insertions(+), 10 deletions(-) diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 0cb7e0aaba0e..04773a865819 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj, rcu_read_unlock(); for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { - ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr)); + struct page *page = virt_to_page(kaddr); + + /* + * Initialise the refcount for the freshly allocated page. As + * we have just allocated the page no one else should be + * using it. + */ + VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page); + set_page_count(page, 1); + ret = vm_insert_page(vma, vaddr, page); if (ret) { gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len); return ret; } percpu_ref_get(ref); - put_page(virt_to_page(kaddr)); + put_page(page); kaddr += PAGE_SIZE; len -= PAGE_SIZE; } diff --git a/mm/memremap.c b/mm/memremap.c index 40d4547ce514..07bbe0eed084 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -488,15 +488,24 @@ void free_zone_device_folio(struct folio *folio) folio->mapping = NULL; folio->page.pgmap->ops->page_free(folio_page(folio, 0)); - if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE && - folio->page.pgmap->type != MEMORY_DEVICE_COHERENT) + switch (folio->page.pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: + put_dev_pagemap(folio->page.pgmap); + break; + + case MEMORY_DEVICE_FS_DAX: + case MEMORY_DEVICE_GENERIC: /* * Reset the refcount to 1 to prepare for handing out the page * again. */ folio_set_count(folio, 1); - else - put_dev_pagemap(folio->page.pgmap); + break; + + case MEMORY_DEVICE_PCI_P2PDMA: + break; + } } void zone_device_page_init(struct page *page) diff --git a/mm/mm_init.c b/mm/mm_init.c index b5047c5ef7d6..dbb92fdf36fe 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1026,12 +1026,26 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, } /* - * ZONE_DEVICE pages are released directly to the driver page allocator - * which will set the page count to 1 when allocating the page. + * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC and + * MEMORY_TYPE_FS_DAX pages are released directly to the driver page + * allocator which will set the page count to 1 when allocating the + * page. + * + * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have + * their refcount reset to one whenever they are freed (ie. after + * their refcount drops to 0). 
*/ - if (pgmap->type == MEMORY_DEVICE_PRIVATE || - pgmap->type == MEMORY_DEVICE_COHERENT) + switch (pgmap->type) { + case MEMORY_DEVICE_PRIVATE: + case MEMORY_DEVICE_COHERENT: + case MEMORY_DEVICE_PCI_P2PDMA: set_page_count(page, 0); + break; + + case MEMORY_DEVICE_FS_DAX: + case MEMORY_DEVICE_GENERIC: + break; + } } /* From 82ba975e4c43d98afebced82d940ddb7aec42a9d Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:06 +1100 Subject: [PATCH 286/431] mm: allow compound zone device pages Zone device pages are used to represent various type of device memory managed by device drivers. Currently compound zone device pages are not supported. This is because MEMORY_DEVICE_FS_DAX pages are the only user of higher order zone device pages and have their own page reference counting. A future change will unify FS DAX reference counting with normal page reference counting rules and remove the special FS DAX reference counting. Supporting that requires compound zone device pages. Supporting compound zone device pages requires compound_head() to distinguish between head and tail pages whilst still preserving the special struct page fields that are specific to zone device pages. A tail page is distinguished by having bit zero being set in page->compound_head, with the remaining bits pointing to the head page. For zone device pages page->compound_head is shared with page->pgmap. The page->pgmap field must be common to all pages within a folio, even if the folio spans memory sections. Therefore pgmap is the same for both head and tail pages and can be moved into the folio and we can use the standard scheme to find compound_head from a tail page. Link: https://lkml.kernel.org/r/67055d772e6102accf85161d0b57b0b3944292bf.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Signed-off-by: Balbir Singh Reviewed-by: Jason Gunthorpe Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/gpu/drm/nouveau/nouveau_dmem.c | 3 ++- drivers/pci/p2pdma.c | 6 +++--- include/linux/memremap.h | 6 +++--- include/linux/migrate.h | 4 ++-- include/linux/mm_types.h | 9 +++++++-- include/linux/mmzone.h | 12 +++++++++++- lib/test_hmm.c | 3 ++- mm/hmm.c | 2 +- mm/memory.c | 4 +++- mm/memremap.c | 14 +++++++------- mm/migrate_device.c | 18 ++++++++++++------ mm/mlock.c | 2 ++ mm/mm_init.c | 2 +- 13 files changed, 56 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1a072568cef6..61d0f411ef84 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -88,7 +88,8 @@ struct nouveau_dmem { static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page) { - return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap); + return container_of(page_pgmap(page), struct nouveau_dmem_chunk, + pagemap); } static struct nouveau_drm *page_to_drm(struct page *page) diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 04773a865819..19214ec81fbb 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -202,7 +202,7 @@ static const struct attribute_group p2pmem_group = { static void p2pdma_page_free(struct page *page) { - struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap); + struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page)); /* safe to dereference while a reference is held to the percpu ref */ struct pci_p2pdma *p2pdma = rcu_dereference_protected(pgmap->provider->p2pdma, 1); @@ -1025,8 +1025,8 @@ enum pci_p2pdma_map_type pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, struct scatterlist *sg) { - if (state->pgmap != sg_page(sg)->pgmap) { - state->pgmap = sg_page(sg)->pgmap; + if (state->pgmap != page_pgmap(sg_page(sg))) { + state->pgmap = page_pgmap(sg_page(sg)); state->map = pci_p2pdma_map_type(state->pgmap, dev); state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset; } diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 3f7143ade32c..0256a4218dc3 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -161,7 +161,7 @@ static inline bool is_device_private_page(const struct page *page) { return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; + page_pgmap(page)->type == MEMORY_DEVICE_PRIVATE; } static inline bool folio_is_device_private(const struct folio *folio) @@ -173,13 +173,13 @@ static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; + page_pgmap(page)->type == MEMORY_DEVICE_PCI_P2PDMA; } static inline bool is_device_coherent_page(const struct page *page) { return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_COHERENT; + page_pgmap(page)->type == MEMORY_DEVICE_COHERENT; } static inline bool folio_is_device_coherent(const struct folio *folio) diff --git a/include/linux/migrate.h 
b/include/linux/migrate.h index 29919faea2f1..61899ec7a9a3 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -205,8 +205,8 @@ struct migrate_vma { unsigned long end; /* - * Set to the owner value also stored in page->pgmap->owner for - * migrating out of device private memory. The flags also need to + * Set to the owner value also stored in page_pgmap(page)->owner + * for migrating out of device private memory. The flags also need to * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. * The caller should always set this field when using mmu notifier * callbacks to avoid device MMU invalidations for device private diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6a93abb4452b..0fa7907d437e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -130,8 +130,11 @@ struct page { unsigned long compound_head; /* Bit zero is set */ }; struct { /* ZONE_DEVICE pages */ - /** @pgmap: Points to the hosting device page map. */ - struct dev_pagemap *pgmap; + /* + * The first word is used for compound_head or folio + * pgmap + */ + void *_unused_pgmap_compound_head; void *zone_device_data; /* * ZONE_DEVICE private pages are counted as being @@ -300,6 +303,7 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @pgmap: Metadata for ZONE_DEVICE mappings * @virtual: Virtual address in the kernel direct map. * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). @@ -338,6 +342,7 @@ struct folio { /* private: */ }; /* public: */ + struct dev_pagemap *pgmap; }; struct address_space *mapping; pgoff_t index; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 44ecb2f90db4..550dbba92521 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1158,6 +1158,12 @@ static inline bool is_zone_device_page(const struct page *page) return page_zonenum(page) == ZONE_DEVICE; } +static inline struct dev_pagemap *page_pgmap(const struct page *page) +{ + VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page); + return page_folio(page)->pgmap; +} + /* * Consecutive zone device pages should not be merged into the same sgl * or bvec segment with other types of pages or if they belong to different @@ -1173,7 +1179,7 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a, return false; if (!is_zone_device_page(a)) return true; - return a->pgmap == b->pgmap; + return page_pgmap(a) == page_pgmap(b); } extern void memmap_init_zone_device(struct zone *, unsigned long, @@ -1188,6 +1194,10 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a, { return true; } +static inline struct dev_pagemap *page_pgmap(const struct page *page) +{ + return NULL; +} #endif static inline bool folio_is_zone_device(const struct folio *folio) diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 39a2286f8592..5b144bc5c4ec 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -195,7 +195,8 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp) static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) { - return container_of(page->pgmap, struct dmirror_chunk, pagemap); + return container_of(page_pgmap(page), struct dmirror_chunk, + pagemap); } static struct dmirror_device *dmirror_page_to_device(struct page *page) diff --git a/mm/hmm.c b/mm/hmm.c index 7e0229ae4a5a..082f7b7c0b9e 
100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -248,7 +248,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, * just report the PFN. */ if (is_device_private_entry(entry) && - pfn_swap_entry_to_page(entry)->pgmap->owner == + page_pgmap(pfn_swap_entry_to_page(entry))->owner == range->dev_private_owner) { cpu_flags = HMM_PFN_VALID; if (is_writable_device_private_entry(entry)) diff --git a/mm/memory.c b/mm/memory.c index 4c12a05fabd9..c9ddd991abda 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4338,6 +4338,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); ret = remove_device_exclusive_entry(vmf); } else if (is_device_private_entry(entry)) { + struct dev_pagemap *pgmap; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { /* * migrate_to_ram is not yet ready to operate @@ -4362,7 +4363,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); - ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); + pgmap = page_pgmap(vmf->page); + ret = pgmap->ops->migrate_to_ram(vmf); put_page(vmf->page); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; diff --git a/mm/memremap.c b/mm/memremap.c index 07bbe0eed084..68099af9df4c 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -458,8 +458,8 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap); void free_zone_device_folio(struct folio *folio) { - if (WARN_ON_ONCE(!folio->page.pgmap->ops || - !folio->page.pgmap->ops->page_free)) + if (WARN_ON_ONCE(!folio->pgmap->ops || + !folio->pgmap->ops->page_free)) return; mem_cgroup_uncharge(folio); @@ -486,12 +486,12 @@ void free_zone_device_folio(struct folio *folio) * to clear folio->mapping. */ folio->mapping = NULL; - folio->page.pgmap->ops->page_free(folio_page(folio, 0)); + folio->pgmap->ops->page_free(folio_page(folio, 0)); - switch (folio->page.pgmap->type) { + switch (folio->pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: - put_dev_pagemap(folio->page.pgmap); + put_dev_pagemap(folio->pgmap); break; case MEMORY_DEVICE_FS_DAX: @@ -514,7 +514,7 @@ void zone_device_page_init(struct page *page) * Drivers shouldn't be allocating pages after calling * memunmap_pages(). 
*/ - WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref)); + WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref)); set_page_count(page, 1); lock_page(page); } @@ -523,7 +523,7 @@ EXPORT_SYMBOL_GPL(zone_device_page_init); #ifdef CONFIG_FS_DAX bool __put_devmap_managed_folio_refs(struct folio *folio, int refs) { - if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX) + if (folio->pgmap->type != MEMORY_DEVICE_FS_DAX) return false; /* diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 5bd888223cc8..7d0d64f67cdf 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -106,6 +106,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, arch_enter_lazy_mmu_mode(); for (; addr < end; addr += PAGE_SIZE, ptep++) { + struct dev_pagemap *pgmap; unsigned long mpfn = 0, pfn; struct folio *folio; struct page *page; @@ -133,9 +134,10 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, goto next; page = pfn_swap_entry_to_page(entry); + pgmap = page_pgmap(page); if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || - page->pgmap->owner != migrate->pgmap_owner) + pgmap->owner != migrate->pgmap_owner) goto next; mpfn = migrate_pfn(page_to_pfn(page)) | @@ -152,12 +154,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, } page = vm_normal_page(migrate->vma, addr, pte); if (page && !is_zone_device_page(page) && - !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) - goto next; - else if (page && is_device_coherent_page(page) && - (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) || - page->pgmap->owner != migrate->pgmap_owner)) + !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) { goto next; + } else if (page && is_device_coherent_page(page)) { + pgmap = page_pgmap(page); + + if (!(migrate->flags & + MIGRATE_VMA_SELECT_DEVICE_COHERENT) || + pgmap->owner != migrate->pgmap_owner) + goto next; + } mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; } diff --git a/mm/mlock.c b/mm/mlock.c index cde076fa7d5e..3cb72b579ffd 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -368,6 +368,8 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, if (is_huge_zero_pmd(*pmd)) goto out; folio = pmd_folio(*pmd); + if (folio_is_zone_device(folio)) + goto out; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else diff --git a/mm/mm_init.c b/mm/mm_init.c index dbb92fdf36fe..73e97ce95f58 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1007,7 +1007,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, * and zone_device_data. It is a bug if a ZONE_DEVICE page is * ever freed or placed on a driver-private list. */ - page->pgmap = pgmap; + page_folio(page)->pgmap = pgmap; page->zone_device_data = NULL; /* From 15a64311e0ae069196b7d3b88d0c11e0b2ad733e Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:07 +1100 Subject: [PATCH 287/431] mm/memory: enhance insert_page_into_pte_locked() to create writable mappings In preparation for using insert_page() for DAX, enhance insert_page_into_pte_locked() to handle establishing writable mappings. Recall that DAX returns VM_FAULT_NOPAGE after installing a PTE which bypasses the typical set_pte_range() in finish_fault. 
Link: https://lkml.kernel.org/r/f7354fd9c2f5d0c2fa321733039f9f87e791023e.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Suggested-by: Dan Williams Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/memory.c | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index c9ddd991abda..705c902cf56a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2145,19 +2145,39 @@ static int validate_page_before_insert(struct vm_area_struct *vma, } static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, - unsigned long addr, struct page *page, pgprot_t prot) + unsigned long addr, struct page *page, + pgprot_t prot, bool mkwrite) { struct folio *folio = page_folio(page); - pte_t pteval; + pte_t pteval = ptep_get(pte); + + if (!pte_none(pteval)) { + if (!mkwrite) + return -EBUSY; + + /* see insert_pfn(). */ + if (pte_pfn(pteval) != page_to_pfn(page)) { + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval))); + return -EFAULT; + } + pteval = maybe_mkwrite(pteval, vma); + pteval = pte_mkyoung(pteval); + if (ptep_set_access_flags(vma, addr, pte, pteval, 1)) + update_mmu_cache(vma, addr, pte); + return 0; + } - if (!pte_none(ptep_get(pte))) - return -EBUSY; /* Ok, finally just insert the thing.. 
*/ pteval = mk_pte(page, prot); if (unlikely(is_zero_folio(folio))) { pteval = pte_mkspecial(pteval); } else { folio_get(folio); + pteval = mk_pte(page, prot); + if (mkwrite) { + pteval = pte_mkyoung(pteval); + pteval = maybe_mkwrite(pte_mkdirty(pteval), vma); + } inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); folio_add_file_rmap_pte(folio, page, vma); } @@ -2166,7 +2186,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, } static int insert_page(struct vm_area_struct *vma, unsigned long addr, - struct page *page, pgprot_t prot) + struct page *page, pgprot_t prot, bool mkwrite) { int retval; pte_t *pte; @@ -2179,7 +2199,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, pte = get_locked_pte(vma->vm_mm, addr, &ptl); if (!pte) goto out; - retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); + retval = insert_page_into_pte_locked(vma, pte, addr, page, prot, + mkwrite); pte_unmap_unlock(pte, ptl); out: return retval; @@ -2193,7 +2214,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, err = validate_page_before_insert(vma, page); if (err) return err; - return insert_page_into_pte_locked(vma, pte, addr, page, prot); + return insert_page_into_pte_locked(vma, pte, addr, page, prot, false); } /* insert_pages() amortizes the cost of spinlock operations @@ -2329,7 +2350,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } - return insert_page(vma, addr, page, vma->vm_page_prot); + return insert_page(vma, addr, page, vma->vm_page_prot, false); } EXPORT_SYMBOL(vm_insert_page); @@ -2609,7 +2630,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, * result in pfn_t_has_page() == false. */ page = pfn_to_page(pfn_t_to_pfn(pfn)); - err = insert_page(vma, addr, page, pgprot); + err = insert_page(vma, addr, page, pgprot, mkwrite); } else { return insert_pfn(vma, addr, pfn, pgprot, mkwrite); } From ec2e0cc67f9c3ed5926da390ad86b25c142a2e4a Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:08 +1100 Subject: [PATCH 288/431] mm/memory: add vmf_insert_page_mkwrite() Currently to map a DAX page the DAX driver calls vmf_insert_pfn. This creates a special devmap PTE entry for the pfn but does not take a reference on the underlying struct page for the mapping. This is because DAX page refcounts are treated specially, as indicated by the presence of a devmap entry. To allow DAX page refcounts to be managed the same as normal page refcounts introduce vmf_insert_page_mkwrite(). This will take a reference on the underlying page much the same as vmf_insert_page, except it also permits upgrading an existing mapping to be writable if requested/possible. Link: https://lkml.kernel.org/r/4ce3aa984c060f370105e0bfef1035869578be47.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 ++ mm/memory.c | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 9a74a3ee68bc..c6fb9300f6c0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3644,6 +3644,8 @@ int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num); +vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, + bool write); vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/memory.c b/mm/memory.c index 705c902cf56a..f09b4a1d09a8 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2643,6 +2643,26 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page, + bool write) +{ + pgprot_t pgprot = vmf->vma->vm_page_prot; + unsigned long addr = vmf->address; + int err; + + if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end) + return VM_FAULT_SIGBUS; + + err = insert_page(vmf->vma, addr, page, pgprot, write); + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite); + vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { From 349994cf61e6eaa5996d53e45ffd2272d32d5e4e Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:09 +1100 Subject: [PATCH 289/431] mm/rmap: add support for PUD sized mappings to rmap The rmap doesn't currently support adding a PUD mapping of a folio. This patch adds support for entire PUD mappings of folios, primarily to allow for more standard refcounting of device DAX folios. Currently DAX is the only user of this and it doesn't require support for partially mapped PUD-sized folios so we don't support for that for now. Link: https://lkml.kernel.org/r/248582c07896e30627d1aeaeebc6949cfd91b851.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Acked-by: David Hildenbrand Reviewed-by: Dan Williams Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/rmap.h | 15 ++++++++++ mm/rmap.c | 67 +++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 69e9a431a40e..6abf7960077a 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -192,6 +192,7 @@ typedef int __bitwise rmap_t; enum rmap_level { RMAP_LEVEL_PTE = 0, RMAP_LEVEL_PMD, + RMAP_LEVEL_PUD, }; static inline void __folio_rmap_sanity_checks(const struct folio *folio, @@ -228,6 +229,14 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio, VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); break; + case RMAP_LEVEL_PUD: + /* + * Assume that we are creating a single "entire" mapping of the + * folio. + */ + VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio); + VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio); + break; default: VM_WARN_ON_ONCE(true); } @@ -251,12 +260,16 @@ void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, folio_add_file_rmap_ptes(folio, page, 1, vma) void folio_add_file_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); +void folio_add_file_rmap_pud(struct folio *, struct page *, + struct vm_area_struct *); void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_remove_rmap_pte(folio, page, vma) \ folio_remove_rmap_ptes(folio, page, 1, vma) void folio_remove_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); +void folio_remove_rmap_pud(struct folio *, struct page *, + struct vm_area_struct *); void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); @@ -341,6 +354,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: atomic_inc(&folio->_entire_mapcount); atomic_inc(&folio->_large_mapcount); break; @@ -437,6 +451,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: if (PageAnonExclusive(page)) { if (unlikely(maybe_pinned)) return -EBUSY; diff --git a/mm/rmap.c b/mm/rmap.c index 333ecac049b2..bcec8677f68d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1269,12 +1269,19 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { - *nr_pmdmapped = folio_nr_pages(folio); - nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + nr_pages = folio_nr_pages(folio); + /* + * We only track PMD mappings of PMD-sized + * folios separately. 
+ */ + if (level == RMAP_LEVEL_PMD) + *nr_pmdmapped = nr_pages; + nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? */ if (unlikely(nr < 0)) nr = 0; @@ -1420,6 +1427,13 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, case RMAP_LEVEL_PMD: SetPageAnonExclusive(page); break; + case RMAP_LEVEL_PUD: + /* + * Keep the compiler happy, we don't support anonymous + * PUD mappings. + */ + WARN_ON_ONCE(1); + break; } } for (i = 0; i < nr_pages; i++) { @@ -1613,6 +1627,27 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, #endif } +/** + * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_add_file_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} + static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) @@ -1642,13 +1677,16 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, partially_mapped = nr && atomic_read(mapped); break; case RMAP_LEVEL_PMD: + case RMAP_LEVEL_PUD: atomic_dec(&folio->_large_mapcount); last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + nr_pages = folio_nr_pages(folio); + if (level == RMAP_LEVEL_PMD) + nr_pmdmapped = nr_pages; + nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; @@ -1722,6 +1760,27 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page, #endif } +/** + * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} + /* We support batch unmapping of PTEs for lazyfree large folios */ static inline bool can_batch_unmap_folio_ptes(unsigned long addr, struct folio *folio, pte_t *ptep) From dbe54153296d0931144a63295fc9367794bbe797 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:10 +1100 Subject: [PATCH 290/431] mm/huge_memory: add vmf_insert_folio_pud() Currently DAX folio/page reference counts are managed differently to normal pages. To allow these to be managed the same as normal pages introduce vmf_insert_folio_pud. This will map the entire PUD-sized folio and take references as it would for a normally mapped page. 
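A condensed sketch of what the new helper does, drawn from the hunk later in this patch, is shown here; the sketch_ name is illustrative only, and the VMA bounds and folio-order sanity checks plus error paths are omitted.

vm_fault_t sketch_vmf_insert_folio_pud(struct vm_fault *vmf,
				       struct folio *folio, bool write)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address & PUD_MASK;
	spinlock_t *ptl = pud_lock(vma->vm_mm, vmf->pud);

	if (pud_none(*vmf->pud)) {
		/* First mapping of this folio here: count it like a file page. */
		folio_get(folio);
		folio_add_file_rmap_pud(folio, &folio->page, vma);
		add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PUD_NR);
	}
	/* Install the huge entry, or upgrade an existing one to writable. */
	insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)), write);
	spin_unlock(ptl);

	return VM_FAULT_NOPAGE;
}

In short, the folio gains a reference and an rmap entry the first time it is mapped at this PUD, just as an ordinary file-backed folio would.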
This is distinct from the current mechanism, vmf_insert_pfn_pud, which simply inserts a special devmap PUD entry into the page table without holding a reference to the page for the mapping. Link: https://lkml.kernel.org/r/649a1ef91d556593948351e94f51ef73a14f6794.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 2 + mm/huge_memory.c | 99 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 89 insertions(+), 12 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e1bea54820ff..4eeb54b50621 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -39,6 +39,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, + bool write); enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_UNSUPPORTED, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index acb12653484e..4cd79784f841 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1482,19 +1482,17 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, struct mm_struct *mm = vma->vm_mm; pgprot_t prot = vma->vm_page_prot; pud_t entry; - spinlock_t *ptl; - ptl = pud_lock(mm, pud); if (!pud_none(*pud)) { if (write) { if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn))) - goto out_unlock; + return; entry = pud_mkyoung(*pud); entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); if (pudp_set_access_flags(vma, addr, pud, entry, 1)) update_mmu_cache_pud(vma, addr, pud); } - goto out_unlock; + return; } entry = pud_mkhuge(pfn_t_pud(pfn, prot)); @@ -1508,9 +1506,6 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, } set_pud_at(mm, addr, pud, entry); update_mmu_cache_pud(vma, addr, pud); - -out_unlock: - spin_unlock(ptl); } /** @@ -1528,6 +1523,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) unsigned long addr = vmf->address & PUD_MASK; struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; + spinlock_t *ptl; /* * If we had pud_special, we could avoid all these restrictions, @@ -1545,10 +1541,57 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) track_pfn_insert(vma, &pgprot, pfn); + ptl = pud_lock(vma->vm_mm, vmf->pud); insert_pfn_pud(vma, addr, vmf->pud, pfn, write); + spin_unlock(ptl); + return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); + +/** + * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry + * @vmf: Structure describing the fault + * @folio: folio to insert + * @write: whether it's a write fault + * + * Return: vm_fault_t value. 
+ */ +vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, + bool write) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address & PUD_MASK; + pud_t *pud = vmf->pud; + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + + if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER)) + return VM_FAULT_SIGBUS; + + ptl = pud_lock(mm, pud); + + /* + * If there is already an entry present we assume the folio is + * already mapped, hence no need to take another reference. We + * still call insert_pfn_pud() though in case the mapping needs + * upgrading to writeable. + */ + if (pud_none(*vmf->pud)) { + folio_get(folio); + folio_add_file_rmap_pud(folio, &folio->page, vma); + add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR); + } + insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)), + write); + spin_unlock(ptl); + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_folio_pud); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ void touch_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -2146,7 +2189,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else if (is_huge_zero_pmd(orig_pmd)) { - zap_deposited_table(tlb->mm, pmd); + if (!vma_is_dax(vma) || arch_needs_pgtable_deposit()) + zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else { struct folio *folio = NULL; @@ -2646,12 +2690,24 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); arch_check_zapped_pud(vma, orig_pud); tlb_remove_pud_tlb_entry(tlb, pud, addr); - if (vma_is_special_huge(vma)) { + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { spin_unlock(ptl); /* No zero page support yet */ } else { - /* No support for anonymous PUD pages yet */ - BUG(); + struct page *page = NULL; + struct folio *folio; + + /* No support for anonymous PUD pages or migration yet */ + VM_WARN_ON_ONCE(vma_is_anonymous(vma) || + !pud_present(orig_pud)); + + page = pud_page(orig_pud); + folio = page_folio(page); + folio_remove_rmap_pud(folio, page, vma); + add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR); + + spin_unlock(ptl); + tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE); } return 1; } @@ -2659,6 +2715,10 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, unsigned long haddr) { + struct folio *folio; + struct page *page; + pud_t old_pud; + VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); @@ -2666,7 +2726,22 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, count_vm_event(THP_SPLIT_PUD); - pudp_huge_clear_flush(vma, haddr, pud); + old_pud = pudp_huge_clear_flush(vma, haddr, pud); + + if (!vma_is_dax(vma)) + return; + + page = pud_page(old_pud); + folio = page_folio(page); + + if (!folio_test_dirty(folio) && pud_dirty(old_pud)) + folio_mark_dirty(folio); + if (!folio_test_referenced(folio) && pud_young(old_pud)) + folio_set_referenced(folio); + folio_remove_rmap_pud(folio, page, vma); + folio_put(folio); + add_mm_counter(vma->vm_mm, mm_counter_file(folio), + -HPAGE_PUD_NR); } void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, From 6c88f72691f88fd1fc493a3c2eed97f91b82b3f0 Mon Sep 17 00:00:00 2001 
From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:11 +1100 Subject: [PATCH 291/431] mm/huge_memory: add vmf_insert_folio_pmd() Currently DAX folio/page reference counts are managed differently to normal pages. To allow these to be managed the same as normal pages introduce vmf_insert_folio_pmd. This will map the entire PMD-sized folio and take references as it would for a normally mapped page. This is distinct from the current mechanism, vmf_insert_pfn_pmd, which simply inserts a special devmap PMD entry into the page table without holding a reference to the page for the mapping. It is not currently useful to implement a more generic vmf_insert_folio() which selects the correct behaviour based on folio_order(). This is because PTE faults require only a subpage of the folio to be PTE mapped rather than the entire folio. It would be possible to add this context somewhere but callers already need to handle PTE faults and PMD faults separately so a more generic function is not useful. Link: https://lkml.kernel.org/r/7bf92a2e68225d13ea368d53bbfee327314d1c40.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 65 +++++++++++++++++++++++++++++++++-------- 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4eeb54b50621..e57e811cfd3c 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -39,6 +39,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write); vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, bool write); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4cd79784f841..e6f4189c1c94 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1375,20 +1375,20 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) return __do_huge_pmd_anonymous_page(vmf); } -static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, +static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) { struct mm_struct *mm = vma->vm_mm; pmd_t entry; - spinlock_t *ptl; - ptl = pmd_lock(mm, pmd); + lockdep_assert_held(pmd_lockptr(mm, pmd)); + if (!pmd_none(*pmd)) { if (write) { if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); - goto out_unlock; + return -EEXIST; } entry = pmd_mkyoung(*pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); @@ -1396,7 +1396,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 
update_mmu_cache_pmd(vma, addr, pmd); } - goto out_unlock; + return -EEXIST; } entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); @@ -1412,16 +1412,11 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, if (pgtable) { pgtable_trans_huge_deposit(mm, pmd, pgtable); mm_inc_nr_ptes(mm); - pgtable = NULL; } set_pmd_at(mm, addr, pmd, entry); update_mmu_cache_pmd(vma, addr, pmd); - -out_unlock: - spin_unlock(ptl); - if (pgtable) - pte_free(mm, pgtable); + return 0; } /** @@ -1440,6 +1435,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; + spinlock_t *ptl; + int error; /* * If we had pmd_special, we could avoid all these restrictions, @@ -1462,12 +1459,56 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) } track_pfn_insert(vma, &pgprot, pfn); + ptl = pmd_lock(vma->vm_mm, vmf->pmd); + error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, + pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(vma->vm_mm, pgtable); - insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address & PMD_MASK; + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pgtable_t pgtable = NULL; + int error; + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + + if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) + return VM_FAULT_SIGBUS; + + if (arch_needs_pgtable_deposit()) { + pgtable = pte_alloc_one(vma->vm_mm); + if (!pgtable) + return VM_FAULT_OOM; + } + + ptl = pmd_lock(mm, vmf->pmd); + if (pmd_none(*vmf->pmd)) { + folio_get(folio); + folio_add_file_rmap_pmd(folio, &folio->page, vma); + add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR); + } + error = insert_pfn_pmd(vma, addr, vmf->pmd, + pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot, + write, pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(mm, pgtable); + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd); + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) { From e5cb23256347469c80138a2a8804887239465d0b Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:12 +1100 Subject: [PATCH 292/431] mm/gup: don't allow FOLL_LONGTERM pinning of FS DAX pages Longterm pinning of FS DAX pages should already be disallowed by various pXX_devmap checks. However a future change will cause these checks to be invalid for FS DAX pages so make folio_is_longterm_pinnable() return false for FS DAX pages. Link: https://lkml.kernel.org/r/250a31876704b79f7c65b159f3c835e547f052df.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: John Hubbard Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/memremap.h | 11 +++++++++++ include/linux/mm.h | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 0256a4218dc3..4aa151914eab 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -187,6 +187,17 @@ static inline bool folio_is_device_coherent(const struct folio *folio) return is_device_coherent_page(&folio->page); } +static inline bool is_fsdax_page(const struct page *page) +{ + return is_zone_device_page(page) && + page_pgmap(page)->type == MEMORY_DEVICE_FS_DAX; +} + +static inline bool folio_is_fsdax(const struct folio *folio) +{ + return is_fsdax_page(&folio->page); +} + #ifdef CONFIG_ZONE_DEVICE void zone_device_page_init(struct page *page); void *memremap_pages(struct dev_pagemap *pgmap, int nid); diff --git a/include/linux/mm.h b/include/linux/mm.h index c6fb9300f6c0..009484a07074 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2115,6 +2115,13 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio) if (folio_is_device_coherent(folio)) return false; + /* + * Filesystems can only tolerate transient delays to truncate and + * hole-punch operations + */ + if (folio_is_fsdax(folio)) + return false; + /* Otherwise, non-movable zone folios can be pinned. */ return !folio_is_zone_movable(folio); From 653d7825c149932f254e0cd22153ccc945e7e545 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 28 Feb 2025 14:31:13 +1100 Subject: [PATCH 293/431] dcssblk: mark DAX broken, remove FS_DAX_LIMITED support The dcssblk driver has long needed special case supoprt to enable limited dax operation, so called CONFIG_FS_DAX_LIMITED. This mode works around the incomplete support for ZONE_DEVICE on s390 by forgoing the ability of dax-mapped pages to support GUP. Now, pending cleanups to fsdax that fix its reference counting [1] depend on the ability of all dax drivers to supply ZONE_DEVICE pages. To allow that work to move forward, dax support needs to be paused for dcssblk until ZONE_DEVICE support arrives. That work has been known for a few years [2], and the removal of "pte_devmap" requirements [3] makes the conversion easier. For now, place the support behind CONFIG_BROKEN, and remove PFN_SPECIAL (dcssblk was the only user). Link: http://lore.kernel.org/cover.9f0e45d52f5cff58807831b6b867084d0b14b61c.1725941415.git-series.apopple@nvidia.com [1] Link: http://lore.kernel.org/20210820210318.187742e8@thinkpad/ [2] Link: http://lore.kernel.org/4511465a4f8429f45e2ac70d2e65dc5e1df1eb47.1725941415.git-series.apopple@nvidia.com [3] Link: https://lkml.kernel.org/r/33eef2379c0d240f40cc15453fad2df1a4ae34c8.1740713401.git-series.apopple@nvidia.com Signed-off-by: Dan Williams Reviewed-by: Gerald Schaefer Tested-by: Alexander Gordeev Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Christian Borntraeger Cc: Sven Schnelle Cc: Jan Kara Cc: Matthew Wilcox Cc: Christoph Hellwig Cc: Alistair Popple Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Huacai Chen Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Ted Ts'o Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- Documentation/filesystems/dax.rst | 1 - drivers/s390/block/Kconfig | 12 ++++++++++-- drivers/s390/block/dcssblk.c | 27 +++++++++++++++++---------- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst index 719e90f1988e..08dd5e254cc5 100644 --- a/Documentation/filesystems/dax.rst +++ b/Documentation/filesystems/dax.rst @@ -207,7 +207,6 @@ implement direct_access. These block devices may be used for inspiration: - brd: RAM backed block device driver -- dcssblk: s390 dcss block device driver - pmem: NVDIMM persistent memory driver diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index e3710a762aba..4bfe469c04aa 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -4,13 +4,21 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m - select FS_DAX_LIMITED - select DAX prompt "DCSSBLK support" depends on S390 && BLOCK help Support for dcss block device +config DCSSBLK_DAX + def_bool y + depends on DCSSBLK + # requires S390 ZONE_DEVICE support + depends on BROKEN + select DAX + prompt "DCSSBLK DAX support" + help + Enable DAX operation for the dcss block device + config DASD def_tristate y prompt "Support for DASD devices" diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 0f14d279d30b..7248e547fefb 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -534,6 +534,21 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = { NULL, }; +static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info) +{ + struct dax_device *dax_dev; + + if (!IS_ENABLED(CONFIG_DCSSBLK_DAX)) + return 0; + + dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); + if (IS_ERR(dax_dev)) + return PTR_ERR(dax_dev); + set_dax_synchronous(dax_dev); + dev_info->dax_dev = dax_dev; + return dax_add_host(dev_info->dax_dev, dev_info->gd); +} + /* * device attribute for adding devices */ @@ -547,7 +562,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; - struct dax_device *dax_dev; char *local_buf; unsigned long seg_byte_size; @@ -674,14 +688,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto put_dev; - dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); - if (IS_ERR(dax_dev)) { - rc = PTR_ERR(dax_dev); - goto put_dev; - } - set_dax_synchronous(dax_dev); - dev_info->dax_dev = dax_dev; - rc = dax_add_host(dev_info->dax_dev, dev_info->gd); + rc = dcssblk_setup_dax(dev_info); if (rc) goto out_dax; @@ -917,7 +924,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, *kaddr = __va(dev_info->start + offset); if (pfn) *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), - PFN_DEV|PFN_SPECIAL); + PFN_DEV); return (dev_sz - offset) / PAGE_SIZE; } From 38607c62b34b46317c46d5baf1df03ac6e48a1c6 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:14 +1100 Subject: [PATCH 294/431] fs/dax: properly refcount fs dax pages Currently fs dax pages are considered free 
when the refcount drops to one and their refcounts are not increased when mapped via PTEs or decreased when unmapped. This requires special logic in mm paths to detect that these pages should not be properly refcounted, and to detect when the refcount drops to one instead of zero. On the other hand get_user_pages(), etc. will properly refcount fs dax pages by taking a reference and dropping it when the page is unpinned. Tracking this special behaviour requires extra PTE bits (eg. pte_devmap) and introduces rules that are potentially confusing and specific to FS DAX pages. To fix this, and to possibly allow removal of the special PTE bits in future, convert the fs dax page refcounts to be zero based and instead take a reference on the page each time it is mapped as is currently the case for normal pages. This may also allow a future clean-up to remove the pgmap refcounting that is currently done in mm/gup.c. Link: https://lkml.kernel.org/r/c7d886ad7468a20452ef6e0ddab6cfe220874e7c.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Tested-by: Alison Schofield Acked-by: David Hildenbrand Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/nvdimm/pmem.c | 4 +- fs/dax.c | 186 ++++++++++++++++++++++++--------------- fs/fuse/virtio_fs.c | 3 +- include/linux/dax.h | 2 +- include/linux/mm.h | 27 +----- include/linux/mm_types.h | 7 +- mm/gup.c | 9 +- mm/huge_memory.c | 6 +- mm/internal.h | 2 - mm/memory-failure.c | 6 +- mm/memory.c | 6 +- mm/memremap.c | 47 +++++----- mm/mm_init.c | 9 +- mm/swap.c | 2 - 14 files changed, 165 insertions(+), 151 deletions(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index d81faa9d89c9..785b2d2dbc82 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -513,7 +513,7 @@ static int pmem_attach_disk(struct device *dev, pmem->disk = disk; pmem->pgmap.owner = pmem; - pmem->pfn_flags = PFN_DEV; + pmem->pfn_flags = 0; if (is_nd_pfn(dev)) { pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; @@ -522,7 +522,6 @@ static int pmem_attach_disk(struct device *dev, pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); pmem->pfn_pad = resource_size(res) - range_len(&pmem->pgmap.range); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; bb_range.start += pmem->data_offset; } else if (pmem_should_map_pages(dev)) { @@ -532,7 +531,6 @@ static int pmem_attach_disk(struct device *dev, pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; } else { addr = devm_memremap(dev, pmem->phys_addr, diff --git a/fs/dax.c b/fs/dax.c index 6674540363e8..cf96f3dd4e5f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -71,6 +71,11 @@ static unsigned long dax_to_pfn(void *entry) return xa_to_value(entry) >> DAX_SHIFT; } +static struct folio *dax_to_folio(void *entry) +{ + return 
page_folio(pfn_to_page(dax_to_pfn(entry))); +} + static void *dax_make_entry(pfn_t pfn, unsigned long flags) { return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); @@ -338,19 +343,6 @@ static unsigned long dax_entry_size(void *entry) return PAGE_SIZE; } -static unsigned long dax_end_pfn(void *entry) -{ - return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; -} - -/* - * Iterate through all mapped pfns represented by an entry, i.e. skip - * 'empty' and 'zero' entries. - */ -#define for_each_mapped_pfn(entry, pfn) \ - for (pfn = dax_to_pfn(entry); \ - pfn < dax_end_pfn(entry); pfn++) - /* * A DAX folio is considered shared if it has no mapping set and ->share (which * shares the ->index field) is non-zero. Note this may return false even if the @@ -359,7 +351,7 @@ static unsigned long dax_end_pfn(void *entry) */ static inline bool dax_folio_is_shared(struct folio *folio) { - return !folio->mapping && folio->page.share; + return !folio->mapping && folio->share; } /* @@ -384,75 +376,117 @@ static void dax_folio_make_shared(struct folio *folio) * folio has previously been mapped into one address space so set the * share count. */ - folio->page.share = 1; + folio->share = 1; } -static inline unsigned long dax_folio_share_put(struct folio *folio) +static inline unsigned long dax_folio_put(struct folio *folio) { - return --folio->page.share; + unsigned long ref; + int order, i; + + if (!dax_folio_is_shared(folio)) + ref = 0; + else + ref = --folio->share; + + if (ref) + return ref; + + folio->mapping = NULL; + order = folio_order(folio); + if (!order) + return 0; + + for (i = 0; i < (1UL << order); i++) { + struct dev_pagemap *pgmap = page_pgmap(&folio->page); + struct page *page = folio_page(folio, i); + struct folio *new_folio = (struct folio *)page; + + ClearPageHead(page); + clear_compound_head(page); + + new_folio->mapping = NULL; + /* + * Reset pgmap which was over-written by + * prep_compound_page(). + */ + new_folio->pgmap = pgmap; + new_folio->share = 0; + WARN_ON_ONCE(folio_ref_count(new_folio)); + } + + return ref; +} + +static void dax_folio_init(void *entry) +{ + struct folio *folio = dax_to_folio(entry); + int order = dax_entry_order(entry); + + /* + * Folio should have been split back to order-0 pages in + * dax_folio_put() when they were removed from their + * final mapping. 
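+ * The WARN_ON_ONCE() below only sanity-checks that expectation; it does
+ * not split the folio itself.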
+ */ + WARN_ON_ONCE(folio_order(folio)); + + if (order > 0) { + prep_compound_page(&folio->page, order); + if (order > 1) + INIT_LIST_HEAD(&folio->_deferred_list); + WARN_ON_ONCE(folio_ref_count(folio)); + } } static void dax_associate_entry(void *entry, struct address_space *mapping, - struct vm_area_struct *vma, unsigned long address, bool shared) + struct vm_area_struct *vma, + unsigned long address, bool shared) { - unsigned long size = dax_entry_size(entry), pfn, index; - int i = 0; + unsigned long size = dax_entry_size(entry), index; + struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; index = linear_page_index(vma, address & ~(size - 1)); - for_each_mapped_pfn(entry, pfn) { - struct folio *folio = pfn_folio(pfn); + if (shared && (folio->mapping || dax_folio_is_shared(folio))) { + if (folio->mapping) + dax_folio_make_shared(folio); - if (shared && (folio->mapping || folio->page.share)) { - if (folio->mapping) - dax_folio_make_shared(folio); - - WARN_ON_ONCE(!folio->page.share); - folio->page.share++; - } else { - WARN_ON_ONCE(folio->mapping); - folio->mapping = mapping; - folio->index = index + i++; - } + WARN_ON_ONCE(!folio->share); + WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); + folio->share++; + } else { + WARN_ON_ONCE(folio->mapping); + dax_folio_init(entry); + folio = dax_to_folio(entry); + folio->mapping = mapping; + folio->index = index; } } static void dax_disassociate_entry(void *entry, struct address_space *mapping, - bool trunc) + bool trunc) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; - for_each_mapped_pfn(entry, pfn) { - struct folio *folio = pfn_folio(pfn); - - WARN_ON_ONCE(trunc && folio_ref_count(folio) > 1); - if (dax_folio_is_shared(folio)) { - /* keep the shared flag if this page is still shared */ - if (dax_folio_share_put(folio) > 0) - continue; - } else - WARN_ON_ONCE(folio->mapping && folio->mapping != mapping); - folio->mapping = NULL; - folio->index = 0; - } + dax_folio_put(folio); } static struct page *dax_busy_page(void *entry) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); - for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return NULL; - if (page_ref_count(page) > 1) - return page; - } - return NULL; + if (folio_ref_count(folio) - folio_mapcount(folio)) + return &folio->page; + else + return NULL; } /** @@ -785,7 +819,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) EXPORT_SYMBOL_GPL(dax_layout_busy_page); static int __dax_invalidate_entry(struct address_space *mapping, - pgoff_t index, bool trunc) + pgoff_t index, bool trunc) { XA_STATE(xas, &mapping->i_pages, index); int ret = 0; @@ -953,7 +987,8 @@ void dax_break_layout_final(struct inode *inode) wait_page_idle_uninterruptible(page, inode); } while (true); - dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); + if (!page) + dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); } EXPORT_SYMBOL_GPL(dax_break_layout_final); @@ -1039,8 +1074,10 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, void *old; dax_disassociate_entry(entry, mapping, false); - dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, - shared); + if (!(flags & DAX_ZERO_PAGE)) + dax_associate_entry(new_entry, mapping, vmf->vma, + vmf->address, shared); + /* * Only swap our new entry into the page cache if the current * entry is a zero page 
or an empty entry. If a normal PTE or @@ -1228,9 +1265,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, goto out; if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) goto out; - /* For larger pages we need devmap */ - if (length > 1 && !pfn_t_devmap(*pfnp)) - goto out; + rc = 0; out_check_addr: @@ -1337,7 +1372,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); - ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false); trace_dax_load_hole(inode, vmf, ret); return ret; } @@ -1808,7 +1843,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? DAX_PMD : 0; - int err = 0; + struct folio *folio; + int ret, err = 0; pfn_t pfn; void *kaddr; @@ -1840,17 +1876,19 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return dax_fault_return(err); } + folio = dax_to_folio(*entry); if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); - /* insert PMD pfn */ + folio_ref_inc(folio); if (pmd) - return vmf_insert_pfn_pmd(vmf, pfn, write); + ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)), + write); + else + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write); + folio_put(folio); - /* insert PTE pfn */ - if (write) - return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return ret; } static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, @@ -2089,6 +2127,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); + struct folio *folio; void *entry; vm_fault_t ret; @@ -2106,14 +2145,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); + folio = pfn_folio(pfn_t_to_pfn(pfn)); + folio_ref_inc(folio); if (order == 0) - ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); + ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); #ifdef CONFIG_FS_DAX_PMD else if (order == PMD_ORDER) - ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); + ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); #endif else ret = VM_FAULT_FALLBACK; + folio_put(folio); dax_unlock_entry(&xas, entry); trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); return ret; diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 82afe78ec542..2c7b24cb67ad 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1017,8 +1017,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (kaddr) *kaddr = fs->window_kaddr + offset; if (pfn) - *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, - PFN_DEV | PFN_MAP); + *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0); return nr_pages > max_nr_pages ? 
max_nr_pages : nr_pages; } diff --git a/include/linux/dax.h b/include/linux/dax.h index 2333c30f6d36..dcc9fcdf14e4 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -209,7 +209,7 @@ int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, static inline bool dax_page_is_idle(struct page *page) { - return page && page_ref_count(page) == 1; + return page && page_ref_count(page) == 0; } #if IS_ENABLED(CONFIG_DAX) diff --git a/include/linux/mm.h b/include/linux/mm.h index 009484a07074..de008efd96aa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1192,6 +1192,8 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; +extern void prep_compound_page(struct page *page, unsigned int order); + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -1513,25 +1515,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf); * back into memory. */ -#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) -DECLARE_STATIC_KEY_FALSE(devmap_managed_key); - -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs); -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (!static_branch_unlikely(&devmap_managed_key)) - return false; - if (!folio_is_zone_device(folio)) - return false; - return __put_devmap_managed_folio_refs(folio, refs); -} -#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - return false; -} -#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ - /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) @@ -1652,12 +1635,6 @@ static inline void put_page(struct page *page) if (folio_test_slab(folio)) return; - /* - * For some devmap managed pages we need to catch refcount transition - * from 2 to 1: - */ - if (put_devmap_managed_folio_refs(folio, 1)) - return; folio_put(folio); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0fa7907d437e..b1827d78ff89 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -296,6 +296,8 @@ typedef struct { * anonymous memory. * @index: Offset within the file, in units of pages. For anonymous memory, * this is the index from the beginning of the mmap. + * @share: number of DAX mappings that reference this folio. See + * dax_associate_entry. * @private: Filesystem per-folio data (see folio_attach_private()). * @swap: Used for swp_entry_t if folio_test_swapcache(). * @_mapcount: Do not access this member directly. Use folio_mapcount() to @@ -345,7 +347,10 @@ struct folio { struct dev_pagemap *pgmap; }; struct address_space *mapping; - pgoff_t index; + union { + pgoff_t index; + unsigned long share; + }; union { void *private; swp_entry_t swap; diff --git a/mm/gup.c b/mm/gup.c index e5d6454df41d..e5040657870e 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -96,8 +96,7 @@ static inline struct folio *try_get_folio(struct page *page, int refs) * belongs to this folio. 
*/ if (unlikely(page_folio(page) != folio)) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); goto retry; } @@ -116,8 +115,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) refs *= GUP_PIN_COUNTING_BIAS; } - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); } /** @@ -565,8 +563,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, */ if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); return NULL; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e6f4189c1c94..9a15fd3453ff 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2225,7 +2225,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb->fullmm); arch_check_zapped_pmd(vma, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_special_huge(vma)) { + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); @@ -2882,13 +2882,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ if (arch_needs_pgtable_deposit()) zap_deposited_table(mm, pmd); - if (vma_is_special_huge(vma)) + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) return; if (unlikely(is_pmd_migration_entry(old_pmd))) { swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); folio = pfn_swap_entry_folio(entry); + } else if (is_huge_zero_pmd(old_pmd)) { + return; } else { page = pmd_page(old_pmd); folio = page_folio(page); diff --git a/mm/internal.h b/mm/internal.h index 70fa96e61c76..aa30282a774a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -737,8 +737,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx) set_page_private(p, 0); } -extern void prep_compound_page(struct page *page, unsigned int order); - void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern bool free_pages_prepare(struct page *page, unsigned int order); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 327e02fdc029..6257c7f5e941 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -419,18 +419,18 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; - if (pud_devmap(*pud)) + if (pud_trans_huge(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; - if (pmd_devmap(*pmd)) + if (pmd_trans_huge(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte) return 0; ptent = ptep_get(pte); - if (pte_present(ptent) && pte_devmap(ptent)) + if (pte_present(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; diff --git a/mm/memory.c b/mm/memory.c index f09b4a1d09a8..8d1ea1dd6b52 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3848,13 +3848,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a - * VM_PFNMAP VMA. + * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called. * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. 
*/ - if (!vmf->page) + if (!vmf->page || is_fsdax_page(vmf->page)) { + vmf->page = NULL; return wp_pfn_shared(vmf); + } return wp_page_shared(vmf, folio); } diff --git a/mm/memremap.c b/mm/memremap.c index 68099af9df4c..9a8879bf1ae4 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -458,8 +458,13 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap); void free_zone_device_folio(struct folio *folio) { - if (WARN_ON_ONCE(!folio->pgmap->ops || - !folio->pgmap->ops->page_free)) + struct dev_pagemap *pgmap = folio->pgmap; + + if (WARN_ON_ONCE(!pgmap->ops)) + return; + + if (WARN_ON_ONCE(pgmap->type != MEMORY_DEVICE_FS_DAX && + !pgmap->ops->page_free)) return; mem_cgroup_uncharge(folio); @@ -484,26 +489,36 @@ void free_zone_device_folio(struct folio *folio) * For other types of ZONE_DEVICE pages, migration is either * handled differently or not done at all, so there is no need * to clear folio->mapping. + * + * FS DAX pages clear the mapping when the folio->share count hits + * zero which indicating the page has been removed from the file + * system mapping. */ - folio->mapping = NULL; - folio->pgmap->ops->page_free(folio_page(folio, 0)); + if (pgmap->type != MEMORY_DEVICE_FS_DAX) + folio->mapping = NULL; - switch (folio->pgmap->type) { + switch (pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: - put_dev_pagemap(folio->pgmap); + pgmap->ops->page_free(folio_page(folio, 0)); + put_dev_pagemap(pgmap); break; - case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_GENERIC: /* * Reset the refcount to 1 to prepare for handing out the page * again. */ + pgmap->ops->page_free(folio_page(folio, 0)); folio_set_count(folio, 1); break; + case MEMORY_DEVICE_FS_DAX: + wake_up_var(&folio->page); + break; + case MEMORY_DEVICE_PCI_P2PDMA: + pgmap->ops->page_free(folio_page(folio, 0)); break; } } @@ -519,21 +534,3 @@ void zone_device_page_init(struct page *page) lock_page(page); } EXPORT_SYMBOL_GPL(zone_device_page_init); - -#ifdef CONFIG_FS_DAX -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (folio->pgmap->type != MEMORY_DEVICE_FS_DAX) - return false; - - /* - * fsdax page refcounts are 1-based, rather than 0-based: if - * refcount is 1, then the page is free and the refcount is - * stable because nobody holds a reference on the page. - */ - if (folio_ref_sub_return(folio, refs) == 1) - wake_up_var(&folio->_refcount); - return true; -} -EXPORT_SYMBOL(__put_devmap_managed_folio_refs); -#endif /* CONFIG_FS_DAX */ diff --git a/mm/mm_init.c b/mm/mm_init.c index 73e97ce95f58..133640a93d1d 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1026,23 +1026,22 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, } /* - * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC and - * MEMORY_TYPE_FS_DAX pages are released directly to the driver page - * allocator which will set the page count to 1 when allocating the - * page. + * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC are released + * directly to the driver page allocator which will set the page count + * to 1 when allocating the page. * * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have * their refcount reset to one whenever they are freed (ie. after * their refcount drops to 0). 
*/ switch (pgmap->type) { + case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: case MEMORY_DEVICE_PCI_P2PDMA: set_page_count(page, 0); break; - case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_GENERIC: break; } diff --git a/mm/swap.c b/mm/swap.c index fc8281ef4241..7523b65d8caa 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -956,8 +956,6 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs) unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } - if (put_devmap_managed_folio_refs(folio, nr_refs)) - continue; if (folio_ref_sub_and_test(folio, nr_refs)) free_zone_device_folio(folio); continue; From aed877c2b4257a25b2429f165542f86125871071 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:15 +1100 Subject: [PATCH 295/431] device/dax: properly refcount device dax pages when mapping Device DAX pages are currently not reference counted when mapped, instead relying on the devmap PTE bit to ensure mapping code will not get/put references. This requires special handling in various page table walkers, particularly GUP, to manage references on the underlying pgmap to ensure the pages remain valid. However there is no reason these pages can't be refcounted properly at map time. Doning so eliminates the need for the devmap PTE bit, freeing up a precious PTE bit. It also simplifies GUP as it no longer needs to manage the special pgmap references and can instead just treat the pages normally as defined by vm_normal_page(). Link: https://lkml.kernel.org/r/968d3a8e9157e7492e85d065765c027e525f9fc9.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/dax/device.c | 15 +++++++++------ mm/memremap.c | 14 +++++++------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index bc871a34b9cd..328231cfb028 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -125,11 +125,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), + vmf->flags & FAULT_FLAG_WRITE); } static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, @@ -168,11 +169,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -213,11 +215,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, diff --git a/mm/memremap.c b/mm/memremap.c index 9a8879bf1ae4..2aebc1b192da 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -460,11 +460,7 @@ void free_zone_device_folio(struct folio *folio) { struct dev_pagemap *pgmap = folio->pgmap; - if (WARN_ON_ONCE(!pgmap->ops)) - return; - - if (WARN_ON_ONCE(pgmap->type != MEMORY_DEVICE_FS_DAX && - !pgmap->ops->page_free)) + if (WARN_ON_ONCE(!pgmap)) return; mem_cgroup_uncharge(folio); @@ -494,12 +490,15 @@ void free_zone_device_folio(struct folio *folio) * zero which indicating the page has been removed from the file * system mapping. */ - if (pgmap->type != MEMORY_DEVICE_FS_DAX) + if (pgmap->type != MEMORY_DEVICE_FS_DAX && + pgmap->type != MEMORY_DEVICE_GENERIC) folio->mapping = NULL; switch (pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; pgmap->ops->page_free(folio_page(folio, 0)); put_dev_pagemap(pgmap); break; @@ -509,7 +508,6 @@ void free_zone_device_folio(struct folio *folio) * Reset the refcount to 1 to prepare for handing out the page * again. 
*/ - pgmap->ops->page_free(folio_page(folio, 0)); folio_set_count(folio, 1); break; @@ -518,6 +516,8 @@ void free_zone_device_folio(struct folio *folio) break; case MEMORY_DEVICE_PCI_P2PDMA: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; pgmap->ops->page_free(folio_page(folio, 0)); break; } From 937582ee8e8d227c30ec147629a0179131feaa80 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:34 +0000 Subject: [PATCH 296/431] mm/mremap: correctly handle partial mremap() of VMA starting at 0 Patch series "refactor mremap and fix bug", v3. The existing mremap() logic has grown organically over a very long period of time, resulting in code that is in many parts, very difficult to follow and full of subtleties and sources of confusion. In addition, it is difficult to thread state through the operation correctly, as function arguments have expanded, some parameters are expected to be temporarily altered during the operation, others are intended to remain static and some can be overridden. This series completely refactors the mremap implementation, sensibly separating functions, adding comments to explain the more subtle aspects of the implementation and making use of small structs to thread state through everything. The reason for doing so is to lay the groundwork for planned future changes to the mremap logic, changes which require the ability to easily pass around state. Additionally, it would be unhelpful to add yet more logic to code that is already difficult to follow without first refactoring it like this. The first patch in this series additionally fixes a bug when a VMA with start address zero is partially remapped. Tested on real hardware under heavy workload and all self tests are passing. This patch (of 3): Consider the case of a partial mremap() (that results in a VMA split) of an accountable VMA (i.e. which has the VM_ACCOUNT flag set) whose start address is zero, with the MREMAP_MAYMOVE flag specified and a scenario where a move does in fact occur: addr end | | v v |-------------| | vma | |-------------| 0 This move is affected by unmapping the range [addr, end). In order to prevent an incorrect decrement of accounted memory which has already been determined, the mremap() code in move_vma() clears VM_ACCOUNT from the VMA prior to doing so, before reestablishing it in each of the VMAs post-split: addr end | | v v |---| |---| | A | | B | |---| |---| Commit 6b73cff239e5 ("mm: change munmap splitting order and move_vma()") changed this logic such as to determine whether there is a need to do so by establishing account_start and account_end and, in the instance where such an operation is required, assigning them to vma->vm_start and vma->vm_end. Later the code checks if the operation is required for 'A' referenced above thusly: if (account_start) { ... } However, if the VMA described above has vma->vm_start == 0, which is now assigned to account_start, this branch will not be executed. As a result, the VMA 'A' above will remain stripped of its VM_ACCOUNT flag, incorrectly. The fix is to simply convert these variables to booleans and set them as required. Link: https://lkml.kernel.org/r/cover.1741639347.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/dc55cb6db25d97c3d9e460de4986a323fa959676.1741639347.git.lorenzo.stoakes@oracle.com Fixes: 6b73cff239e5 ("mm: change munmap splitting order and move_vma()") Signed-off-by: Lorenzo Stoakes Reviewed-by: Harry Yoo Reviewed-by: Liam R. 
Howlett Reviewed-by: Vlastimil Babka Cc: Signed-off-by: Andrew Morton --- mm/mremap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index cff7f552f909..c3e4c86d0b8d 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -705,8 +705,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; - unsigned long account_start = 0; - unsigned long account_end = 0; + bool account_start = false; + bool account_end = false; unsigned long hiwater_vm; int err = 0; bool need_rmap_locks; @@ -790,9 +790,9 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) - account_start = vma->vm_start; + account_start = true; if (vma->vm_end > old_addr + old_len) - account_end = vma->vm_end; + account_end = true; } /* @@ -832,7 +832,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = 0; + account_start = account_end = false; } if (vm_flags & VM_LOCKED) { From 85ea6bdd88a27317875088b54119bec209c6c09c Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:35 +0000 Subject: [PATCH 297/431] mm/mremap: refactor mremap() system call implementation Place checks into a separate function so the mremap() system call is less egregiously long, remove unnecessary mremap_to() offset_in_page() check and just check that earlier so we keep all such basic checks together. Separate out the VMA in-place expansion, hugetlb and expand/move logic into separate, readable functions. De-duplicate code where possible, add comments and ensure that all error handling explicitly specifies the error at the point of it occurring rather than setting a prefixed error value and implicitly setting (which is bug prone). This lays the groundwork for subsequent patches further simplifying and extending the mremap() implementation. Link: https://lkml.kernel.org/r/fc4a925396dc3cc36791ec92c4d329209e816308.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Harry Yoo Reviewed-by: Vlastimil Babka Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 405 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 251 insertions(+), 154 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index c3e4c86d0b8d..c4abda8dfc57 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -942,33 +942,14 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long ret; unsigned long map_flags = 0; - if (offset_in_page(new_addr)) - return -EINVAL; - + /* Is the new length or address silly? */ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) return -EINVAL; - /* Ensure the old/new locations do not overlap */ + /* Ensure the old/new locations do not overlap. */ if (addr + old_len > new_addr && new_addr + new_len > addr) return -EINVAL; - /* - * move_vma() need us to stay 4 maps below the threshold, otherwise - * it will bail out at the very beginning. - * That is a problem if we have already unmaped the regions here - * (new_addr, and old_addr), because userspace will not know the - * state of the vma's after it gets -ENOMEM. 
- * So, to avoid such scenario we can pre-compute if the whole - * operation has high chances to success map-wise. - * Worst-scenario case is when both vma's (new_addr and old_addr) get - * split in 3 before unmapping it. - * That means 2 more maps (1 for each) to the ones we already hold. - * Check whether current map count plus 2 still leads us to 4 maps below - * the threshold, otherwise return -ENOMEM here to be more safe. - */ - if ((mm->map_count + 2) >= sysctl_max_map_count - 3) - return -ENOMEM; - if (flags & MREMAP_FIXED) { /* * In mremap_to(). @@ -1035,6 +1016,218 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) return 1; } +/* Do the mremap() flags require that the new_addr parameter be specified? */ +static bool implies_new_addr(unsigned long flags) +{ + return flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); +} + +/* + * Are the parameters passed to mremap() valid? If so return 0, otherwise return + * error. + */ +static unsigned long check_mremap_params(unsigned long addr, + unsigned long flags, + unsigned long old_len, + unsigned long new_len, + unsigned long new_addr) +{ + /* Ensure no unexpected flag values. */ + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) + return -EINVAL; + + /* Start address must be page-aligned. */ + if (offset_in_page(addr)) + return -EINVAL; + + /* + * We allow a zero old-len as a special case + * for DOS-emu "duplicate shm area" thing. But + * a zero new-len is nonsensical. + */ + if (!PAGE_ALIGN(new_len)) + return -EINVAL; + + /* Remainder of checks are for cases with specific new_addr. */ + if (!implies_new_addr(flags)) + return 0; + + /* The new address must be page-aligned. */ + if (offset_in_page(new_addr)) + return -EINVAL; + + /* A fixed address implies a move. */ + if (!(flags & MREMAP_MAYMOVE)) + return -EINVAL; + + /* MREMAP_DONTUNMAP does not allow resizing in the process. */ + if (flags & MREMAP_DONTUNMAP && old_len != new_len) + return -EINVAL; + + /* + * move_vma() need us to stay 4 maps below the threshold, otherwise + * it will bail out at the very beginning. + * That is a problem if we have already unmaped the regions here + * (new_addr, and old_addr), because userspace will not know the + * state of the vma's after it gets -ENOMEM. + * So, to avoid such scenario we can pre-compute if the whole + * operation has high chances to success map-wise. + * Worst-scenario case is when both vma's (new_addr and old_addr) get + * split in 3 before unmapping it. + * That means 2 more maps (1 for each) to the ones we already hold. + * Check whether current map count plus 2 still leads us to 4 maps below + * the threshold, otherwise return -ENOMEM here to be more safe. + */ + if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3) + return -ENOMEM; + + return 0; +} + +/* + * We know we can expand the VMA in-place by delta pages, so do so. + * + * If we discover the VMA is locked, update mm_struct statistics accordingly and + * indicate so to the caller. 
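+ *
+ * Returns 0 on success or -ENOMEM on failure; the caller checks the
+ * result with IS_ERR_VALUE().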
+ */ +static unsigned long expand_vma_inplace(struct vm_area_struct *vma, + unsigned long delta, bool *locked) +{ + struct mm_struct *mm = current->mm; + long pages = delta >> PAGE_SHIFT; + VMA_ITERATOR(vmi, mm, vma->vm_end); + long charged = 0; + + if (vma->vm_flags & VM_ACCOUNT) { + if (security_vm_enough_memory_mm(mm, pages)) + return -ENOMEM; + + charged = pages; + } + + /* + * Function vma_merge_extend() is called on the + * extension we are adding to the already existing vma, + * vma_merge_extend() will merge this extension with the + * already existing vma (expand operation itself) and + * possibly also with the next vma if it becomes + * adjacent to the expanded vma and otherwise + * compatible. + */ + vma = vma_merge_extend(&vmi, vma, delta); + if (!vma) { + vm_unacct_memory(charged); + return -ENOMEM; + } + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + *locked = true; + } + + return 0; +} + +static bool align_hugetlb(struct vm_area_struct *vma, + unsigned long addr, + unsigned long new_addr, + unsigned long *old_len_ptr, + unsigned long *new_len_ptr, + unsigned long *delta_ptr) +{ + unsigned long old_len = *old_len_ptr; + unsigned long new_len = *new_len_ptr; + struct hstate *h __maybe_unused = hstate_vma(vma); + + old_len = ALIGN(old_len, huge_page_size(h)); + new_len = ALIGN(new_len, huge_page_size(h)); + + /* addrs must be huge page aligned */ + if (addr & ~huge_page_mask(h)) + return false; + if (new_addr & ~huge_page_mask(h)) + return false; + + /* + * Don't allow remap expansion, because the underlying hugetlb + * reservation is not yet capable to handle split reservation. + */ + if (new_len > old_len) + return false; + + *old_len_ptr = old_len; + *new_len_ptr = new_len; + *delta_ptr = abs_diff(old_len, new_len); + return true; +} + +/* + * We are mremap()'ing without specifying a fixed address to move to, but are + * requesting that the VMA's size be increased. + * + * Try to do so in-place, if this fails, then move the VMA to a new location to + * action the change. + */ +static unsigned long expand_vma(struct vm_area_struct *vma, + unsigned long addr, unsigned long old_len, + unsigned long new_len, unsigned long flags, + bool *locked_ptr, unsigned long *new_addr_ptr, + struct vm_userfaultfd_ctx *uf_ptr, + struct list_head *uf_unmap_ptr) +{ + unsigned long err; + unsigned long map_flags; + unsigned long new_addr; /* We ignore any user-supplied one. */ + pgoff_t pgoff; + + err = resize_is_valid(vma, addr, old_len, new_len, flags); + if (err) + return err; + + /* + * [addr, old_len) spans precisely to the end of the VMA, so try to + * expand it in-place. + */ + if (old_len == vma->vm_end - addr && + vma_expandable(vma, new_len - old_len)) { + err = expand_vma_inplace(vma, new_len - old_len, locked_ptr); + if (IS_ERR_VALUE(err)) + return err; + + /* + * We want to populate the newly expanded portion of the VMA to + * satisfy the expectation that mlock()'ing a VMA maintains all + * of its pages in memory. + */ + if (*locked_ptr) + *new_addr_ptr = addr; + + /* OK we're done! */ + return addr; + } + + /* + * We weren't able to just expand or shrink the area, + * we need to create a new one and move it. + */ + + /* We're not allowed to move the VMA, so error out. */ + if (!(flags & MREMAP_MAYMOVE)) + return -ENOMEM; + + /* Find a new location to move the VMA to. */ + map_flags = (vma->vm_flags & VM_MAYSHARE) ? 
MAP_SHARED : 0; + pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); + new_addr = get_unmapped_area(vma->vm_file, 0, new_len, pgoff, map_flags); + if (IS_ERR_VALUE(new_addr)) + return new_addr; + *new_addr_ptr = new_addr; + + return move_vma(vma, addr, old_len, new_len, new_addr, + locked_ptr, flags, uf_ptr, uf_unmap_ptr); +} + /* * Expand (or shrink) an existing mapping, potentially moving it at the * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) @@ -1048,7 +1241,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long ret = -EINVAL; + unsigned long ret; + unsigned long delta; bool locked = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; LIST_HEAD(uf_unmap_early); @@ -1067,70 +1261,38 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, */ addr = untagged_addr(addr); - if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) - return ret; - - if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) - return ret; - - /* - * MREMAP_DONTUNMAP is always a move and it does not allow resizing - * in the process. - */ - if (flags & MREMAP_DONTUNMAP && - (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) - return ret; - - - if (offset_in_page(addr)) + ret = check_mremap_params(addr, flags, old_len, new_len, new_addr); + if (ret) return ret; old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); + delta = abs_diff(old_len, new_len); - /* - * We allow a zero old-len as a special case - * for DOS-emu "duplicate shm area" thing. But - * a zero new-len is nonsensical. - */ - if (!new_len) - return ret; - - if (mmap_write_lock_killable(current->mm)) + if (mmap_write_lock_killable(mm)) return -EINTR; + vma = vma_lookup(mm, addr); if (!vma) { ret = -EFAULT; goto out; } - /* Don't allow remapping vmas when they have already been sealed */ + /* If mseal()'d, mremap() is prohibited. */ if (!can_modify_vma(vma)) { ret = -EPERM; goto out; } - if (is_vm_hugetlb_page(vma)) { - struct hstate *h __maybe_unused = hstate_vma(vma); - - old_len = ALIGN(old_len, huge_page_size(h)); - new_len = ALIGN(new_len, huge_page_size(h)); - - /* addrs must be huge page aligned */ - if (addr & ~huge_page_mask(h)) - goto out; - if (new_addr & ~huge_page_mask(h)) - goto out; - - /* - * Don't allow remap expansion, because the underlying hugetlb - * reservation is not yet capable to handle split reservation. - */ - if (new_len > old_len) - goto out; + /* Align to hugetlb page size, if required. */ + if (is_vm_hugetlb_page(vma) && + !align_hugetlb(vma, addr, new_addr, &old_len, &new_len, &delta)) { + ret = -EINVAL; + goto out; } - if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) { + /* Are we RELOCATING the VMA to a SPECIFIC address? */ + if (implies_new_addr(flags)) { ret = mremap_to(addr, old_len, new_addr, new_len, &locked, flags, &uf, &uf_unmap_early, &uf_unmap); @@ -1138,109 +1300,44 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, } /* - * Always allow a shrinking remap: that just unmaps - * the unnecessary pages.. - * do_vmi_munmap does all the needed commit accounting, and - * unlocks the mmap_lock if so directed. + * From here on in we are only RESIZING the VMA, attempting to do so + * in-place, moving the VMA if we cannot. */ - if (old_len >= new_len) { + + /* NO-OP CASE - resizing to the same size. */ + if (new_len == old_len) { + ret = addr; + goto out; + } + + /* SHRINK CASE. Can always be done in-place. 
*/ + if (new_len < old_len) { VMA_ITERATOR(vmi, mm, addr + new_len); - if (old_len == new_len) { - ret = addr; - goto out; - } - - ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len, + /* + * Simply unmap the shrunken portion of the VMA. This does all + * the needed commit accounting, unlocking the mmap lock. + */ + ret = do_vmi_munmap(&vmi, mm, addr + new_len, delta, &uf_unmap, true); if (ret) goto out; + /* We succeeded, mmap lock released for us. */ ret = addr; goto out_unlocked; } - /* - * Ok, we need to grow.. - */ - ret = resize_is_valid(vma, addr, old_len, new_len, flags); - if (ret) - goto out; + /* EXPAND case. We try to do in-place, if we can't, then we move it. */ + ret = expand_vma(vma, addr, old_len, new_len, flags, &locked, &new_addr, + &uf, &uf_unmap); - /* old_len exactly to the end of the area.. - */ - if (old_len == vma->vm_end - addr) { - unsigned long delta = new_len - old_len; - - /* can we just expand the current mapping? */ - if (vma_expandable(vma, delta)) { - long pages = delta >> PAGE_SHIFT; - VMA_ITERATOR(vmi, mm, vma->vm_end); - long charged = 0; - - if (vma->vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, pages)) { - ret = -ENOMEM; - goto out; - } - charged = pages; - } - - /* - * Function vma_merge_extend() is called on the - * extension we are adding to the already existing vma, - * vma_merge_extend() will merge this extension with the - * already existing vma (expand operation itself) and - * possibly also with the next vma if it becomes - * adjacent to the expanded vma and otherwise - * compatible. - */ - vma = vma_merge_extend(&vmi, vma, delta); - if (!vma) { - vm_unacct_memory(charged); - ret = -ENOMEM; - goto out; - } - - vm_stat_account(mm, vma->vm_flags, pages); - if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; - locked = true; - new_addr = addr; - } - ret = addr; - goto out; - } - } - - /* - * We weren't able to just expand or shrink the area, - * we need to create a new one and move it.. - */ - ret = -ENOMEM; - if (flags & MREMAP_MAYMOVE) { - unsigned long map_flags = 0; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; - - new_addr = get_unmapped_area(vma->vm_file, 0, new_len, - vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(new_addr)) { - ret = new_addr; - goto out; - } - - ret = move_vma(vma, addr, old_len, new_len, new_addr, - &locked, flags, &uf, &uf_unmap); - } out: if (offset_in_page(ret)) locked = false; - mmap_write_unlock(current->mm); + mmap_write_unlock(mm); if (locked && new_len > old_len) - mm_populate(new_addr + old_len, new_len - old_len); + mm_populate(new_addr + old_len, delta); out_unlocked: userfaultfd_unmap_complete(mm, &uf_unmap_early); mremap_userfaultfd_complete(&uf, addr, ret, old_len); From 221bf5cac5c2f3b9d8a07f4b2738225a59962945 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:36 +0000 Subject: [PATCH 298/431] mm/mremap: introduce and use vma_remap_struct threaded state A number of mremap() calls both pass around and modify a large number of parameters, making the code less readable and often repeatedly having to determine things such as VMA, size delta, and more. Avoid this by using the common pattern of passing a state object through the operation, updating it as we go. We introduce the vma_remap_struct or 'VRM' for this purpose. This also gives us the ability to accumulate further state through the operation that would otherwise require awkward and error-prone pointer passing. 
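To make the shape of this concrete, here is a rough, trimmed-down sketch of the threaded state (field names are taken from the struct vma_remap_struct this patch introduces; see the diff below for the complete definition, including the userfaultfd and mmap-lock fields):

    struct vma_remap_struct {
            /* User-provided state. */
            unsigned long addr;     /* Address we are remapping from. */
            unsigned long old_len;  /* Length of range being remapped. */
            unsigned long new_len;  /* Desired new length of mapping. */
            unsigned long flags;    /* User-specified MREMAP_* flags. */
            unsigned long new_addr; /* Optionally, desired new address. */

            /* State determined as the operation proceeds. */
            struct vm_area_struct *vma;     /* VMA being remapped. */
            unsigned long delta;            /* Absolute delta of old_len/new_len. */
            bool mlocked;                   /* Was the VMA mlock()'d? */
            enum mremap_type remap_type;    /* Shrink, expand or no resize. */
    };
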
We can also now trivially define helper functions that operate on a VRM object. This pattern has proven itself to be very powerful when implemented for VMA merge, VMA unmapping and memory mapping operations, so it is battle-tested and functional. We both introduce the data structure and use it, introducing helper functions as needed to make things readable, we move some state such as mmap lock and mlock() status to the VRM, we introduce a means of classifying the type of mremap() operation and de-duplicate the get_unmapped_area() lookup. We also neatly thread userfaultfd state throughout the operation. Note that there is further refactoring to be done, chiefly adjust move_vma() to accept a VRM parameter. We defer this as there is pre-requisite work required to be able to do so which we will do in a subsequent patch. Link: https://lkml.kernel.org/r/27951739dc83b2b1523b81fa9c009ba348388d40.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Reviewed-by: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 592 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 374 insertions(+), 218 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index c4abda8dfc57..af022e3b89e2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -32,6 +32,44 @@ #include "internal.h" +/* Classify the kind of remap operation being performed. */ +enum mremap_type { + MREMAP_INVALID, /* Initial state. */ + MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */ + MREMAP_SHRINK, /* old_len > new_len. */ + MREMAP_EXPAND, /* old_len < new_len. */ +}; + +/* + * Describes a VMA mremap() operation and is threaded throughout it. + * + * Any of the fields may be mutated by the operation, however these values will + * always accurately reflect the remap (for instance, we may adjust lengths and + * delta to account for hugetlb alignment). + */ +struct vma_remap_struct { + /* User-provided state. */ + unsigned long addr; /* User-specified address from which we remap. */ + unsigned long old_len; /* Length of range being remapped. */ + unsigned long new_len; /* Desired new length of mapping. */ + unsigned long flags; /* user-specified MREMAP_* flags. */ + unsigned long new_addr; /* Optionally, desired new address. */ + + /* uffd state. */ + struct vm_userfaultfd_ctx *uf; + struct list_head *uf_unmap_early; + struct list_head *uf_unmap; + + /* VMA state, determined in do_mremap(). */ + struct vm_area_struct *vma; + + /* Internal state, determined in do_mremap(). */ + unsigned long delta; /* Absolute delta of old_len,new_len. */ + bool mlocked; /* Was the VMA mlock()'d? */ + enum mremap_type remap_type; /* expand, shrink, etc. */ + bool mmap_locked; /* Is mm currently write-locked? */ +}; + static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -693,10 +731,95 @@ unsigned long move_page_tables(struct vm_area_struct *vma, return len + old_addr - old_end; /* how much done */ } +/* Set vrm->delta to the difference in VMA size specified by user. */ +static void vrm_set_delta(struct vma_remap_struct *vrm) +{ + vrm->delta = abs_diff(vrm->old_len, vrm->new_len); +} + +/* Determine what kind of remap this is - shrink, expand or no resize at all. 
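+ * MREMAP_INVALID is only the enum's initial value; it is never returned
+ * here.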
*/ +static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm) +{ + if (vrm->delta == 0) + return MREMAP_NO_RESIZE; + + if (vrm->old_len > vrm->new_len) + return MREMAP_SHRINK; + + return MREMAP_EXPAND; +} + +/* + * When moving a VMA to vrm->new_adr, does this result in the new and old VMAs + * overlapping? + */ +static bool vrm_overlaps(struct vma_remap_struct *vrm) +{ + unsigned long start_old = vrm->addr; + unsigned long start_new = vrm->new_addr; + unsigned long end_old = vrm->addr + vrm->old_len; + unsigned long end_new = vrm->new_addr + vrm->new_len; + + /* + * start_old end_old + * |-----------| + * | | + * |-----------| + * |-------------| + * | | + * |-------------| + * start_new end_new + */ + if (end_old > start_new && end_new > start_old) + return true; + + return false; +} + +/* Do the mremap() flags require that the new_addr parameter be specified? */ +static bool vrm_implies_new_addr(struct vma_remap_struct *vrm) +{ + return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); +} + +/* + * Find an unmapped area for the requested vrm->new_addr. + * + * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only + * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to + * mmap(), otherwise this is equivalent to mmap() specifying a NULL address. + * + * Returns 0 on success (with vrm->new_addr updated), or an error code upon + * failure. + */ +static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm) +{ + struct vm_area_struct *vma = vrm->vma; + unsigned long map_flags = 0; + /* Page Offset _into_ the VMA. */ + pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT; + pgoff_t pgoff = vma->vm_pgoff + internal_pgoff; + unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0; + unsigned long res; + + if (vrm->flags & MREMAP_FIXED) + map_flags |= MAP_FIXED; + if (vma->vm_flags & VM_MAYSHARE) + map_flags |= MAP_SHARED; + + res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff, + map_flags); + if (IS_ERR_VALUE(res)) + return res; + + vrm->new_addr = res; + return 0; +} + static unsigned long move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, - bool *locked, unsigned long flags, + bool *mlocked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) { long to_account = new_len - old_len; @@ -837,7 +960,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; - *locked = true; + *mlocked = true; } mm->hiwater_vm = hiwater_vm; @@ -860,18 +983,15 @@ static unsigned long move_vma(struct vm_area_struct *vma, * resize_is_valid() - Ensure the vma can be resized to the new length at the give * address. * - * @vma: The vma to resize - * @addr: The old address - * @old_len: The current size - * @new_len: The desired size - * @flags: The vma flags - * * Return 0 on success, error otherwise. */ -static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, - unsigned long old_len, unsigned long new_len, unsigned long flags) +static int resize_is_valid(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + unsigned long addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long new_len = vrm->new_len; unsigned long pgoff; /* @@ -883,11 +1003,12 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, * behavior. 
As a result, fail such attempts. */ if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { - pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid); + pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", + current->comm, current->pid); return -EINVAL; } - if ((flags & MREMAP_DONTUNMAP) && + if ((vrm->flags & MREMAP_DONTUNMAP) && (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) return -EINVAL; @@ -907,99 +1028,122 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return -EFAULT; - if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta)) return -EAGAIN; - if (!may_expand_vm(mm, vma->vm_flags, - (new_len - old_len) >> PAGE_SHIFT)) + if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT)) return -ENOMEM; return 0; } /* - * mremap_to() - remap a vma to a new location - * @addr: The old address - * @old_len: The old size - * @new_addr: The target address - * @new_len: The new size - * @locked: If the returned vma is locked (VM_LOCKED) - * @flags: the mremap flags - * @uf: The mremap userfaultfd context - * @uf_unmap_early: The userfaultfd unmap early context - * @uf_unmap: The userfaultfd unmap context + * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so + * execute this, optionally dropping the mmap lock when we do so. * - * Returns: The new address of the vma or an error. + * In both cases this invalidates the VMA, however if we don't drop the lock, + * then load the correct VMA into vrm->vma afterwards. */ -static unsigned long mremap_to(unsigned long addr, unsigned long old_len, - unsigned long new_addr, unsigned long new_len, bool *locked, - unsigned long flags, struct vm_userfaultfd_ctx *uf, - struct list_head *uf_unmap_early, - struct list_head *uf_unmap) +static unsigned long shrink_vma(struct vma_remap_struct *vrm, + bool drop_lock) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long ret; - unsigned long map_flags = 0; + unsigned long unmap_start = vrm->addr + vrm->new_len; + unsigned long unmap_bytes = vrm->delta; + unsigned long res; + VMA_ITERATOR(vmi, mm, unmap_start); + + VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK); + + res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes, + vrm->uf_unmap, drop_lock); + vrm->vma = NULL; /* Invalidated. */ + if (res) + return res; + + /* + * If we've not dropped the lock, then we should reload the VMA to + * replace the invalidated VMA with the one that may have now been + * split. + */ + if (drop_lock) { + vrm->mmap_locked = false; + } else { + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; + } + + return 0; +} + +/* + * mremap_to() - remap a vma to a new location. + * Returns: The new address of the vma or an error. + */ +static unsigned long mremap_to(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + unsigned long err; /* Is the new length or address silly? */ - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) + if (vrm->new_len > TASK_SIZE || + vrm->new_addr > TASK_SIZE - vrm->new_len) return -EINVAL; - /* Ensure the old/new locations do not overlap. */ - if (addr + old_len > new_addr && new_addr + new_len > addr) + if (vrm_overlaps(vrm)) return -EINVAL; - if (flags & MREMAP_FIXED) { + if (vrm->flags & MREMAP_FIXED) { /* * In mremap_to(). 
* VMA is moved to dst address, and munmap dst first. * do_munmap will check if dst is sealed. */ - ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); - if (ret) - return ret; + err = do_munmap(mm, vrm->new_addr, vrm->new_len, + vrm->uf_unmap_early); + vrm->vma = NULL; /* Invalidated. */ + if (err) + return err; + + /* + * If we remap a portion of a VMA elsewhere in the same VMA, + * this can invalidate the old VMA. Reset. + */ + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; } - if (old_len > new_len) { - ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); - if (ret) - return ret; - old_len = new_len; + if (vrm->remap_type == MREMAP_SHRINK) { + err = shrink_vma(vrm, /* drop_lock= */false); + if (err) + return err; + + /* Set up for the move now shrink has been executed. */ + vrm->old_len = vrm->new_len; } - vma = vma_lookup(mm, addr); - if (!vma) - return -EFAULT; - - ret = resize_is_valid(vma, addr, old_len, new_len, flags); - if (ret) - return ret; + err = resize_is_valid(vrm); + if (err) + return err; /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ - if (flags & MREMAP_DONTUNMAP && - !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { - return -ENOMEM; + if (vrm->flags & MREMAP_DONTUNMAP) { + vm_flags_t vm_flags = vrm->vma->vm_flags; + unsigned long pages = vrm->old_len >> PAGE_SHIFT; + + if (!may_expand_vm(mm, vm_flags, pages)) + return -ENOMEM; } - if (flags & MREMAP_FIXED) - map_flags |= MAP_FIXED; + err = vrm_set_new_addr(vrm); + if (err) + return err; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; - - ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(ret)) - return ret; - - /* We got a new mapping */ - if (!(flags & MREMAP_FIXED)) - new_addr = ret; - - return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, - uf, uf_unmap); + return move_vma(vrm->vma, vrm->addr, vrm->old_len, vrm->new_len, + vrm->new_addr, &vrm->mlocked, vrm->flags, + vrm->uf, vrm->uf_unmap); } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) @@ -1016,22 +1160,33 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) return 1; } -/* Do the mremap() flags require that the new_addr parameter be specified? */ -static bool implies_new_addr(unsigned long flags) +/* Determine whether we are actually able to execute an in-place expansion. */ +static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm) { - return flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); + /* Number of bytes from vrm->addr to end of VMA. */ + unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr; + + /* If end of range aligns to end of VMA, we can just expand in-place. */ + if (suffix_bytes != vrm->old_len) + return false; + + /* Check whether this is feasible. */ + if (!vma_expandable(vrm->vma, vrm->delta)) + return false; + + return true; } /* * Are the parameters passed to mremap() valid? If so return 0, otherwise return * error. */ -static unsigned long check_mremap_params(unsigned long addr, - unsigned long flags, - unsigned long old_len, - unsigned long new_len, - unsigned long new_addr) +static unsigned long check_mremap_params(struct vma_remap_struct *vrm) + { + unsigned long addr = vrm->addr; + unsigned long flags = vrm->flags; + /* Ensure no unexpected flag values. 
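For readers less familiar with this path, here is a small, illustrative userspace program (not part of the series; it assumes a 4 KiB page size) exercising the case mremap_to() services: the destination range is unmapped and the pages are moved to the requested fixed address.

/*
 * Illustrative userspace exercise of the mremap_to() path: move an
 * anonymous mapping to a fixed destination with
 * MREMAP_MAYMOVE | MREMAP_FIXED. Sizes assume 4 KiB pages; the reserved
 * landing area is only there to give us a known-valid fixed address.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
        size_t len = 4096 * 4;
        /* Source mapping holding some data. */
        char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Reserve a landing area so the fixed address is known to be valid. */
        char *dst = mmap(NULL, len, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *moved;

        if (src == MAP_FAILED || dst == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        strcpy(src, "hello from the old mapping");

        /* Kernel-side this is mremap_to(): munmap dst, then move the VMA. */
        moved = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
        if (moved == MAP_FAILED) {
                perror("mremap");
                return EXIT_FAILURE;
        }

        printf("moved to %p: %s\n", (void *)moved, moved);
        return EXIT_SUCCESS;
}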
*/ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) return -EINVAL; @@ -1045,15 +1200,15 @@ static unsigned long check_mremap_params(unsigned long addr, * for DOS-emu "duplicate shm area" thing. But * a zero new-len is nonsensical. */ - if (!PAGE_ALIGN(new_len)) + if (!PAGE_ALIGN(vrm->new_len)) return -EINVAL; /* Remainder of checks are for cases with specific new_addr. */ - if (!implies_new_addr(flags)) + if (!vrm_implies_new_addr(vrm)) return 0; /* The new address must be page-aligned. */ - if (offset_in_page(new_addr)) + if (offset_in_page(vrm->new_addr)) return -EINVAL; /* A fixed address implies a move. */ @@ -1061,7 +1216,7 @@ static unsigned long check_mremap_params(unsigned long addr, return -EINVAL; /* MREMAP_DONTUNMAP does not allow resizing in the process. */ - if (flags & MREMAP_DONTUNMAP && old_len != new_len) + if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len) return -EINVAL; /* @@ -1090,11 +1245,11 @@ static unsigned long check_mremap_params(unsigned long addr, * If we discover the VMA is locked, update mm_struct statistics accordingly and * indicate so to the caller. */ -static unsigned long expand_vma_inplace(struct vm_area_struct *vma, - unsigned long delta, bool *locked) +static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; - long pages = delta >> PAGE_SHIFT; + long pages = vrm->delta >> PAGE_SHIFT; + struct vm_area_struct *vma = vrm->vma; VMA_ITERATOR(vmi, mm, vma->vm_end); long charged = 0; @@ -1114,7 +1269,7 @@ static unsigned long expand_vma_inplace(struct vm_area_struct *vma, * adjacent to the expanded vma and otherwise * compatible. */ - vma = vma_merge_extend(&vmi, vma, delta); + vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta); if (!vma) { vm_unacct_memory(charged); return -ENOMEM; @@ -1123,42 +1278,34 @@ static unsigned long expand_vma_inplace(struct vm_area_struct *vma, vm_stat_account(mm, vma->vm_flags, pages); if (vma->vm_flags & VM_LOCKED) { mm->locked_vm += pages; - *locked = true; + vrm->mlocked = true; } return 0; } -static bool align_hugetlb(struct vm_area_struct *vma, - unsigned long addr, - unsigned long new_addr, - unsigned long *old_len_ptr, - unsigned long *new_len_ptr, - unsigned long *delta_ptr) +static bool align_hugetlb(struct vma_remap_struct *vrm) { - unsigned long old_len = *old_len_ptr; - unsigned long new_len = *new_len_ptr; - struct hstate *h __maybe_unused = hstate_vma(vma); + struct hstate *h __maybe_unused = hstate_vma(vrm->vma); - old_len = ALIGN(old_len, huge_page_size(h)); - new_len = ALIGN(new_len, huge_page_size(h)); + vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h)); + vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h)); /* addrs must be huge page aligned */ - if (addr & ~huge_page_mask(h)) + if (vrm->addr & ~huge_page_mask(h)) return false; - if (new_addr & ~huge_page_mask(h)) + if (vrm->new_addr & ~huge_page_mask(h)) return false; /* * Don't allow remap expansion, because the underlying hugetlb * reservation is not yet capable to handle split reservation. */ - if (new_len > old_len) + if (vrm->new_len > vrm->old_len) return false; - *old_len_ptr = old_len; - *new_len_ptr = new_len; - *delta_ptr = abs_diff(old_len, new_len); + vrm_set_delta(vrm); + return true; } @@ -1169,19 +1316,16 @@ static bool align_hugetlb(struct vm_area_struct *vma, * Try to do so in-place, if this fails, then move the VMA to a new location to * action the change. 
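The parameter rules enforced above can be condensed into a tiny standalone model. Everything below is illustrative only: mremap_args_ok() is a hypothetical helper, the flag values mirror the UAPI definitions, and a 4 KiB page size is assumed.

/*
 * Standalone userspace model of the argument rules check_mremap_params()
 * enforces, reduced to plain arithmetic for illustration.
 */
#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE               4096UL
#define MREMAP_MAYMOVE          1
#define MREMAP_FIXED            2
#define MREMAP_DONTUNMAP        4

static bool mremap_args_ok(unsigned long flags, unsigned long old_len,
                           unsigned long new_len, unsigned long new_addr)
{
        /* Only the three known flags are accepted. */
        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
                return false;
        /* A zero new length is nonsensical. */
        if (!new_len)
                return false;
        /* Remaining rules only apply when a new address is implied. */
        if (!(flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)))
                return true;
        /* The new address must be page aligned. */
        if (new_addr & (PAGE_SIZE - 1))
                return false;
        /* A fixed address implies a move. */
        if ((flags & MREMAP_FIXED) && !(flags & MREMAP_MAYMOVE))
                return false;
        /* MREMAP_DONTUNMAP does not allow resizing. */
        if ((flags & MREMAP_DONTUNMAP) && old_len != new_len)
                return false;
        return true;
}

int main(void)
{
        assert(mremap_args_ok(MREMAP_MAYMOVE, PAGE_SIZE, 2 * PAGE_SIZE, 0));
        assert(!mremap_args_ok(MREMAP_FIXED, PAGE_SIZE, PAGE_SIZE, 0x10000));
        assert(!mremap_args_ok(MREMAP_DONTUNMAP | MREMAP_MAYMOVE,
                               PAGE_SIZE, 2 * PAGE_SIZE, 0));
        return 0;
}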
*/ -static unsigned long expand_vma(struct vm_area_struct *vma, - unsigned long addr, unsigned long old_len, - unsigned long new_len, unsigned long flags, - bool *locked_ptr, unsigned long *new_addr_ptr, - struct vm_userfaultfd_ctx *uf_ptr, - struct list_head *uf_unmap_ptr) +static unsigned long expand_vma(struct vma_remap_struct *vrm) { unsigned long err; - unsigned long map_flags; - unsigned long new_addr; /* We ignore any user-supplied one. */ - pgoff_t pgoff; + struct vm_area_struct *vma = vrm->vma; + unsigned long addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long new_len = vrm->new_len; + unsigned long flags = vrm->flags; - err = resize_is_valid(vma, addr, old_len, new_len, flags); + err = resize_is_valid(vrm); if (err) return err; @@ -1189,10 +1333,9 @@ static unsigned long expand_vma(struct vm_area_struct *vma, * [addr, old_len) spans precisely to the end of the VMA, so try to * expand it in-place. */ - if (old_len == vma->vm_end - addr && - vma_expandable(vma, new_len - old_len)) { - err = expand_vma_inplace(vma, new_len - old_len, locked_ptr); - if (IS_ERR_VALUE(err)) + if (vrm_can_expand_in_place(vrm)) { + err = expand_vma_in_place(vrm); + if (err) return err; /* @@ -1200,8 +1343,8 @@ static unsigned long expand_vma(struct vm_area_struct *vma, * satisfy the expectation that mlock()'ing a VMA maintains all * of its pages in memory. */ - if (*locked_ptr) - *new_addr_ptr = addr; + if (vrm->mlocked) + vrm->new_addr = addr; /* OK we're done! */ return addr; @@ -1217,15 +1360,103 @@ static unsigned long expand_vma(struct vm_area_struct *vma, return -ENOMEM; /* Find a new location to move the VMA to. */ - map_flags = (vma->vm_flags & VM_MAYSHARE) ? MAP_SHARED : 0; - pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); - new_addr = get_unmapped_area(vma->vm_file, 0, new_len, pgoff, map_flags); - if (IS_ERR_VALUE(new_addr)) - return new_addr; - *new_addr_ptr = new_addr; + err = vrm_set_new_addr(vrm); + if (err) + return err; - return move_vma(vma, addr, old_len, new_len, new_addr, - locked_ptr, flags, uf_ptr, uf_unmap_ptr); + return move_vma(vma, addr, old_len, new_len, vrm->new_addr, + &vrm->mlocked, flags, vrm->uf, vrm->uf_unmap); +} + +/* + * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the + * first available address to perform the operation. + */ +static unsigned long mremap_at(struct vma_remap_struct *vrm) +{ + unsigned long res; + + switch (vrm->remap_type) { + case MREMAP_INVALID: + break; + case MREMAP_NO_RESIZE: + /* NO-OP CASE - resizing to the same size. */ + return vrm->addr; + case MREMAP_SHRINK: + /* + * SHRINK CASE. Can always be done in-place. + * + * Simply unmap the shrunken portion of the VMA. This does all + * the needed commit accounting, and we indicate that the mmap + * lock should be dropped. + */ + res = shrink_vma(vrm, /* drop_lock= */true); + if (res) + return res; + + return vrm->addr; + case MREMAP_EXPAND: + return expand_vma(vrm); + } + + BUG(); +} + +static unsigned long do_mremap(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long ret; + + ret = check_mremap_params(vrm); + if (ret) + return ret; + + vrm->old_len = PAGE_ALIGN(vrm->old_len); + vrm->new_len = PAGE_ALIGN(vrm->new_len); + vrm_set_delta(vrm); + + if (mmap_write_lock_killable(mm)) + return -EINTR; + vrm->mmap_locked = true; + + vma = vrm->vma = vma_lookup(mm, vrm->addr); + if (!vma) { + ret = -EFAULT; + goto out; + } + + /* If mseal()'d, mremap() is prohibited. 
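Since the mremap_at()/mremap_to() dispatch hinges on vrm_remap_type() and, for the relocation path, vrm_overlaps(), a reduced userspace model of those two predicates may help. The names remap_type() and ranges_overlap() below are hypothetical; the checks mirror the helpers introduced earlier in this patch.

/*
 * Userspace model (illustrative only) of the classification and overlap
 * predicates used by the mremap dispatch above.
 */
#include <assert.h>
#include <stdbool.h>

enum mremap_type { MREMAP_INVALID, MREMAP_NO_RESIZE, MREMAP_SHRINK, MREMAP_EXPAND };

static enum mremap_type remap_type(unsigned long old_len, unsigned long new_len)
{
        if (old_len == new_len)
                return MREMAP_NO_RESIZE;
        return old_len > new_len ? MREMAP_SHRINK : MREMAP_EXPAND;
}

/* True if [addr, addr + old_len) and [new_addr, new_addr + new_len) overlap. */
static bool ranges_overlap(unsigned long addr, unsigned long old_len,
                           unsigned long new_addr, unsigned long new_len)
{
        return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
        assert(remap_type(0x4000, 0x4000) == MREMAP_NO_RESIZE);
        assert(remap_type(0x4000, 0x2000) == MREMAP_SHRINK);
        assert(remap_type(0x2000, 0x4000) == MREMAP_EXPAND);

        /* Adjacent ranges do not overlap; intersecting ones do. */
        assert(!ranges_overlap(0x10000, 0x4000, 0x14000, 0x4000));
        assert(ranges_overlap(0x10000, 0x4000, 0x12000, 0x4000));
        return 0;
}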
*/ + if (!can_modify_vma(vma)) { + ret = -EPERM; + goto out; + } + + /* Align to hugetlb page size, if required. */ + if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) { + ret = -EINVAL; + goto out; + } + + vrm->remap_type = vrm_remap_type(vrm); + + /* Actually execute mremap. */ + ret = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm); + +out: + if (vrm->mmap_locked) { + mmap_write_unlock(mm); + vrm->mmap_locked = false; + + if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len) + mm_populate(vrm->new_addr + vrm->old_len, vrm->delta); + } + + userfaultfd_unmap_complete(mm, vrm->uf_unmap_early); + mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len); + userfaultfd_unmap_complete(mm, vrm->uf_unmap); + + return ret; } /* @@ -1239,15 +1470,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long ret; - unsigned long delta; - bool locked = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; LIST_HEAD(uf_unmap_early); LIST_HEAD(uf_unmap); - /* * There is a deliberate asymmetry here: we strip the pointer tag * from the old address but leave the new address alone. This is @@ -1259,88 +1484,19 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, * See Documentation/arch/arm64/tagged-address-abi.rst for more * information. */ - addr = untagged_addr(addr); + struct vma_remap_struct vrm = { + .addr = untagged_addr(addr), + .old_len = old_len, + .new_len = new_len, + .flags = flags, + .new_addr = new_addr, - ret = check_mremap_params(addr, flags, old_len, new_len, new_addr); - if (ret) - return ret; + .uf = &uf, + .uf_unmap_early = &uf_unmap_early, + .uf_unmap = &uf_unmap, - old_len = PAGE_ALIGN(old_len); - new_len = PAGE_ALIGN(new_len); - delta = abs_diff(old_len, new_len); + .remap_type = MREMAP_INVALID, /* We set later. */ + }; - if (mmap_write_lock_killable(mm)) - return -EINTR; - - vma = vma_lookup(mm, addr); - if (!vma) { - ret = -EFAULT; - goto out; - } - - /* If mseal()'d, mremap() is prohibited. */ - if (!can_modify_vma(vma)) { - ret = -EPERM; - goto out; - } - - /* Align to hugetlb page size, if required. */ - if (is_vm_hugetlb_page(vma) && - !align_hugetlb(vma, addr, new_addr, &old_len, &new_len, &delta)) { - ret = -EINVAL; - goto out; - } - - /* Are we RELOCATING the VMA to a SPECIFIC address? */ - if (implies_new_addr(flags)) { - ret = mremap_to(addr, old_len, new_addr, new_len, - &locked, flags, &uf, &uf_unmap_early, - &uf_unmap); - goto out; - } - - /* - * From here on in we are only RESIZING the VMA, attempting to do so - * in-place, moving the VMA if we cannot. - */ - - /* NO-OP CASE - resizing to the same size. */ - if (new_len == old_len) { - ret = addr; - goto out; - } - - /* SHRINK CASE. Can always be done in-place. */ - if (new_len < old_len) { - VMA_ITERATOR(vmi, mm, addr + new_len); - - /* - * Simply unmap the shrunken portion of the VMA. This does all - * the needed commit accounting, unlocking the mmap lock. - */ - ret = do_vmi_munmap(&vmi, mm, addr + new_len, delta, - &uf_unmap, true); - if (ret) - goto out; - - /* We succeeded, mmap lock released for us. */ - ret = addr; - goto out_unlocked; - } - - /* EXPAND case. We try to do in-place, if we can't, then we move it. 
*/ - ret = expand_vma(vma, addr, old_len, new_len, flags, &locked, &new_addr, - &uf, &uf_unmap); - -out: - if (offset_in_page(ret)) - locked = false; - mmap_write_unlock(mm); - if (locked && new_len > old_len) - mm_populate(new_addr + old_len, delta); -out_unlocked: - userfaultfd_unmap_complete(mm, &uf_unmap_early); - mremap_userfaultfd_complete(&uf, addr, ret, old_len); - userfaultfd_unmap_complete(mm, &uf_unmap); - return ret; + return do_mremap(&vrm); } From d5c8aec0542e2d79b64de9089b88fabdebe05c1e Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:37 +0000 Subject: [PATCH 299/431] mm/mremap: initial refactor of move_vma() Update move_vma() to use the threaded VRM object, de-duplicate code and separate into smaller functions to aid readability and debug-ability. This in turn allows further simplification of expand_vma() as we can simply thread VRM through the function. We also take the opportunity to abstract the account charging page count into the VRM in order that we can correctly thread this through the operation. We additionally do the same for tracking mm statistics - exec_vm, stack_vm, data_vm, and locked_vm. As part of this change, we slightly modify when locked pages statistics are counted for in mm_struct statistics. However this should cause no issues, as there is no chance of underflow, nor will any rlimit failures occur as a result. This is an intermediate step before a further refactoring of move_vma() in order to aid review. Link: https://lkml.kernel.org/r/ab611d6efae11bddab2db2b8bb3925b1d1954c7d.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Reviewed-by: Vlastimil Babka Cc: Harry Yoo Signed-off-by: Andrew Morton --- mm/mremap.c | 186 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 122 insertions(+), 64 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index af022e3b89e2..6305cb9a86f6 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -68,6 +68,7 @@ struct vma_remap_struct { bool mlocked; /* Was the VMA mlock()'d? */ enum mremap_type remap_type; /* expand, shrink, etc. */ bool mmap_locked; /* Is mm currently write-locked? */ + unsigned long charged; /* If VM_ACCOUNT, # pages to account. */ }; static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) @@ -816,35 +817,88 @@ static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm) return 0; } -static unsigned long move_vma(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long old_len, - unsigned long new_len, unsigned long new_addr, - bool *mlocked, unsigned long flags, - struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) +/* + * Keep track of pages which have been added to the memory mapping. If the VMA + * is accounted, also check to see if there is sufficient memory. + * + * Returns true on success, false if insufficient memory to charge. + */ +static bool vrm_charge(struct vma_remap_struct *vrm) { - long to_account = new_len - old_len; - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *new_vma; - unsigned long vm_flags = vma->vm_flags; - unsigned long new_pgoff; - unsigned long moved_len; - bool account_start = false; - bool account_end = false; - unsigned long hiwater_vm; - int err = 0; - bool need_rmap_locks; - struct vma_iterator vmi; + unsigned long charged; + + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return true; + + /* + * If we don't unmap the old mapping, then we account the entirety of + * the length of the new one. Otherwise it's just the delta in size. 
+ */ + if (vrm->flags & MREMAP_DONTUNMAP) + charged = vrm->new_len >> PAGE_SHIFT; + else + charged = vrm->delta >> PAGE_SHIFT; + + + /* This accounts 'charged' pages of memory. */ + if (security_vm_enough_memory_mm(current->mm, charged)) + return false; + + vrm->charged = charged; + return true; +} + +/* + * an error has occurred so we will not be using vrm->charged memory. Unaccount + * this memory if the VMA is accounted. + */ +static void vrm_uncharge(struct vma_remap_struct *vrm) +{ + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return; + + vm_unacct_memory(vrm->charged); + vrm->charged = 0; +} + +/* + * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to + * account for 'bytes' memory used, and if locked, indicate this in the VRM so + * we can handle this correctly later. + */ +static void vrm_stat_account(struct vma_remap_struct *vrm, + unsigned long bytes) +{ + unsigned long pages = bytes >> PAGE_SHIFT; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + vrm->mlocked = true; + } +} + +/* + * Perform checks before attempting to write a VMA prior to it being + * moved. + */ +static unsigned long prep_move_vma(struct vma_remap_struct *vrm, + unsigned long *vm_flags_ptr) +{ + unsigned long err = 0; + struct vm_area_struct *vma = vrm->vma; + unsigned long old_addr = vrm->addr; + unsigned long old_len = vrm->old_len; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ - if (mm->map_count >= sysctl_max_map_count - 3) + if (current->mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; - if (unlikely(flags & MREMAP_DONTUNMAP)) - to_account = new_len; - if (vma->vm_ops && vma->vm_ops->may_split) { if (vma->vm_start != old_addr) err = vma->vm_ops->may_split(vma, old_addr); @@ -862,22 +916,46 @@ static unsigned long move_vma(struct vm_area_struct *vma, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, - MADV_UNMERGEABLE, &vm_flags); + MADV_UNMERGEABLE, vm_flags_ptr); if (err) return err; - if (vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT)) - return -ENOMEM; - } + return 0; +} + +static unsigned long move_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + struct vm_area_struct *new_vma; + unsigned long vm_flags = vma->vm_flags; + unsigned long old_addr = vrm->addr, new_addr = vrm->new_addr; + unsigned long old_len = vrm->old_len, new_len = vrm->new_len; + unsigned long new_pgoff; + unsigned long moved_len; + unsigned long account_start = false; + unsigned long account_end = false; + unsigned long hiwater_vm; + int err; + bool need_rmap_locks; + struct vma_iterator vmi; + + err = prep_move_vma(vrm, &vm_flags); + if (err) + return err; + + /* If accounted, charge the number of bytes the operation will use. */ + if (!vrm_charge(vrm)) + return -ENOMEM; vma_start_write(vma); new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, + new_vma = copy_vma(&vrm->vma, new_addr, new_len, new_pgoff, &need_rmap_locks); + /* This may have been updated. 
*/ + vma = vrm->vma; if (!new_vma) { - if (vm_flags & VM_ACCOUNT) - vm_unacct_memory(to_account >> PAGE_SHIFT); + vrm_uncharge(vrm); return -ENOMEM; } @@ -902,7 +980,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, old_addr = new_addr; new_addr = err; } else { - mremap_userfaultfd_prep(new_vma, uf); + mremap_userfaultfd_prep(new_vma, vrm->uf); } if (is_vm_hugetlb_page(vma)) { @@ -910,7 +988,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, } /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { + if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) account_start = true; @@ -928,13 +1006,12 @@ static unsigned long move_vma(struct vm_area_struct *vma, * If this were a serious issue, we'd add a flag to do_munmap(). */ hiwater_vm = mm->hiwater_vm; - vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); /* Tell pfnmap has moved from this vma */ if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn_clear(vma); - if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { + if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) { /* We always clear VM_LOCKED[ONFAULT] on the old vma */ vm_flags_clear(vma, VM_LOCKED_MASK); @@ -947,22 +1024,20 @@ static unsigned long move_vma(struct vm_area_struct *vma, unlink_anon_vmas(vma); /* Because we won't unmap we don't need to touch locked_vm */ + vrm_stat_account(vrm, new_len); return new_addr; } + vrm_stat_account(vrm, new_len); + vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { + if (do_vmi_munmap(&vmi, mm, old_addr, old_len, vrm->uf_unmap, false) < 0) { /* OOM: unable to split vma, just get accounts right */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) + if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); account_start = account_end = false; } - if (vm_flags & VM_LOCKED) { - mm->locked_vm += new_len >> PAGE_SHIFT; - *mlocked = true; - } - mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ @@ -1141,9 +1216,7 @@ static unsigned long mremap_to(struct vma_remap_struct *vrm) if (err) return err; - return move_vma(vrm->vma, vrm->addr, vrm->old_len, vrm->new_len, - vrm->new_addr, &vrm->mlocked, vrm->flags, - vrm->uf, vrm->uf_unmap); + return move_vma(vrm); } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) @@ -1248,17 +1321,11 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm) static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; - long pages = vrm->delta >> PAGE_SHIFT; struct vm_area_struct *vma = vrm->vma; VMA_ITERATOR(vmi, mm, vma->vm_end); - long charged = 0; - if (vma->vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, pages)) - return -ENOMEM; - - charged = pages; - } + if (!vrm_charge(vrm)) + return -ENOMEM; /* * Function vma_merge_extend() is called on the @@ -1271,15 +1338,11 @@ static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) */ vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta); if (!vma) { - vm_unacct_memory(charged); + vrm_uncharge(vrm); return -ENOMEM; } - vm_stat_account(mm, vma->vm_flags, pages); - if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; - vrm->mlocked = true; - } + vrm_stat_account(vrm, vrm->delta); return 0; } @@ -1319,11 +1382,7 @@ static bool align_hugetlb(struct 
vma_remap_struct *vrm) static unsigned long expand_vma(struct vma_remap_struct *vrm) { unsigned long err; - struct vm_area_struct *vma = vrm->vma; unsigned long addr = vrm->addr; - unsigned long old_len = vrm->old_len; - unsigned long new_len = vrm->new_len; - unsigned long flags = vrm->flags; err = resize_is_valid(vrm); if (err) @@ -1356,7 +1415,7 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm) */ /* We're not allowed to move the VMA, so error out. */ - if (!(flags & MREMAP_MAYMOVE)) + if (!(vrm->flags & MREMAP_MAYMOVE)) return -ENOMEM; /* Find a new location to move the VMA to. */ @@ -1364,8 +1423,7 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm) if (err) return err; - return move_vma(vma, addr, old_len, new_len, vrm->new_addr, - &vrm->mlocked, flags, vrm->uf, vrm->uf_unmap); + return move_vma(vrm); } /* From b714ccb02a76e170f3e6475749ed0812ee25f777 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:38 +0000 Subject: [PATCH 300/431] mm/mremap: complete refactor of move_vma() We invoke ksm_madvise() with an intentionally dummy flags field, so no need to pass around. Additionally, the code tries to be 'clever' with account_start, account_end, using these to both check that vma->vm_start != 0 and that we ought to account the newly split portion of VMA post-move, either before or after it. We need to do this because we intentionally removed VM_ACCOUNT on the VMA prior to unmapping, so we don't erroneously unaccount memory (we have already calculated the correct amount to account and accounted it, any subsequent subtraction will be incorrect). This patch significantly expands the comment (from 2002!) about 'concealing' the flag to make it abundantly clear what's going on, as well as adding and expanding a number of other comments also. We can remove account_start, account_end by instead tracking when we account (i.e. vma->vm_flags has the VM_ACCOUNT flag set, and this is not an MREMAP_DONTUNMAP operation), and figuring out when to reinstate the VM_ACCOUNT flag on prior/subsequent VMAs separately. We additionally break the function into logical pieces and attack the very confusing error handling logic (where, for instance, new_addr is set to err). After this change the code is considerably more readable and easy to manipulate. Link: https://lkml.kernel.org/r/e7eaa307e444ba2b04d94fd985c907c8e896f893.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 295 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 205 insertions(+), 90 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index 6305cb9a86f6..7dc058d5d5e2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -884,13 +884,13 @@ static void vrm_stat_account(struct vma_remap_struct *vrm, * Perform checks before attempting to write a VMA prior to it being * moved. */ -static unsigned long prep_move_vma(struct vma_remap_struct *vrm, - unsigned long *vm_flags_ptr) +static unsigned long prep_move_vma(struct vma_remap_struct *vrm) { unsigned long err = 0; struct vm_area_struct *vma = vrm->vma; unsigned long old_addr = vrm->addr; unsigned long old_len = vrm->old_len; + unsigned long dummy = vma->vm_flags; /* * We'd prefer to avoid failure later on in do_munmap: @@ -916,56 +916,151 @@ static unsigned long prep_move_vma(struct vma_remap_struct *vrm, * so KSM can come around to merge on vma and new_vma afterwards. 
*/ err = ksm_madvise(vma, old_addr, old_addr + old_len, - MADV_UNMERGEABLE, vm_flags_ptr); + MADV_UNMERGEABLE, &dummy); if (err) return err; return 0; } -static unsigned long move_vma(struct vma_remap_struct *vrm) +/* + * Unmap source VMA for VMA move, turning it from a copy to a move, being + * careful to ensure we do not underflow memory account while doing so if an + * accountable move. + * + * This is best effort, if we fail to unmap then we simply try to correct + * accounting and exit. + */ +static void unmap_source_vma(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; + unsigned long addr = vrm->addr; + unsigned long len = vrm->old_len; struct vm_area_struct *vma = vrm->vma; - struct vm_area_struct *new_vma; - unsigned long vm_flags = vma->vm_flags; - unsigned long old_addr = vrm->addr, new_addr = vrm->new_addr; - unsigned long old_len = vrm->old_len, new_len = vrm->new_len; - unsigned long new_pgoff; - unsigned long moved_len; - unsigned long account_start = false; - unsigned long account_end = false; - unsigned long hiwater_vm; + VMA_ITERATOR(vmi, mm, addr); int err; + unsigned long vm_start; + unsigned long vm_end; + /* + * It might seem odd that we check for MREMAP_DONTUNMAP here, given this + * function implies that we unmap the original VMA, which seems + * contradictory. + * + * However, this occurs when this operation was attempted and an error + * arose, in which case we _do_ wish to unmap the _new_ VMA, which means + * we actually _do_ want it be unaccounted. + */ + bool accountable_move = (vma->vm_flags & VM_ACCOUNT) && + !(vrm->flags & MREMAP_DONTUNMAP); + + /* + * So we perform a trick here to prevent incorrect accounting. Any merge + * or new VMA allocation performed in copy_vma() does not adjust + * accounting, it is expected that callers handle this. + * + * And indeed we already have, accounting appropriately in the case of + * both in vrm_charge(). + * + * However, when we unmap the existing VMA (to effect the move), this + * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount + * removed pages. + * + * To avoid this we temporarily clear this flag, reinstating on any + * portions of the original VMA that remain. + */ + if (accountable_move) { + vm_flags_clear(vma, VM_ACCOUNT); + /* We are about to split vma, so store the start/end. */ + vm_start = vma->vm_start; + vm_end = vma->vm_end; + } + + err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false); + vrm->vma = NULL; /* Invalidated. */ + if (err) { + /* OOM: unable to split vma, just get accounts right */ + vm_acct_memory(len >> PAGE_SHIFT); + return; + } + + /* + * If we mremap() from a VMA like this: + * + * addr end + * | | + * v v + * |-------------| + * | | + * |-------------| + * + * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above + * we'll end up with: + * + * addr end + * | | + * v v + * |---| |---| + * | A | | B | + * |---| |---| + * + * The VMI is still pointing at addr, so vma_prev() will give us A, and + * a subsequent or lone vma_next() will give as B. + * + * do_vmi_munmap() will have restored the VMI back to addr. + */ + if (accountable_move) { + unsigned long end = addr + len; + + if (vm_start < addr) { + struct vm_area_struct *prev = vma_prev(&vmi); + + vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */ + } + + if (vm_end > end) { + struct vm_area_struct *next = vma_next(&vmi); + + vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. 
*/ + } + } +} + +/* + * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the + * process. Additionally handle an error occurring on moving of page tables, + * where we reset vrm state to cause unmapping of the new VMA. + * + * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an + * error code. + */ +static int copy_vma_and_data(struct vma_remap_struct *vrm, + struct vm_area_struct **new_vma_ptr) +{ + unsigned long internal_offset = vrm->addr - vrm->vma->vm_start; + unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT; + unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; + unsigned long moved_len; bool need_rmap_locks; - struct vma_iterator vmi; + struct vm_area_struct *vma; + struct vm_area_struct *new_vma; + int err = 0; - err = prep_move_vma(vrm, &vm_flags); - if (err) - return err; - - /* If accounted, charge the number of bytes the operation will use. */ - if (!vrm_charge(vrm)) - return -ENOMEM; - - vma_start_write(vma); - new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vrm->vma, new_addr, new_len, new_pgoff, + new_vma = copy_vma(&vrm->vma, vrm->new_addr, vrm->new_len, new_pgoff, &need_rmap_locks); - /* This may have been updated. */ - vma = vrm->vma; if (!new_vma) { vrm_uncharge(vrm); + *new_vma_ptr = NULL; return -ENOMEM; } + vma = vrm->vma; - moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, - need_rmap_locks, false); - if (moved_len < old_len) { + moved_len = move_page_tables(vma, vrm->addr, new_vma, + vrm->new_addr, vrm->old_len, + need_rmap_locks, /* for_stack= */false); + if (moved_len < vrm->old_len) err = -ENOMEM; - } else if (vma->vm_ops && vma->vm_ops->mremap) { + else if (vma->vm_ops && vma->vm_ops->mremap) err = vma->vm_ops->mremap(new_vma); - } if (unlikely(err)) { /* @@ -973,28 +1068,84 @@ static unsigned long move_vma(struct vma_remap_struct *vrm) * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. */ - move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, - true, false); - vma = new_vma; - old_len = new_len; - old_addr = new_addr; - new_addr = err; + move_page_tables(new_vma, vrm->new_addr, vma, vrm->addr, + moved_len, /* need_rmap_locks = */true, + /* for_stack= */false); + vrm->vma = new_vma; + vrm->old_len = vrm->new_len; + vrm->addr = vrm->new_addr; } else { mremap_userfaultfd_prep(new_vma, vrm->uf); } - if (is_vm_hugetlb_page(vma)) { + if (is_vm_hugetlb_page(vma)) clear_vma_resv_huge_pages(vma); - } - /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) { - vm_flags_clear(vma, VM_ACCOUNT); - if (vma->vm_start < old_addr) - account_start = true; - if (vma->vm_end > old_addr + old_len) - account_end = true; - } + /* Tell pfnmap has moved from this vma */ + if (unlikely(vma->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(vma); + + *new_vma_ptr = new_vma; + return err; +} + +/* + * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and + * account flags on remaining VMA by convention (it cannot be mlock()'d any + * longer, as pages in range are no longer mapped), and removing anon_vma_chain + * links from it (if the entire VMA was copied over). 
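From userspace, the behaviour that dontunmap_complete() preserves looks like the hedged sketch below (not part of the patch; it needs Linux 5.7+ for MREMAP_DONTUNMAP and assumes a private anonymous mapping): the pages follow the move, while the old range stays mapped and reads back as zeroes.

/*
 * Illustrative userspace view of MREMAP_DONTUNMAP | MREMAP_MAYMOVE: the
 * data moves to the new mapping, the old range remains mapped but empty.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4      /* UAPI value, for older libc headers. */
#endif

int main(void)
{
        size_t len = 4096 * 2;
        char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *new;

        if (old == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }
        strcpy(old, "data");

        new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);
        if (new == MAP_FAILED) {
                perror("mremap");
                return EXIT_FAILURE;
        }

        /* The data followed the move; the old range is still mapped but empty. */
        printf("new[0..3] = \"%s\", old[0] = %d\n", new, old[0]);
        return EXIT_SUCCESS;
}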
+ */ +static void dontunmap_complete(struct vma_remap_struct *vrm, + struct vm_area_struct *new_vma) +{ + unsigned long start = vrm->addr; + unsigned long end = vrm->addr + vrm->old_len; + unsigned long old_start = vrm->vma->vm_start; + unsigned long old_end = vrm->vma->vm_end; + + /* + * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old + * vma. + */ + vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT); + + /* + * anon_vma links of the old vma is no longer needed after its page + * table has been moved. + */ + if (new_vma != vrm->vma && start == old_start && end == old_end) + unlink_anon_vmas(vrm->vma); + + /* Because we won't unmap we don't need to touch locked_vm. */ +} + +static unsigned long move_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *new_vma; + unsigned long hiwater_vm; + int err; + + err = prep_move_vma(vrm); + if (err) + return err; + + /* If accounted, charge the number of bytes the operation will use. */ + if (!vrm_charge(vrm)) + return -ENOMEM; + + /* We don't want racing faults. */ + vma_start_write(vrm->vma); + + /* Perform copy step. */ + err = copy_vma_and_data(vrm, &new_vma); + /* + * If we established the copied-to VMA, we attempt to recover from the + * error by setting the destination VMA to the source VMA and unmapping + * it below. + */ + if (err && !new_vma) + return err; /* * If we failed to move page tables we still do total_vm increment @@ -1007,51 +1158,15 @@ static unsigned long move_vma(struct vma_remap_struct *vrm) */ hiwater_vm = mm->hiwater_vm; - /* Tell pfnmap has moved from this vma */ - if (unlikely(vma->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(vma); - - if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) { - /* We always clear VM_LOCKED[ONFAULT] on the old vma */ - vm_flags_clear(vma, VM_LOCKED_MASK); - - /* - * anon_vma links of the old vma is no longer needed after its page - * table has been moved. - */ - if (new_vma != vma && vma->vm_start == old_addr && - vma->vm_end == (old_addr + old_len)) - unlink_anon_vmas(vma); - - /* Because we won't unmap we don't need to touch locked_vm */ - vrm_stat_account(vrm, new_len); - return new_addr; - } - - vrm_stat_account(vrm, new_len); - - vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, vrm->uf_unmap, false) < 0) { - /* OOM: unable to split vma, just get accounts right */ - if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) - vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = false; - } + vrm_stat_account(vrm, vrm->new_len); + if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) + dontunmap_complete(vrm, new_vma); + else + unmap_source_vma(vrm); mm->hiwater_vm = hiwater_vm; - /* Restore VM_ACCOUNT if one or two pieces of vma left */ - if (account_start) { - vma = vma_prev(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - if (account_end) { - vma = vma_next(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - return new_addr; + return err ? 
(unsigned long)err : vrm->new_addr; } /* From 2a4077f49ccd6f904be6edb363714646e47292c9 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:39 +0000 Subject: [PATCH 301/431] mm/mremap: refactor move_page_tables(), abstracting state A lot of state is threaded throughout the page table moving logic within the mremap code, including boolean values which control behaviour specifically in regard to whether rmap locks need be held over the operation and whether the VMA belongs to a temporary stack being moved by move_arg_pages() (and consequently, relocate_vma_down()). As we already transmit state throughout this operation, it is neater and more readable to maintain a small state object. We do so in the form of pagetable_move_control. In addition, this allows us to update parameters within the state as we manipulate things, for instance with regard to the page table realignment logic. In future I want to add additional functionality to the page table logic, so this is an additional motivation for making it easier to do so. This patch changes move_page_tables() to accept a pointer to a pagetable_move_control struct, and performs changes at this level only. Further page table logic will be updated in a subsequent patch. We additionally also take the opportunity to add significant comments describing the address realignment logic to make it abundantly clear what is going on in this code. Link: https://lkml.kernel.org/r/e20180add9c8746184aa3f23a61fff69a06cdaa9.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/internal.h | 43 +++++++++++-- mm/mmap.c | 5 +- mm/mremap.c | 174 ++++++++++++++++++++++++++++++++++++-------------- 3 files changed, 169 insertions(+), 53 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index aa30282a774a..06f816cdb43b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -24,6 +24,44 @@ struct folio_batch; +/* + * Maintains state across a page table move. The operation assumes both source + * and destination VMAs already exist and are specified by the user. + * + * Partial moves are permitted, but the old and new ranges must both reside + * within a VMA. + * + * mmap lock must be held in write and VMA write locks must be held on any VMA + * that is visible. + * + * Use the PAGETABLE_MOVE() macro to initialise this struct. + * + * NOTE: The page table move is affected by reading from [old_addr, old_end), + * and old_addr may be updated for better page table alignment, so len_in + * represents the length of the range being copied as specified by the user. + */ +struct pagetable_move_control { + struct vm_area_struct *old; /* Source VMA. */ + struct vm_area_struct *new; /* Destination VMA. */ + unsigned long old_addr; /* Address from which the move begins. */ + unsigned long old_end; /* Exclusive address at which old range ends. */ + unsigned long new_addr; /* Address to move page tables to. */ + unsigned long len_in; /* Bytes to remap specified by user. */ + + bool need_rmap_locks; /* Do rmap locks need to be taken? */ + bool for_stack; /* Is this an early temp stack being moved? */ +}; + +#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \ + struct pagetable_move_control name = { \ + .old = old_, \ + .new = new_, \ + .old_addr = old_addr_, \ + .old_end = (old_addr_) + (len_), \ + .new_addr = new_addr_, \ + .len_in = len_, \ + } + /* * The set of flags that only affect watermark checking and reclaim * behaviour. 
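The PAGETABLE_MOVE() initialiser above derives old_end from old_addr + len and leaves need_rmap_locks and for_stack at false, to be set explicitly by callers. The standalone miniature below only illustrates that pattern; the reduced struct and macro are stand-ins, not kernel code.

/*
 * Minimal standalone illustration of the PAGETABLE_MOVE() initialiser
 * pattern: derived old_end, zero-initialised booleans.
 */
#include <assert.h>
#include <stdbool.h>

struct pagetable_move_control_model {
        unsigned long old_addr;
        unsigned long old_end;
        unsigned long new_addr;
        unsigned long len_in;
        bool need_rmap_locks;
        bool for_stack;
};

#define PAGETABLE_MOVE_MODEL(name, old_addr_, new_addr_, len_)  \
        struct pagetable_move_control_model name = {            \
                .old_addr = old_addr_,                          \
                .old_end = (old_addr_) + (len_),                \
                .new_addr = new_addr_,                          \
                .len_in = len_,                                 \
        }

int main(void)
{
        PAGETABLE_MOVE_MODEL(pmc, 0x10000000UL, 0x20000000UL, 0x200000UL);

        assert(pmc.old_end == pmc.old_addr + pmc.len_in);
        assert(!pmc.need_rmap_locks && !pmc.for_stack);

        /* The stack-move caller flips for_stack before moving page tables. */
        pmc.for_stack = true;
        return 0;
}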
This is used by the MM to obey the caller constraints @@ -1527,10 +1565,7 @@ extern struct list_lru shadow_nodes; } while (0) /* mremap.c */ -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack); +unsigned long move_page_tables(struct pagetable_move_control *pmc); #ifdef CONFIG_UNACCEPTED_MEMORY void accept_page(struct page *page); diff --git a/mm/mmap.c b/mm/mmap.c index 15d6cd7cc845..efcc4ca7500d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1694,6 +1694,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); struct vm_area_struct *next; struct mmu_gather tlb; + PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length); BUG_ON(new_start > new_end); @@ -1716,8 +1717,8 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made. */ - if (length != move_page_tables(vma, old_start, - vma, new_start, length, false, true)) + pmc.for_stack = true; + if (length != move_page_tables(&pmc)) return -ENOMEM; tlb_gather_mmu(&tlb, mm); diff --git a/mm/mremap.c b/mm/mremap.c index 7dc058d5d5e2..3a2ac167e876 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -580,8 +580,9 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, * the VMA that is created to span the source and destination of the move, * so we make an exception for it. */ -static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, - unsigned long mask, bool for_stack) +static bool can_align_down(struct pagetable_move_control *pmc, + struct vm_area_struct *vma, unsigned long addr_to_align, + unsigned long mask) { unsigned long addr_masked = addr_to_align & mask; @@ -590,11 +591,11 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali * of the corresponding VMA, we can't align down or we will destroy part * of the current mapping. */ - if (!for_stack && vma->vm_start != addr_to_align) + if (!pmc->for_stack && vma->vm_start != addr_to_align) return false; /* In the stack case we explicitly permit in-VMA alignment. */ - if (for_stack && addr_masked >= vma->vm_start) + if (pmc->for_stack && addr_masked >= vma->vm_start) return true; /* @@ -604,54 +605,131 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; } -/* Opportunistically realign to specified boundary for faster copy. */ -static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma, - unsigned long *new_addr, struct vm_area_struct *new_vma, - unsigned long mask, bool for_stack) +/* + * Determine if are in fact able to realign for efficiency to a higher page + * table boundary. + */ +static bool can_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) { + unsigned long align_mask = ~pagetable_mask; + unsigned long old_align = pmc->old_addr & align_mask; + unsigned long new_align = pmc->new_addr & align_mask; + unsigned long pagetable_size = align_mask + 1; + unsigned long old_align_next = pagetable_size - old_align; + + /* + * We don't want to have to go hunting for VMAs from the end of the old + * VMA to the next page table boundary, also we want to make sure the + * operation is wortwhile. 
+ * + * So ensure that we only perform this realignment if the end of the + * range being copied reaches or crosses the page table boundary. + * + * boundary boundary + * .<- old_align -> . + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . <----------------.-----------> + * . len_in + * <-------------------------------> + * . pagetable_size . + * . <----------------> + * . old_align_next . + */ + if (pmc->len_in < old_align_next) + return false; + /* Skip if the addresses are already aligned. */ - if ((*old_addr & ~mask) == 0) - return; + if (old_align == 0) + return false; /* Only realign if the new and old addresses are mutually aligned. */ - if ((*old_addr & ~mask) != (*new_addr & ~mask)) - return; + if (old_align != new_align) + return false; /* Ensure realignment doesn't cause overlap with existing mappings. */ - if (!can_align_down(old_vma, *old_addr, mask, for_stack) || - !can_align_down(new_vma, *new_addr, mask, for_stack)) - return; + if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) || + !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask)) + return false; - *old_addr = *old_addr & mask; - *new_addr = *new_addr & mask; + return true; } -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack) +/* + * Opportunistically realign to specified boundary for faster copy. + * + * Consider an mremap() of a VMA with page table boundaries as below, and no + * preceding VMAs from the lower page table boundary to the start of the VMA, + * with the end of the range reaching or crossing the page table boundary. + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . pmc->old_addr . pmc->old_end + * . <----------------------------> + * . move these page tables + * + * If we proceed with moving page tables in this scenario, we will have a lot of + * work to do traversing old page tables and establishing new ones in the + * destination across multiple lower level page tables. + * + * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the + * page table boundary, so we can simply copy a single page table entry for the + * aligned portion of the VMA instead: + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * pmc->old_addr . pmc->old_end + * <-------------------------------------------> + * . move these page tables + */ +static void try_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) +{ + + if (!can_realign_addr(pmc, pagetable_mask)) + return; + + /* + * Simply align to page table boundaries. Note that we do NOT update the + * pmc->old_end value, and since the move_page_tables() operation spans + * from [old_addr, old_end) (offsetting new_addr as it is performed), + * this simply changes the start of the copy, not the end. 
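As a concrete, hedged example of the eligibility arithmetic described above (assuming the 2 MiB PMD span used on x86-64, and ignoring the VMA-overlap checks handled by can_align_down()):

/*
 * Worked userspace example of the can_realign_addr() address and length
 * conditions, PMD level only.
 */
#include <assert.h>
#include <stdbool.h>

#define PMD_SIZE_MODEL  0x200000UL              /* 2 MiB */
#define PMD_MASK_MODEL  (~(PMD_SIZE_MODEL - 1))

static bool can_realign(unsigned long old_addr, unsigned long new_addr,
                        unsigned long len_in)
{
        unsigned long align_mask = ~PMD_MASK_MODEL;
        unsigned long old_align = old_addr & align_mask;
        unsigned long new_align = new_addr & align_mask;
        unsigned long old_align_next = PMD_SIZE_MODEL - old_align;

        /* Copy must reach or cross the next PMD boundary to be worthwhile. */
        if (len_in < old_align_next)
                return false;
        /* Already aligned: nothing to do. */
        if (old_align == 0)
                return false;
        /* Source and destination must be mutually aligned. */
        return old_align == new_align;
}

int main(void)
{
        /* 0x1100000 is 1 MiB past a PMD boundary; a 3 MiB copy crosses it. */
        assert(can_realign(0x1100000, 0x5100000, 0x300000));
        /* Same addresses, but a 512 KiB copy stays inside one PMD: skip. */
        assert(!can_realign(0x1100000, 0x5100000, 0x80000));
        /* Offsets into the PMD differ: cannot realign both sides. */
        assert(!can_realign(0x1100000, 0x5180000, 0x300000));
        return 0;
}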
+ */ + pmc->old_addr &= pagetable_mask; + pmc->new_addr &= pagetable_mask; +} + +unsigned long move_page_tables(struct pagetable_move_control *pmc) { unsigned long extent, old_end; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; + unsigned long old_addr, new_addr; + struct vm_area_struct *vma = pmc->old; - if (!len) + if (!pmc->len_in) return 0; - old_end = old_addr + len; - if (is_vm_hugetlb_page(vma)) - return move_hugetlb_page_tables(vma, new_vma, old_addr, - new_addr, len); + return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, + pmc->new_addr, pmc->len_in); + old_end = pmc->old_end; /* * If possible, realign addresses to PMD boundary for faster copy. * Only realign if the mremap copying hits a PMD boundary. */ - if (len >= PMD_SIZE - (old_addr & ~PMD_MASK)) - try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK, - for_stack); + try_realign_addr(pmc, PMD_MASK); + /* These may have been changed. */ + old_addr = pmc->old_addr; + new_addr = pmc->new_addr; flush_cache_range(vma, old_addr, old_end); mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, @@ -675,12 +753,11 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, - old_pud, new_pud, need_rmap_locks); + old_pud, new_pud, pmc->need_rmap_locks); /* We ignore and continue on error? */ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, old_pud, new_pud, true)) continue; @@ -698,7 +775,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, need_rmap_locks)) + old_pmd, new_pmd, pmc->need_rmap_locks)) continue; split_huge_pmd(vma, old_pmd, old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && @@ -713,10 +790,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma, } if (pmd_none(*old_pmd)) continue; - if (pte_alloc(new_vma->vm_mm, new_pmd)) + if (pte_alloc(pmc->new->vm_mm, new_pmd)) break; if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, - new_vma, new_pmd, new_addr, need_rmap_locks) < 0) + pmc->new, new_pmd, new_addr, pmc->need_rmap_locks) < 0) goto again; } @@ -726,10 +803,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma, * Prevent negative return values when {old,new}_addr was realigned * but we broke out of the above loop for the first PMD itself. */ - if (old_addr < old_end - len) + if (old_addr < old_end - pmc->len_in) return 0; - return len + old_addr - old_end; /* how much done */ + return pmc->len_in + old_addr - old_end; /* how much done */ } /* Set vrm->delta to the difference in VMA size specified by user. 
*/ @@ -1040,37 +1117,40 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm, unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT; unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; unsigned long moved_len; - bool need_rmap_locks; - struct vm_area_struct *vma; + struct vm_area_struct *vma = vrm->vma; struct vm_area_struct *new_vma; int err = 0; + PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len); - new_vma = copy_vma(&vrm->vma, vrm->new_addr, vrm->new_len, new_pgoff, - &need_rmap_locks); + new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff, + &pmc.need_rmap_locks); if (!new_vma) { vrm_uncharge(vrm); *new_vma_ptr = NULL; return -ENOMEM; } - vma = vrm->vma; + vrm->vma = vma; + pmc.old = vma; + pmc.new = new_vma; - moved_len = move_page_tables(vma, vrm->addr, new_vma, - vrm->new_addr, vrm->old_len, - need_rmap_locks, /* for_stack= */false); + moved_len = move_page_tables(&pmc); if (moved_len < vrm->old_len) err = -ENOMEM; else if (vma->vm_ops && vma->vm_ops->mremap) err = vma->vm_ops->mremap(new_vma); if (unlikely(err)) { + PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr, + vrm->addr, moved_len); + /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. */ - move_page_tables(new_vma, vrm->new_addr, vma, vrm->addr, - moved_len, /* need_rmap_locks = */true, - /* for_stack= */false); + pmc_revert.need_rmap_locks = true; + move_page_tables(&pmc_revert); + vrm->vma = new_vma; vrm->old_len = vrm->new_len; vrm->addr = vrm->new_addr; From 664dc4da2694fa56e49dc3e4a041bcf7608a060b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:40 +0000 Subject: [PATCH 302/431] mm/mremap: thread state through move page table operation Finish refactoring the page table logic by threading the PMC state throughout the operation, allowing us to control the operation as we go. Additionally, update the old_addr, new_addr fields in move_page_tables() as we progress through the process making use of the fact we have this state object now to track this. With these changes made, not only is the code far more readable, but we can finally transmit state throughout the entire operation, which lays the groundwork for sensibly making changes in future to how the mremap() operation is performed. Additionally take the opportunity to refactor the means of determining the progress of the operation, abstracting this to pmc_progress() and simplifying the logic to make it clearer what's going on. Link: https://lkml.kernel.org/r/230dd7a2b7b01a6eef442678f284d575e800356e.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/internal.h | 3 + mm/mremap.c | 196 +++++++++++++++++++++++++++++--------------------- 2 files changed, 116 insertions(+), 83 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 06f816cdb43b..1ac433d2092b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,6 +36,9 @@ struct folio_batch; * * Use the PAGETABLE_MOVE() macro to initialise this struct. * + * The old_addr and new_addr fields are updated as the page table move is + * executed. + * * NOTE: The page table move is affected by reading from [old_addr, old_end), * and old_addr may be updated for better page table alignment, so len_in * represents the length of the range being copied as specified by the user. 
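The "how much done" arithmetic this patch abstracts into pmc_progress() (shown open-coded at the end of move_page_tables() in the previous patch) can be modelled in isolation; the numbers below are only an example.

/*
 * Userspace model of the progress computation: progress is measured
 * against the user-requested range [old_end - len_in, old_end), so a
 * copy cursor that stopped inside the realigned prefix counts as none.
 */
#include <assert.h>

static unsigned long progress(unsigned long cursor, unsigned long old_end,
                              unsigned long len_in)
{
        /* Broke out before reaching the user-requested start: nothing done. */
        if (cursor < old_end - len_in)
                return 0;
        return len_in + cursor - old_end;
}

int main(void)
{
        unsigned long old_end = 0x1400000;      /* end of requested range */
        unsigned long len_in  = 0x300000;       /* 3 MiB requested */

        /* Whole range copied: cursor advanced to old_end. */
        assert(progress(old_end, old_end, len_in) == len_in);
        /* Stopped 1 MiB short of the end. */
        assert(progress(0x1300000, old_end, len_in) == 0x200000);
        /* Stopped in the realigned prefix, below the requested start. */
        assert(progress(0x1000000, old_end, len_in) == 0);
        return 0;
}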
diff --git a/mm/mremap.c b/mm/mremap.c index 3a2ac167e876..0865387531ed 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -108,8 +108,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) return pmd; } -static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; @@ -122,13 +121,12 @@ static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, return pud_alloc(mm, p4d, addr); } -static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) { pud_t *pud; pmd_t *pmd; - pud = alloc_new_pud(mm, vma, addr); + pud = alloc_new_pud(mm, addr); if (!pud) return NULL; @@ -172,17 +170,19 @@ static pte_t move_soft_dirty_pte(pte_t pte) return pte; } -static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, - unsigned long old_addr, unsigned long old_end, - struct vm_area_struct *new_vma, pmd_t *new_pmd, - unsigned long new_addr, bool need_rmap_locks) +static int move_ptes(struct pagetable_move_control *pmc, + unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd) { + struct vm_area_struct *vma = pmc->old; bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; pmd_t dummy_pmdval; spinlock_t *old_ptl, *new_ptl; bool force_flush = false; + unsigned long old_addr = pmc->old_addr; + unsigned long new_addr = pmc->new_addr; + unsigned long old_end = old_addr + extent; unsigned long len = old_end - old_addr; int err = 0; @@ -204,7 +204,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, * serialize access to individual ptes, but only rmap traversal * order guarantees that we won't miss both the old and new ptes). */ - if (need_rmap_locks) + if (pmc->need_rmap_locks) take_rmap_locks(vma); /* @@ -278,7 +278,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); out: - if (need_rmap_locks) + if (pmc->need_rmap_locks) drop_rmap_locks(vma); return err; } @@ -293,10 +293,11 @@ static inline bool arch_supports_page_table_move(void) #endif #ifdef CONFIG_HAVE_MOVE_PMD -static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) +static bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; bool res = false; pmd_t pmd; @@ -342,7 +343,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ - old_ptl = pmd_lock(vma->vm_mm, old_pmd); + old_ptl = pmd_lock(mm, old_pmd); new_ptl = pmd_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -359,7 +360,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pmd_none(*new_pmd)); pmd_populate(mm, new_pmd, pmd_pgtable(pmd)); - flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE); out_unlock: if (new_ptl != old_ptl) spin_unlock(new_ptl); @@ -368,19 +369,19 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, return res; } #else -static inline bool move_normal_pmd(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, - pmd_t *new_pmd) +static inline bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { return false; } #endif #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD) -static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -406,7 +407,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. */ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -418,7 +419,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pud_none(*new_pud)); pud_populate(mm, new_pud, pud_pgtable(pud)); - flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -426,19 +427,19 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static inline bool move_normal_pud(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, - pud_t *new_pud) +static inline bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { return false; } #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -453,7 +454,7 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -466,8 +467,8 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, /* Set the new pud */ /* mark soft_ditry when we add pud level soft dirty support */ - set_pud_at(mm, new_addr, new_pud, pud); - flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); + set_pud_at(mm, pmc->new_addr, new_pud, pud); + flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -475,8 +476,9 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) + { WARN_ON_ONCE(1); return false; @@ -497,10 +499,12 @@ enum pgt_entry { * destination pgt_entry. */ static __always_inline unsigned long get_extent(enum pgt_entry entry, - unsigned long old_addr, unsigned long old_end, - unsigned long new_addr) + struct pagetable_move_control *pmc) { unsigned long next, extent, mask, size; + unsigned long old_addr = pmc->old_addr; + unsigned long old_end = pmc->old_end; + unsigned long new_addr = pmc->new_addr; switch (entry) { case HPAGE_PMD: @@ -529,38 +533,51 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry, return extent; } +/* + * Should move_pgt_entry() acquire the rmap locks? This is either expressed in + * the PMC, or overridden in the case of normal, larger page tables. + */ +static bool should_take_rmap_locks(struct pagetable_move_control *pmc, + enum pgt_entry entry) +{ + switch (entry) { + case NORMAL_PMD: + case NORMAL_PUD: + return true; + default: + return pmc->need_rmap_locks; + } +} + /* * Attempts to speedup the move by moving entry at the level corresponding to * pgt_entry. Returns true if the move was successful, else false. 
*/ -static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, - void *old_entry, void *new_entry, bool need_rmap_locks) +static bool move_pgt_entry(struct pagetable_move_control *pmc, + enum pgt_entry entry, void *old_entry, void *new_entry) { bool moved = false; + bool need_rmap_locks = should_take_rmap_locks(pmc, entry); /* See comment in move_ptes() */ if (need_rmap_locks) - take_rmap_locks(vma); + take_rmap_locks(pmc->old); switch (entry) { case NORMAL_PMD: - moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pmd(pmc, old_entry, new_entry); break; case NORMAL_PUD: - moved = move_normal_pud(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pud(pmc, old_entry, new_entry); break; case HPAGE_PMD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pmd(vma, old_addr, new_addr, old_entry, + move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry, new_entry); break; case HPAGE_PUD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pud(vma, old_addr, new_addr, old_entry, - new_entry); + move_huge_pud(pmc, old_entry, new_entry); break; default: @@ -569,7 +586,7 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, } if (need_rmap_locks) - drop_rmap_locks(vma); + drop_rmap_locks(pmc->old); return moved; } @@ -705,108 +722,121 @@ static void try_realign_addr(struct pagetable_move_control *pmc, pmc->new_addr &= pagetable_mask; } +/* Is the page table move operation done? */ +static bool pmc_done(struct pagetable_move_control *pmc) +{ + return pmc->old_addr >= pmc->old_end; +} + +/* Advance to the next page table, offset by extent bytes. */ +static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent) +{ + pmc->old_addr += extent; + pmc->new_addr += extent; +} + +/* + * Determine how many bytes in the specified input range have had their page + * tables moved so far. + */ +static unsigned long pmc_progress(struct pagetable_move_control *pmc) +{ + unsigned long orig_old_addr = pmc->old_end - pmc->len_in; + unsigned long old_addr = pmc->old_addr; + + /* + * Prevent negative return values when {old,new}_addr was realigned but + * we broke out of the loop in move_page_tables() for the first PMD + * itself. + */ + return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr; +} + unsigned long move_page_tables(struct pagetable_move_control *pmc) { - unsigned long extent, old_end; + unsigned long extent; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; - unsigned long old_addr, new_addr; - struct vm_area_struct *vma = pmc->old; + struct mm_struct *mm = pmc->old->vm_mm; if (!pmc->len_in) return 0; - if (is_vm_hugetlb_page(vma)) + if (is_vm_hugetlb_page(pmc->old)) return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, pmc->new_addr, pmc->len_in); - old_end = pmc->old_end; /* * If possible, realign addresses to PMD boundary for faster copy. * Only realign if the mremap copying hits a PMD boundary. */ try_realign_addr(pmc, PMD_MASK); - /* These may have been changed. 
*/ - old_addr = pmc->old_addr; - new_addr = pmc->new_addr; - flush_cache_range(vma, old_addr, old_end); - mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, - old_addr, old_end); + flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end); + mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, + pmc->old_addr, pmc->old_end); mmu_notifier_invalidate_range_start(&range); - for (; old_addr < old_end; old_addr += extent, new_addr += extent) { + for (; !pmc_done(pmc); pmc_next(pmc, extent)) { cond_resched(); /* * If extent is PUD-sized try to speed up the move by moving at the * PUD level if possible. */ - extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); + extent = get_extent(NORMAL_PUD, pmc); - old_pud = get_old_pud(vma->vm_mm, old_addr); + old_pud = get_old_pud(mm, pmc->old_addr); if (!old_pud) continue; - new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); + new_pud = alloc_new_pud(mm, pmc->new_addr); if (!new_pud) break; if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { - move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, - old_pud, new_pud, pmc->need_rmap_locks); + move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud); /* We ignore and continue on error? */ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, - old_pud, new_pud, true)) + if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud)) continue; } - extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr); - old_pmd = get_old_pmd(vma->vm_mm, old_addr); + extent = get_extent(NORMAL_PMD, pmc); + old_pmd = get_old_pmd(mm, pmc->old_addr); if (!old_pmd) continue; - new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); + new_pmd = alloc_new_pmd(mm, pmc->new_addr); if (!new_pmd) break; again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && - move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, pmc->need_rmap_locks)) + move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd)) continue; - split_huge_pmd(vma, old_pmd, old_addr); + split_huge_pmd(pmc->old, old_pmd, pmc->old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* * If the extent is PMD-sized, try to speed the move by * moving at the PMD level if possible. */ - if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, true)) + if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd)) continue; } if (pmd_none(*old_pmd)) continue; if (pte_alloc(pmc->new->vm_mm, new_pmd)) break; - if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, - pmc->new, new_pmd, new_addr, pmc->need_rmap_locks) < 0) + if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0) goto again; } mmu_notifier_invalidate_range_end(&range); - /* - * Prevent negative return values when {old,new}_addr was realigned - * but we broke out of the above loop for the first PMD itself. - */ - if (old_addr < old_end - pmc->len_in) - return 0; - - return pmc->len_in + old_addr - old_end; /* how much done */ + return pmc_progress(pmc); } /* Set vrm->delta to the difference in VMA size specified by user. 
*/ From 6220ea5583e977e1eeea06c237cc440e5ac3de46 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:54 +0100 Subject: [PATCH 303/431] mm: factor out large folio handling from folio_order() into folio_large_order() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: MM owner tracking for large folios (!hugetlb) + CONFIG_NO_PAGE_MAPCOUNT", v3. Let's add an "easy" way to decide -- without false positives, without page-mapcounts and without page table/rmap scanning -- whether a large folio is "certainly mapped exclusively" into a single MM, or whether it "maybe mapped shared" into multiple MMs. Use that information to implement Copy-on-Write reuse, to convert folio_likely_mapped_shared() to folio_maybe_mapped_shared(), and to introduce a kernel config option that lets us not use+maintain per-page mapcounts in large folios anymore. The bigger picture was presented at LSF/MM [1]. This series is effectively a follow-up on my early work [2], which implemented a more precise, but also more complicated, way to identify whether a large folio is "mapped shared" into multiple MMs or "mapped exclusively" into a single MM. 1 Patch Organization ==================== Patch #1 -> #6: make more room in order-1 folios, so we have two "unsigned long" available for our purposes Patch #7 -> #11: preparations Patch #12: MM owner tracking for large folios Patch #13: COW reuse for PTE-mapped anon THP Patch #14: folio_maybe_mapped_shared() Patch #15 -> #20: introduce and implement CONFIG_NO_PAGE_MAPCOUNT 2 MM owner tracking =================== We assign each MM a unique ID ("MM ID"), to be able to squeeze more information in our folios. On 32bit we use 15-bit IDs, on 64bit we use 31-bit IDs. For each large folio, we now store two MM-ID+mapcount ("slot") combinations: * mm0_id + mm0_mapcount * mm1_id + mm1_mapcount On 32bit, we use a 16-bit per-MM mapcount, on 64bit an ordinary 32bit mapcount. This way, we require 2x "unsigned long" on 32bit and 64bit for both slots. Paired with the large mapcount, we can reliably identify whether one of these MMs is the current owner (-> owns all mappings) or even holds all folio references (-> owns all mappings, and all references are from mappings). As long as only two MMs map folio pages at a time, we can reliably and precisely identify whether a large folio is "mapped shared" or "mapped exclusively". Any additional MM that starts mapping the folio while there are no free slots becomes an "untracked MM". If one such "untracked MM" is the last one mapping a folio exclusively, we will not detect the folio as "mapped exclusively" but instead as "maybe mapped shared". (exception: only a single mapping remains) So that's where the approach gets imprecise. For now, we use a bit-spinlock to sync the large mapcount + slots, and make sure we do keep the machinery fast, to not degrade (un)map performance drastically: for example, we make sure to only use a single atomic (when grabbing the bit-spinlock), like we would already perform when updating the large mapcount. 3 CONFIG_NO_PAGE_MAPCOUNT ========================= Patches #15 -> #20 spell out and document what exactly is affected when not maintaining the per-page mapcounts in large folios anymore. Most importantly, as we cannot maintain folio->_nr_pages_mapped anymore when (un)mapping pages, we'll account a complete folio as mapped if a single page is mapped. In addition, we'll not detect partially mapped anonymous folios as such in all cases yet. 
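As a rough illustration of the two-slot MM owner tracking described in section 2 above, the following user-space model reports a folio as "certainly mapped exclusively" only when one tracked MM accounts for every mapping, and as "maybe mapped shared" otherwise. The struct layout, field names and the MM_ID_DUMMY sentinel are invented for the example; the real implementation packs the IDs and per-MM mapcounts into two unsigned longs and serializes updates with a bit-spinlock:

#include <stdbool.h>
#include <stdio.h>

#define MM_ID_DUMMY 0 /* slot currently unused */

struct folio_mm_slots {
    int large_mapcount;             /* total mappings of the folio */
    unsigned int mm0_id, mm1_id;    /* tracked MMs */
    int mm0_mapcount, mm1_mapcount; /* their per-MM mapcounts */
};

/* "Certainly exclusive" iff one tracked MM owns every mapping; untracked
 * MMs can only push the result towards "maybe shared", never the reverse. */
static bool folio_maybe_mapped_shared_model(const struct folio_mm_slots *f)
{
    if (f->mm0_id != MM_ID_DUMMY && f->mm0_mapcount == f->large_mapcount)
        return false;
    if (f->mm1_id != MM_ID_DUMMY && f->mm1_mapcount == f->large_mapcount)
        return false;
    return true;
}

int main(void)
{
    struct folio_mm_slots f = { 512, 7, MM_ID_DUMMY, 512, 0 };

    printf("%d\n", folio_maybe_mapped_shared_model(&f)); /* 0: MM 7 owns all 512 mappings */
    f.mm1_id = 9; f.mm1_mapcount = 1; f.large_mapcount = 513;
    printf("%d\n", folio_maybe_mapped_shared_model(&f)); /* 1: maybe mapped shared */
    return 0;
}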
Likely less relevant changes include that we might now under-estimate the USS (Unique Set Size) of a process, but never over-estimate it. The goal is to make CONFIG_NO_PAGE_MAPCOUNT the default at some point, to then slowly make it the only option, as we learn about real-life impacts and possible ways to mitigate them. 4 Performance ============= Detailed performance numbers were included in v1 [3], and not that much changed between v1 and v2. I did plenty of measurements on different systems in the meantime, which all revealed slightly different results. The pte-mapped-folio micro-benchmarks [4] are fairly sensitive to code layout changes on some systems. Especially the fork() benchmark started being more-shaky-than-before on recent kernels for some reason. In summary, with my micro-benchmarks: * Small folios are not impacted. * CoW performance seems to be mostly unchanged across all folio sizes. * CoW reuse performance of large folios now matches CoW reuse performance of small folios, because we now actually implement the CoW reuse optimization. On an Intel Xeon Silver 4210R I measured a ~65% reduction in runtime, on an arm64 system I measured ~54% reduction. * munmap() performance improves with CONFIG_NO_PAGE_MAPCOUNT. I saw double-digit % reduction (up to ~30% on an Intel Xeon Silver 4210R and up to ~70% on an AmpereOne A192-32X) with larger folios. The larger the folios, the larger the performance improvement. * munmap() performance very slightly (couple percent) degrades without CONFIG_NO_PAGE_MAPCOUNT for smaller folios. For larger folios, there seems to be no change at all. * fork() performance improves with CONFIG_NO_PAGE_MAPCOUNT. I saw double-digit % reduction (up to ~20% on an Intel Xeon Silver 4210R and up to ~10% on an AmpereOne A192-32X) with larger folios. The larger the folios, the larger the performance improvement. * While fork() performance without CONFIG_NO_PAGE_MAPCOUNT seems to be almost unchanged on some systems, I saw some degradation for smaller folios on the AmpereOne A192-32X. I did not investigate the details yet, but I suspect code layout changes or suboptimal code placement / inlining. I'm not too worried about the fork() micro-benchmarks for smaller folios given how shaky the results are lately and how much we improved fork() performance recently. I also ran case-anon-cow-rand and case-anon-cow-seq, which are part of vm-scalability, to assess the scalability and the impact of the bit-spinlock. My measurements on a 2-socket 10-core Intel Xeon Silver 4210R system revealed no significant changes. Similarly, running these benchmarks with 2 MiB THPs enabled on the AmpereOne A192-32X with 192 cores, I got < 1% difference with < 1% stdev, which is nice. So far, I did not get my hands on a similarly large system with multiple sockets. I found no other fitting scalability benchmarks that seem to really hammer on concurrent mapping/unmapping of large folio pages like case-anon-cow-seq does. 5 Concerns ========== 5.1 Bit spinlock ---------------- I'm not quite happy about the bit-spinlock, but so far it does not seem to affect scalability in my measurements. If it ever becomes a problem we could either investigate improving the locking, or simply stop the MM tracking once there are "too many mappings" and simply assume that the folio is "mapped shared" until it was freed. This would be similar (but slightly different) to the "0,1,2,stopped" counting idea Willy had at some point. Adding that logic to "stop tracking" adds more code to the hot path, so I avoided that for now. 
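For readers unfamiliar with the pattern, a bit-spinlock simply repurposes one bit of an existing word as the lock, so acquiring it is a single atomic read-modify-write on data that is being touched anyway. The sketch below is a user-space analogue built on C11 atomics, not the kernel's bit_spin_lock(); the bit position and the mm_ids naming are made up for illustration:

#include <stdatomic.h>
#include <stdio.h>

#define MM_IDS_LOCK_BIT (1u << 31) /* high bit of the word guards the slots */

static void mm_ids_lock(_Atomic unsigned int *mm_ids)
{
    /* One atomic RMW per acquisition attempt; spin while the bit is held. */
    while (atomic_fetch_or_explicit(mm_ids, MM_IDS_LOCK_BIT,
                                    memory_order_acquire) & MM_IDS_LOCK_BIT)
        ;
}

static void mm_ids_unlock(_Atomic unsigned int *mm_ids)
{
    atomic_fetch_and_explicit(mm_ids, ~MM_IDS_LOCK_BIT, memory_order_release);
}

int main(void)
{
    _Atomic unsigned int mm_ids = 0;

    mm_ids_lock(&mm_ids);
    /* ... update MM id slots and the large mapcount here ... */
    mm_ids_unlock(&mm_ids);
    printf("%#x\n", atomic_load(&mm_ids)); /* 0: lock bit cleared again */
    return 0;
}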
5.2 folio_maybe_mapped_shared() ------------------------------- I documented the change from folio_likely_mapped_shared() to folio_maybe_mapped_shared() quite extensively. If we run into surprises, I have some ideas on how to resolve them. For now, I think we should be fine. 5.3 Added code to map/unmap hot path ------------------------------------ So far, it looks like the added code on the rmap hot path does not really seem to matter much in the bigger picture. I'd like to further reduce it (and possibly improve fork() performance further), but I don't easily see how right now. Well, and I am out of puff 🙂 Having that said, alternatives I considered (e.g., per-MM per-folio mapcount) would add a lot more overhead to these hot paths. 6 Future Work ============= 6.1 Large mapcount ------------------ It would be very handy if the large mapcount would count how often folio pages are actually mapped into page tables: a PMD on x86-64 would count 512 times. Calculating the average per-page mapcount will be easy, and remapping (PMD->PTE) folios would get even faster. That would also remove the need for the entire mapcount (except for PMD-sized folios for memory statistics reasons ...), and allow for mapping folios larger than PMDs (e.g., 4 MiB) easily. We likely would also have to take the same number of folio references to make our folio_mapcount() == folio_ref_count() work, and we'd want to be able to avoid mapcount+refcount overflows: this could already become an issue with pte-mapped PUD-sized folios (fsdax). One approach we discussed in the THP cabal meeting is (1) extending the mapcount for large folios to 64bit (at least on 64bit systems) and (2) keeping the refcount at 32bit, but (3) having exactly one reference if the the mapcount != 0. It should be doable, but there are some corner cases to consider on the unmap path; it is something that I will be looking into next. 6.2 hugetlb ----------- I'd love to make use of the same tracking also for hugetlb. The real problem is PMD table sharing: getting a page mapped by MM X and unmapped by MM Y will not work. With mshare, that problem should not exist (all mapping/unmapping will be routed through the mshare MM). [1] https://lwn.net/Articles/974223/ [2] https://lore.kernel.org/linux-mm/a9922f58-8129-4f15-b160-e0ace581bcbe@redhat.com/T/ [3] https://lkml.kernel.org/r/20240829165627.2256514-1-david@redhat.com [4] https://gitlab.com/davidhildenbrand/scratchspace/-/raw/main/pte-mapped-folio-benchmarks.c This patch (of 20): Let's factor it out into a simple helper function. This helper will also come in handy when working with code where we know that our folio is large. Maybe in the future we'll have the order readily available for small and large folios; in that case, folio_large_order() would simply translate to folio_order(). Link: https://lkml.kernel.org/r/20250303163014.1128035-1-david@redhat.com Link: https://lkml.kernel.org/r/20250303163014.1128035-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lance Yang Reviewed-by: Kirill A. 
Shutemov Cc: Thomas Gleixner Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: David Hildenbrand Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index de008efd96aa..e5cdbb94ef81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1194,6 +1194,11 @@ struct inode; extern void prep_compound_page(struct page *page, unsigned int order); +static inline unsigned int folio_large_order(const struct folio *folio) +{ + return folio->_flags_1 & 0xff; +} + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -1207,7 +1212,7 @@ static inline unsigned int compound_order(struct page *page) if (!test_bit(PG_head, &folio->flags)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } /** @@ -1223,7 +1228,7 @@ static inline unsigned int folio_order(const struct folio *folio) { if (!folio_test_large(folio)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } #include @@ -2145,7 +2150,7 @@ static inline long folio_nr_pages(const struct folio *folio) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << (folio->_flags_1 & 0xff); + return 1L << folio_large_order(folio); #endif } @@ -2170,7 +2175,7 @@ static inline unsigned long compound_nr(struct page *page) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << (folio->_flags_1 & 0xff); + return 1L << folio_large_order(folio); #endif } From 1ea5212aed0682ae1249cba9df7ad7ef539d0a7d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:55 +0100 Subject: [PATCH 304/431] mm: factor out large folio handling from folio_nr_pages() into folio_large_nr_pages() Let's factor it out into a simple helper function. This helper will also come in handy when working with code where we know that our folio is large. While at it, let's consistently return a "long" value from all these similar functions. Note that we cannot use "unsigned int" (even though _folio_nr_pages is of that type), because it would break some callers that do stuff like "-folio_nr_pages()". Both "int" or "unsigned long" would work as well. Maybe in the future we'll have the nr_pages readily available for all large folios, maybe even for small folios, or maybe for none. Link: https://lkml.kernel.org/r/20250303163014.1128035-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Kirill A. 
Shutemov Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index e5cdbb94ef81..cf892c08a88c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1199,6 +1199,18 @@ static inline unsigned int folio_large_order(const struct folio *folio) return folio->_flags_1 & 0xff; } +#ifdef CONFIG_64BIT +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return folio->_folio_nr_pages; +} +#else +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return 1L << folio_large_order(folio); +} +#endif + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -2147,11 +2159,7 @@ static inline long folio_nr_pages(const struct folio *folio) { if (!folio_test_large(folio)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << folio_large_order(folio); -#endif + return folio_large_nr_pages(folio); } /* Only hugetlbfs can allocate folios larger than MAX_ORDER */ @@ -2166,24 +2174,20 @@ static inline long folio_nr_pages(const struct folio *folio) * page. compound_nr() can be called on a tail page, and is defined to * return 1 in that case. */ -static inline unsigned long compound_nr(struct page *page) +static inline long compound_nr(struct page *page) { struct folio *folio = (struct folio *)page; if (!test_bit(PG_head, &folio->flags)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << folio_large_order(folio); -#endif + return folio_large_nr_pages(folio); } /** * thp_nr_pages - The number of regular pages in this huge page. * @page: The head page of a huge page. */ -static inline int thp_nr_pages(struct page *page) +static inline long thp_nr_pages(struct page *page) { return folio_nr_pages((struct folio *)page); } From 4996fc547f5b49f4a43c261dfadb02cf165cdb51 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:56 +0100 Subject: [PATCH 305/431] mm: let _folio_nr_pages overlay memcg_data in first tail page Let's free up some more of the "unconditionally available on 64BIT" space in order-1 folios by letting _folio_nr_pages overlay memcg_data in the first tail page (second folio page). Consequently, we have the optimization now whenever we have CONFIG_MEMCG, independent of 64BIT. We have to make sure that page->memcg on tail pages does not return "surprises". page_memcg_check() already properly refuses PageTail(). Let's do that earlier in print_page_owner_memcg() to avoid printing wrong "Slab cache page" information. No other code should touch that field on tail pages of compound pages. Reset the "_nr_pages" to 0 when splitting folios, or when freeing them back to the buddy (to avoid false page->memcg_data "bad page" reports). Note that in __split_huge_page(), folio_nr_pages() would stop working already as soon as we start messing with the subpages. Most kernel configs should have at least CONFIG_MEMCG enabled, even if disabled at runtime. 64byte "struct memmap" is what we usually have on 64BIT. 
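The overlay works because struct folio is just another view of the same consecutive struct pages, so a field declared in the folio view can be made to land exactly on memcg_data of the first tail page, with a FOLIO_MATCH-style static_assert pinning the aliasing down at compile time. The toy layout below is invented to demonstrate that mechanism and is not the real struct page/folio:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_page {
    unsigned long flags;
    unsigned long compound_head;
    unsigned long memcg_data;
};

/* Folio view: the head page followed by the fields of the first tail page,
 * with _nr_pages deliberately placed on top of that page's memcg_data. */
struct toy_folio {
    struct toy_page page0;
    unsigned long _flags_1;
    unsigned long _head_1;
    unsigned long _nr_pages; /* overlays page[1].memcg_data */
};

static_assert(offsetof(struct toy_folio, _nr_pages) ==
              sizeof(struct toy_page) + offsetof(struct toy_page, memcg_data),
              "_nr_pages must overlay memcg_data of the first tail page");

int main(void)
{
    printf("overlay offset: %zu bytes\n", offsetof(struct toy_folio, _nr_pages));
    return 0;
}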
While at it, rename "_folio_nr_pages" to "_nr_pages". Hopefully memdescs / dynamically allocating "strut folio" in the future will further clean this up, e.g., making _nr_pages available in all configs and maybe even in small folios. Doing that should be fairly easy on top of this change. [david@redhat.com: make "make htmldoc" happy] Link: https://lkml.kernel.org/r/a97f8a91-ec41-4796-81e3-7c9e0e491ba4@redhat.com Link: https://lkml.kernel.org/r/20250303163014.1128035-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Kirill A. Shutemov Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 4 ++-- include/linux/mm_types.h | 32 ++++++++++++++++++++++++-------- mm/huge_memory.c | 8 ++++++++ mm/internal.h | 4 ++-- mm/page_alloc.c | 6 +++++- mm/page_owner.c | 2 +- 6 files changed, 42 insertions(+), 14 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index cf892c08a88c..c9c2ca345350 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1199,10 +1199,10 @@ static inline unsigned int folio_large_order(const struct folio *folio) return folio->_flags_1 & 0xff; } -#ifdef CONFIG_64BIT +#ifdef NR_PAGES_IN_LARGE_FOLIO static inline long folio_large_nr_pages(const struct folio *folio) { - return folio->_folio_nr_pages; + return folio->_nr_pages; } #else static inline long folio_large_nr_pages(const struct folio *folio) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b1827d78ff89..f3b519fbeca1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -287,6 +287,11 @@ typedef struct { unsigned long val; } swp_entry_t; +#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT) +/* We have some extra room after the refcount in tail pages. */ +#define NR_PAGES_IN_LARGE_FOLIO +#endif + /** * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. @@ -312,7 +317,7 @@ typedef struct { * @_large_mapcount: Do not use directly, call folio_mapcount(). * @_nr_pages_mapped: Do not use outside of rmap and debug code. * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). - * @_folio_nr_pages: Do not use directly, call folio_nr_pages(). + * @_nr_pages: Do not use directly, call folio_nr_pages(). * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h. * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h. * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h. 
@@ -376,14 +381,23 @@ struct folio { struct { unsigned long _flags_1; unsigned long _head_1; + union { + struct { /* public: */ - atomic_t _large_mapcount; - atomic_t _entire_mapcount; - atomic_t _nr_pages_mapped; - atomic_t _pincount; -#ifdef CONFIG_64BIT - unsigned int _folio_nr_pages; -#endif + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + /* private: the union with struct page is transitional */ + }; + unsigned long _usable_1[4]; + }; + atomic_t _mapcount_1; + atomic_t _refcount_1; + /* public: */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + unsigned int _nr_pages; +#endif /* NR_PAGES_IN_LARGE_FOLIO */ /* private: the union with struct page is transitional */ }; struct page __page_1; @@ -435,6 +449,8 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid); offsetof(struct page, pg) + sizeof(struct page)) FOLIO_MATCH(flags, _flags_1); FOLIO_MATCH(compound_head, _head_1); +FOLIO_MATCH(_mapcount, _mapcount_1); +FOLIO_MATCH(_refcount, _refcount_1); #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9a15fd3453ff..2364990974ba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3399,6 +3399,14 @@ static void __split_huge_page(struct page *page, struct list_head *list, int order = folio_order(folio); unsigned int nr = 1 << order; + /* + * Reset any memcg data overlay in the tail pages. folio_nr_pages() + * is unreliable after this point. + */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + /* complete memcg works before add pages to LRU */ split_page_memcg(head, order, new_order); diff --git a/mm/internal.h b/mm/internal.h index 1ac433d2092b..1cd977413859 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -725,8 +725,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) return; folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 1U << order; #endif } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 57f959af79c5..e1135dff9a86 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1174,8 +1174,12 @@ __always_inline bool free_pages_prepare(struct page *page, if (unlikely(order)) { int i; - if (compound) + if (compound) { page[1].flags &= ~PAGE_FLAGS_SECOND; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + } for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_page_prepare(page, page + i); diff --git a/mm/page_owner.c b/mm/page_owner.c index 2d6360eaccbb..a409e2561a8f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -507,7 +507,7 @@ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret, rcu_read_lock(); memcg_data = READ_ONCE(page->memcg_data); - if (!memcg_data) + if (!memcg_data || PageTail(page)) goto out_unlock; if (memcg_data & MEMCG_DATA_OBJEXTS) From 4eeec8c89a0c4a8c20fb13a4e7093cc8efce383d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:57 +0100 Subject: [PATCH 306/431] mm: move hugetlb specific things in folio to page[3] Let's just move the hugetlb specific stuff to a separate page, and stop letting it overlay other fields for now. This frees up some space in page[2], which we will use on 32bit to free up some space in page[1]. While we could move these things to page[3] instead, it's cleaner to just move the hugetlb specific things out of the way and pack the core-folio stuff as tight as possible. 
... and we can minimize the work required in dump_folio. We can now avoid re-initializing &folio->_deferred_list in hugetlb code. Hopefully dynamically allocating "strut folio" in the future will further clean this up. Link: https://lkml.kernel.org/r/20250303163014.1128035-5-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 27 +++++++++++++++++---------- mm/hugetlb.c | 1 - mm/page_alloc.c | 5 +++++ 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index f3b519fbeca1..727322ecbfdd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -406,6 +406,16 @@ struct folio { struct { unsigned long _flags_2; unsigned long _head_2; + /* public: */ + struct list_head _deferred_list; + /* private: the union with struct page is transitional */ + }; + struct page __page_2; + }; + union { + struct { + unsigned long _flags_3; + unsigned long _head_3; /* public: */ void *_hugetlb_subpool; void *_hugetlb_cgroup; @@ -413,14 +423,7 @@ struct folio { void *_hugetlb_hwpoison; /* private: the union with struct page is transitional */ }; - struct { - unsigned long _flags_2a; - unsigned long _head_2a; - /* public: */ - struct list_head _deferred_list; - /* private: the union with struct page is transitional */ - }; - struct page __page_2; + struct page __page_3; }; }; @@ -457,8 +460,12 @@ FOLIO_MATCH(_refcount, _refcount_1); offsetof(struct page, pg) + 2 * sizeof(struct page)) FOLIO_MATCH(flags, _flags_2); FOLIO_MATCH(compound_head, _head_2); -FOLIO_MATCH(flags, _flags_2a); -FOLIO_MATCH(compound_head, _head_2a); +#undef FOLIO_MATCH +#define FOLIO_MATCH(pg, fl) \ + static_assert(offsetof(struct folio, fl) == \ + offsetof(struct page, pg) + 3 * sizeof(struct page)) +FOLIO_MATCH(flags, _flags_3); +FOLIO_MATCH(compound_head, _head_3); #undef FOLIO_MATCH /** diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7a47a08e8526..438de55dd38d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1649,7 +1649,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, folio_ref_unfreeze(folio, 1); - INIT_LIST_HEAD(&folio->_deferred_list); hugetlb_free_folio(folio); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e1135dff9a86..735192222c36 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -971,6 +971,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) goto out; } break; + case 3: + /* the third tail page: hugetlb specifics overlap ->mappings */ + if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) + break; + fallthrough; default: if (page->mapping != TAIL_MAPPING) { bad_page(page, "corrupted mapping in tail page"); From 31a31da8a6187f1e5448ec73222e01d7d3fed4aa Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:58 +0100 Subject: [PATCH 307/431] mm: move _pincount in folio to page[2] on 32bit Let's free up some space on 32bit in page[1] by moving the _pincount to page[2]. For order-1 folios (never anon folios!) on 32bit, we will now also use the GUP_PIN_COUNTING_BIAS approach. A fully-mapped order-1 folio requires 2 references. 
With GUP_PIN_COUNTING_BIAS being 1024, we'd detect such folios as "maybe pinned" with 512 full mappings, instead of 1024 for order-0. As anon folios are out of the picture (which are the most relevant users of checking for pinnings on *mapped* pages) and we are talking about 32bit, this is not expected to cause any trouble. In __dump_page(), copy one additional folio page if we detect a folio with an order > 1, so we can dump the pincount on order > 1 folios reliably. Note that THPs on 32bit are not particularly common (and we don't care too much about performance), but we want to keep it working reliably, because likely we want to use large folios there as well in the future, independent of PMD leaf support. Once we dynamically allocate "struct folio", fortunately the 32bit specifics will likely go away again; even small folios could then have a pincount and folio_has_pincount() would essentially always return "true". Link: https://lkml.kernel.org/r/20250303163014.1128035-6-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 11 +++++++++-- include/linux/mm_types.h | 5 +++++ mm/debug.c | 10 +++++++++- mm/gup.c | 8 ++++---- mm/internal.h | 3 ++- mm/page_alloc.c | 14 +++++++++++--- 6 files changed, 40 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index c9c2ca345350..860082ba8978 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2010,6 +2010,13 @@ static inline struct folio *pfn_folio(unsigned long pfn) return page_folio(pfn_to_page(pfn)); } +static inline bool folio_has_pincount(const struct folio *folio) +{ + if (IS_ENABLED(CONFIG_64BIT)) + return folio_test_large(folio); + return folio_order(folio) > 1; +} + /** * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. * @folio: The folio. @@ -2026,7 +2033,7 @@ static inline struct folio *pfn_folio(unsigned long pfn) * get that many refcounts, and b) all the callers of this routine are * expected to be able to deal gracefully with a false positive. * - * For large folios, the result will be exactly correct. That's because + * For most large folios, the result will be exactly correct. That's because * we have more tracking data available: the _pincount field is used * instead of the GUP_PIN_COUNTING_BIAS scheme. 
* @@ -2037,7 +2044,7 @@ static inline struct folio *pfn_folio(unsigned long pfn) */ static inline bool folio_maybe_dma_pinned(struct folio *folio) { - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) return atomic_read(&folio->_pincount) > 0; /* diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 727322ecbfdd..3ea2019a1aac 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -387,7 +387,9 @@ struct folio { atomic_t _large_mapcount; atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; +#ifdef CONFIG_64BIT atomic_t _pincount; +#endif /* CONFIG_64BIT */ /* private: the union with struct page is transitional */ }; unsigned long _usable_1[4]; @@ -408,6 +410,9 @@ struct folio { unsigned long _head_2; /* public: */ struct list_head _deferred_list; +#ifndef CONFIG_64BIT + atomic_t _pincount; +#endif /* !CONFIG_64BIT */ /* private: the union with struct page is transitional */ }; struct page __page_2; diff --git a/mm/debug.c b/mm/debug.c index 2d1bd67d957b..83ef3bd0ccd3 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -79,12 +79,17 @@ static void __dump_folio(struct folio *folio, struct page *page, folio_ref_count(folio), mapcount, mapping, folio->index + idx, pfn); if (folio_test_large(folio)) { + int pincount = 0; + + if (folio_has_pincount(folio)) + pincount = atomic_read(&folio->_pincount); + pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", folio_order(folio), folio_mapcount(folio), folio_entire_mapcount(folio), folio_nr_pages_mapped(folio), - atomic_read(&folio->_pincount)); + pincount); } #ifdef CONFIG_MEMCG @@ -146,6 +151,9 @@ static void __dump_page(const struct page *page) if (idx < MAX_FOLIO_NR_PAGES) { memcpy(&folio, foliop, 2 * sizeof(struct page)); nr_pages = folio_nr_pages(&folio); + if (nr_pages > 1) + memcpy(&folio.__page_2, &foliop->__page_2, + sizeof(struct page)); foliop = &folio; } diff --git a/mm/gup.c b/mm/gup.c index e5040657870e..2944fe8cf317 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -109,7 +109,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) if (is_zero_folio(folio)) return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) atomic_sub(refs, &folio->_pincount); else refs *= GUP_PIN_COUNTING_BIAS; @@ -164,7 +164,7 @@ int __must_check try_grab_folio(struct folio *folio, int refs, * Increment the normal page refcount field at least once, * so that the page really is pinned. */ - if (folio_test_large(folio)) { + if (folio_has_pincount(folio)) { folio_ref_add(folio, refs); atomic_add(refs, &folio->_pincount); } else { @@ -223,7 +223,7 @@ void folio_add_pin(struct folio *folio) * page refcount field at least once, so that the page really is * pinned. */ - if (folio_test_large(folio)) { + if (folio_has_pincount(folio)) { WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); folio_ref_inc(folio); atomic_inc(&folio->_pincount); @@ -575,7 +575,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, * is pinned. That's why the refcount from the earlier * try_get_folio() is left intact. 
*/ - if (folio_test_large(folio)) + if (folio_has_pincount(folio)) atomic_add(refs, &folio->_pincount); else folio_ref_add(folio, diff --git a/mm/internal.h b/mm/internal.h index 1cd977413859..2d44a4c9d282 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -764,7 +764,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order) atomic_set(&folio->_large_mapcount, -1); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); - atomic_set(&folio->_pincount, 0); + if (IS_ENABLED(CONFIG_64BIT) || order > 1) + atomic_set(&folio->_pincount, 0); if (order > 1) INIT_LIST_HEAD(&folio->_deferred_list); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 735192222c36..2a9aa4439a66 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -959,9 +959,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) bad_page(page, "nonzero nr_pages_mapped"); goto out; } - if (unlikely(atomic_read(&folio->_pincount))) { - bad_page(page, "nonzero pincount"); - goto out; + if (IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_pincount))) { + bad_page(page, "nonzero pincount"); + goto out; + } } break; case 2: @@ -970,6 +972,12 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) bad_page(page, "on deferred list"); goto out; } + if (!IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_pincount))) { + bad_page(page, "nonzero pincount"); + goto out; + } + } break; case 3: /* the third tail page: hugetlb specifics overlap ->mappings */ From 845d2be6d41f016da670dcc4c8f5357c22172be8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:59 +0100 Subject: [PATCH 308/431] mm: move _entire_mapcount in folio to page[2] on 32bit Let's free up some space on 32bit in page[1] by moving the _pincount to page[2]. Ordinary folios only use the entire mapcount with PMD mappings, so order-1 folios don't apply. Similarly, hugetlb folios are always larger than order-1, turning the entire mapcount essentially unused for all order-1 folios. Moving it to order-1 folios will not change anything. On 32bit, simply check in folio_entire_mapcount() whether we have an order-1 folio, and return 0 in that case. Note that THPs on 32bit are not particularly common (and we don't care too much about performance), but we want to keep it working reliably, because likely we want to use large folios there as well in the future, independent of PMD leaf support. Once we dynamically allocate "struct folio", the 32bit specifics will go away again; even small folios could then have a pincount. Link: https://lkml.kernel.org/r/20250303163014.1128035-7-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. 
Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 ++ include/linux/mm_types.h | 3 ++- mm/internal.h | 5 +++-- mm/page_alloc.c | 12 ++++++++---- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 860082ba8978..f366c180f2b6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1333,6 +1333,8 @@ static inline int is_vmalloc_or_module_addr(const void *x) static inline int folio_entire_mapcount(const struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1)) + return 0; return atomic_read(&folio->_entire_mapcount) + 1; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3ea2019a1aac..9499eb8e8e66 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -385,9 +385,9 @@ struct folio { struct { /* public: */ atomic_t _large_mapcount; - atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; #ifdef CONFIG_64BIT + atomic_t _entire_mapcount; atomic_t _pincount; #endif /* CONFIG_64BIT */ /* private: the union with struct page is transitional */ @@ -411,6 +411,7 @@ struct folio { /* public: */ struct list_head _deferred_list; #ifndef CONFIG_64BIT + atomic_t _entire_mapcount; atomic_t _pincount; #endif /* !CONFIG_64BIT */ /* private: the union with struct page is transitional */ diff --git a/mm/internal.h b/mm/internal.h index 2d44a4c9d282..fcf0aeae3934 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -762,10 +762,11 @@ static inline void prep_compound_head(struct page *page, unsigned int order) folio_set_order(folio, order); atomic_set(&folio->_large_mapcount, -1); - atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); - if (IS_ENABLED(CONFIG_64BIT) || order > 1) + if (IS_ENABLED(CONFIG_64BIT) || order > 1) { atomic_set(&folio->_pincount, 0); + atomic_set(&folio->_entire_mapcount, -1); + } if (order > 1) INIT_LIST_HEAD(&folio->_deferred_list); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2a9aa4439a66..e456a43811fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -947,10 +947,6 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) switch (page - head_page) { case 1: /* the first tail page: these may be in place of ->mapping */ - if (unlikely(folio_entire_mapcount(folio))) { - bad_page(page, "nonzero entire_mapcount"); - goto out; - } if (unlikely(folio_large_mapcount(folio))) { bad_page(page, "nonzero large_mapcount"); goto out; @@ -960,6 +956,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) goto out; } if (IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { + bad_page(page, "nonzero entire_mapcount"); + goto out; + } if (unlikely(atomic_read(&folio->_pincount))) { bad_page(page, "nonzero pincount"); goto out; @@ -973,6 +973,10 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) goto out; } if (!IS_ENABLED(CONFIG_64BIT)) { + if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { + bad_page(page, "nonzero entire_mapcount"); + goto out; + } if (unlikely(atomic_read(&folio->_pincount))) { bad_page(page, "nonzero pincount"); goto out; From 405c4ef769c7c5e83e3556b48a190fe1afe82425 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:00 
+0100 Subject: [PATCH 309/431] mm/rmap: pass dst_vma to folio_dup_file_rmap_pte() and friends We'll need access to the destination MM when modifying the large mapcount of a non-hugetlb large folios next. So pass in the destination VMA. Link: https://lkml.kernel.org/r/20250303163014.1128035-8-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/rmap.h | 42 +++++++++++++++++++++++++----------------- mm/huge_memory.c | 2 +- mm/memory.c | 10 +++++----- 3 files changed, 31 insertions(+), 23 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 6abf7960077a..e795610bade8 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -335,7 +335,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio) } static __always_inline void __folio_dup_file_rmap(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + enum rmap_level level) { const int orig_nr_pages = nr_pages; @@ -366,45 +367,47 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, * @folio: The folio to duplicate the mappings of * @page: The first page to duplicate the mappings of * @nr_pages: The number of pages of which the mapping will be duplicated + * @dst_vma: The destination vm area * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ static inline void folio_dup_file_rmap_ptes(struct folio *folio, - struct page *page, int nr_pages) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma) { - __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE); } static __always_inline void folio_dup_file_rmap_pte(struct folio *folio, - struct page *page) + struct page *page, struct vm_area_struct *dst_vma) { - __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE); } /** * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio * @folio: The folio to duplicate the mapping of * @page: The first page to duplicate the mapping of + * @dst_vma: The destination vm area * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. 
*/ static inline void folio_dup_file_rmap_pmd(struct folio *folio, - struct page *page) + struct page *page, struct vm_area_struct *dst_vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE); + __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE); #else WARN_ON_ONCE(true); #endif } static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, - struct page *page, int nr_pages, struct vm_area_struct *src_vma, - enum rmap_level level) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, enum rmap_level level) { const int orig_nr_pages = nr_pages; bool maybe_pinned; @@ -470,6 +473,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, * @folio: The folio to duplicate the mappings of * @page: The first page to duplicate the mappings of * @nr_pages: The number of pages of which the mapping will be duplicated + * @dst_vma: The destination vm area * @src_vma: The vm area from which the mappings are duplicated * * The page range of the folio is defined by [page, page + nr_pages) @@ -488,16 +492,18 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise. */ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, - struct page *page, int nr_pages, struct vm_area_struct *src_vma) + struct page *page, int nr_pages, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { - return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, - RMAP_LEVEL_PTE); + return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma, + src_vma, RMAP_LEVEL_PTE); } static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, - struct page *page, struct vm_area_struct *src_vma) + struct page *page, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { - return __folio_try_dup_anon_rmap(folio, page, 1, src_vma, + return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma, RMAP_LEVEL_PTE); } @@ -506,6 +512,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, * of a folio * @folio: The folio to duplicate the mapping of * @page: The first page to duplicate the mapping of + * @dst_vma: The destination vm area * @src_vma: The vm area from which the mapping is duplicated * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) @@ -524,11 +531,12 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. 
*/ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, - struct page *page, struct vm_area_struct *src_vma) + struct page *page, struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, - RMAP_LEVEL_PMD); + return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma, + src_vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); return -EBUSY; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2364990974ba..22e4e1194e9e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1782,7 +1782,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, src_folio = page_folio(src_page); folio_get(src_folio); - if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) { /* Page maybe pinned: split and retry the fault on PTEs. */ folio_put(src_folio); pte_free(dst_mm, pgtable); diff --git a/mm/memory.c b/mm/memory.c index 8d1ea1dd6b52..f745f5e28f0c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -864,7 +864,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, folio_get(folio); rss[mm_counter(folio)]++; /* Cannot fail as these pages cannot get pinned. */ - folio_try_dup_anon_rmap_pte(folio, page, src_vma); + folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma); /* * We do not preserve soft-dirty information, because so @@ -1018,14 +1018,14 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, - nr, src_vma))) { + nr, dst_vma, src_vma))) { folio_ref_sub(folio, nr); return -EAGAIN; } rss[MM_ANONPAGES] += nr; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); } else { - folio_dup_file_rmap_ptes(folio, page, nr); + folio_dup_file_rmap_ptes(folio, page, nr, dst_vma); rss[mm_counter_file(folio)] += nr; } if (any_writable) @@ -1043,7 +1043,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma * guarantee the pinned page won't be randomly replaced in the * future. */ - if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte, @@ -1053,7 +1053,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma rss[MM_ANONPAGES]++; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); } else { - folio_dup_file_rmap_pte(folio, page); + folio_dup_file_rmap_pte(folio, page, dst_vma); rss[mm_counter_file(folio)]++; } From 1862a4af107ec8fc090f291fa9273c3f91c406a0 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:01 +0100 Subject: [PATCH 310/431] mm/rmap: pass vma to __folio_add_rmap() We'll need access to the destination MM when modifying the mapcount large folios next. So pass in the VMA. Link: https://lkml.kernel.org/r/20250303163014.1128035-9-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. 
Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- mm/rmap.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index bcec8677f68d..8a7d023b02e0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1242,8 +1242,8 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, } static __always_inline unsigned int __folio_add_rmap(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level, - int *nr_pmdmapped) + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level, int *nr_pmdmapped) { atomic_t *mapped = &folio->_nr_pages_mapped; const int orig_nr_pages = nr_pages; @@ -1411,7 +1411,7 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); - nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); + nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); if (likely(!folio_test_ksm(folio))) __page_check_anon_rmap(folio, page, vma, address); @@ -1582,7 +1582,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio, VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); - nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); + nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped); __folio_mod_stat(folio, nr, nr_pmdmapped); /* See comments in folio_add_anon_rmap_*() */ From 932961c4b666937e27f7ec5358fb7aabf0f17d41 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:02 +0100 Subject: [PATCH 311/431] mm/rmap: abstract large mapcount operations for large folios (!hugetlb) Let's abstract the operations so we can extend these operations easily. Link: https://lkml.kernel.org/r/20250303163014.1128035-10-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/rmap.h | 32 ++++++++++++++++++++++++++++---- mm/rmap.c | 14 ++++++-------- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index e795610bade8..d1e888cc97a5 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -173,6 +173,30 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, struct anon_vma *folio_get_anon_vma(const struct folio *folio); +static inline void folio_set_large_mapcount(struct folio *folio, int mapcount, + struct vm_area_struct *vma) +{ + /* Note: mapcounts start at -1. 
*/ + atomic_set(&folio->_large_mapcount, mapcount - 1); +} + +static inline void folio_add_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + atomic_add(diff, &folio->_large_mapcount); +} + +static inline void folio_sub_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + atomic_sub(diff, &folio->_large_mapcount); +} + +#define folio_inc_large_mapcount(folio, vma) \ + folio_add_large_mapcount(folio, 1, vma) +#define folio_dec_large_mapcount(folio, vma) \ + folio_sub_large_mapcount(folio, 1, vma) + /* RMAP flags, currently only relevant for some anon rmap operations. */ typedef int __bitwise rmap_t; @@ -352,12 +376,12 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, do { atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); - atomic_add(orig_nr_pages, &folio->_large_mapcount); + folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: atomic_inc(&folio->_entire_mapcount); - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, dst_vma); break; } } @@ -451,7 +475,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, ClearPageAnonExclusive(page); atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); - atomic_add(orig_nr_pages, &folio->_large_mapcount); + folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: @@ -461,7 +485,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, ClearPageAnonExclusive(page); } atomic_inc(&folio->_entire_mapcount); - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, dst_vma); break; } return 0; diff --git a/mm/rmap.c b/mm/rmap.c index 8a7d023b02e0..08846b7eced6 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1266,7 +1266,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) nr = first; - atomic_add(orig_nr_pages, &folio->_large_mapcount); + folio_add_large_mapcount(folio, orig_nr_pages, vma); break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: @@ -1290,7 +1290,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, nr = 0; } } - atomic_inc(&folio->_large_mapcount); + folio_inc_large_mapcount(folio, vma); break; } return nr; @@ -1556,14 +1556,12 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, SetPageAnonExclusive(page); } - /* increment count (starts at -1) */ - atomic_set(&folio->_large_mapcount, nr - 1); + folio_set_large_mapcount(folio, nr, vma); atomic_set(&folio->_nr_pages_mapped, nr); } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); - /* increment count (starts at -1) */ - atomic_set(&folio->_large_mapcount, 0); + folio_set_large_mapcount(folio, 1, vma); atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); if (exclusive) SetPageAnonExclusive(&folio->page); @@ -1665,7 +1663,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, break; } - atomic_sub(nr_pages, &folio->_large_mapcount); + folio_sub_large_mapcount(folio, nr_pages, vma); do { last += atomic_add_negative(-1, &page->_mapcount); } while (page++, --nr_pages > 0); @@ -1678,7 +1676,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: - atomic_dec(&folio->_large_mapcount); + folio_dec_large_mapcount(folio, vma); last = atomic_add_negative(-1, 
&folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); From b85aa9b11b67ed4b086bf5366392adec1b4a6a81 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:03 +0100 Subject: [PATCH 312/431] bit_spinlock: __always_inline (un)lock functions The compiler might decide that it is a smart idea to not inline bit_spin_lock(), primarily when a couple of functions in the same file end up calling it. Especially when used in RMAP map/unmap code next, the compiler sometimes decides to not inline, which is then observable in some micro-benchmarks. Let's simply flag all lock/unlock functions as __always_inline; arch_test_and_set_bit_lock() and friends are already tagged like that (but not test_and_set_bit_lock() for some reason). If ever a problem, we could split it into a fast and a slow path, and only force the fast path to be inlined. But there is nothing particularly "big" here. Link: https://lkml.kernel.org/r/20250303163014.1128035-11-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/bit_spinlock.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h index bbc4730a6505..c0989b5b0407 100644 --- a/include/linux/bit_spinlock.h +++ b/include/linux/bit_spinlock.h @@ -13,7 +13,7 @@ * Don't use this unless you really need to: spin_lock() and spin_unlock() * are significantly faster. */ -static inline void bit_spin_lock(int bitnum, unsigned long *addr) +static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr) { /* * Assuming the lock is uncontended, this never enters @@ -38,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr) /* * Return true if it was acquired */ -static inline int bit_spin_trylock(int bitnum, unsigned long *addr) +static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr) { preempt_disable(); #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -54,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr) /* * bit-based spin_unlock() */ -static inline void bit_spin_unlock(int bitnum, unsigned long *addr) +static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); @@ -71,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr) * non-atomic version, which can be used eg. if the bit lock itself is * protecting the rest of the flags in the word. */ -static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) +static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr) { #ifdef CONFIG_DEBUG_SPINLOCK BUG_ON(!test_bit(bitnum, addr)); From 448854478ab2f4770f4e8b2bebff97c44e5bc54a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:04 +0100 Subject: [PATCH 313/431] mm/rmap: use folio_large_nr_pages() in add/remove functions Let's just use the "large" variant in code where we are sure that we have a large folio in our hands: this way we are sure that we don't perform any unnecessary "large" checks. 
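As a rough illustration (a simplified sketch based on the helpers used in this series, not the exact mainline definitions; the function name is illustrative only), the relationship between the generic helper and the "large" variant is:

static inline long folio_nr_pages_sketch(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return 1;			/* small folio: always exactly one page */
	return folio_large_nr_pages(folio);	/* large folio: read the stored page count */
}

Callers that already know folio_test_large() is true can call folio_large_nr_pages() directly and skip that branch.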
While at it, convert the VM_BUG_ON_VMA to a VM_WARN_ON_ONCE. Maybe in the future there will not be a difference in that regard between large and small folios; in that case, unifying the handling again will be easy. E.g., folio_large_nr_pages() will simply translate to folio_nr_pages() until we replace all instances. Link: https://lkml.kernel.org/r/20250303163014.1128035-12-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Kirill A. Shutemov Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- mm/rmap.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 08846b7eced6..c9922928616e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1274,7 +1274,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { - nr_pages = folio_nr_pages(folio); + nr_pages = folio_large_nr_pages(folio); /* * We only track PMD mappings of PMD-sized * folios separately. @@ -1522,14 +1522,11 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { - const int nr = folio_nr_pages(folio); const bool exclusive = flags & RMAP_EXCLUSIVE; - int nr_pmdmapped = 0; + int nr = 1, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); - VM_BUG_ON_VMA(address < vma->vm_start || - address + (nr << PAGE_SHIFT) > vma->vm_end, vma); /* * VM_DROPPABLE mappings don't swap; instead they're just dropped when @@ -1547,6 +1544,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, } else if (!folio_test_pmd_mappable(folio)) { int i; + nr = folio_large_nr_pages(folio); for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); @@ -1559,6 +1557,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, folio_set_large_mapcount(folio, nr, vma); atomic_set(&folio->_nr_pages_mapped, nr); } else { + nr = folio_large_nr_pages(folio); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_set_large_mapcount(folio, 1, vma); @@ -1568,6 +1567,9 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, nr_pmdmapped = nr; } + VM_WARN_ON_ONCE(address < vma->vm_start || + address + (nr << PAGE_SHIFT) > vma->vm_end); + __folio_mod_stat(folio, nr, nr_pmdmapped); mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } @@ -1681,7 +1683,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { - nr_pages = folio_nr_pages(folio); + nr_pages = folio_large_nr_pages(folio); if (level == RMAP_LEVEL_PMD) nr_pmdmapped = nr_pages; nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); From 6af8cb80d3a9a6bbd521d8a7c949b4eafb7dba5d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:05 +0100 Subject: [PATCH 314/431] mm/rmap: basic MM owner tracking for large folios (!hugetlb) For small folios, we traditionally use the mapcount 
to decide whether it was "certainly mapped exclusively" by a single MM (mapcount == 1) or whether it was "maybe mapped shared" by multiple MMs (mapcount > 1). For PMD-sized folios that were PMD-mapped, we were able to use a similar mechanism (single PMD mapping), but for PTE-mapped folios and in the future folios that span multiple PMDs, this does not work. So we need a different mechanism to handle large folios. Let's add a new mechanism to detect whether a large folio is "certainly mapped exclusively", or whether it is "maybe mapped shared". We'll use this information next to optimize CoW reuse for PTE-mapped anonymous THP, and to convert folio_likely_mapped_shared() to folio_maybe_mapped_shared(), independent of per-page mapcounts. For each large folio, we'll have two slots, whereby a slot stores: (1) an MM id: unique id assigned to each MM (2) a per-MM mapcount If a slot is unoccupied, it can be taken by the next MM that maps a folio page. In addition, we'll remember the current state -- "mapped exclusively" vs. "maybe mapped shared" -- and use a bit spinlock to sync on updates and to reduce the total number of atomic accesses on updates. In the future, it might be possible to squeeze a proper spinlock into "struct folio". For now, keep it simple, as we require the whole thing with THP only, which is incompatible with RT. As we have to squeeze this information into the "struct folio" of even folios of order-1 (2 pages), and we generally want to reduce the required metadata, we'll assign each MM a unique ID that can fit into an int. In total, we can squeeze everything into 4x int (2x long) on 64bit. 32bit support is a bit challenging, because we only have 2x long == 2x int in order-1 folios. But we can make it work for now, because we neither expect many MMs nor very large folios on 32bit. We will reliably detect folios as "mapped exclusively" vs. "mapped shared" as long as only two MMs map pages of a folio at one point in time -- for example with fork() and short-lived child processes, or with apps that hand over state from one instance to another. As soon as three MMs are involved at the same time, we might detect "maybe mapped shared" although the folio is "mapped exclusively". Example 1: (1) App1 faults in a (shmem/file-backed) folio page -> Tracked as MM0 (2) App2 faults in a folio page -> Tracked as MM1 (3) App1 unmaps all folio pages -> We will detect "mapped exclusively". Example 2: (1) App1 faults in a (shmem/file-backed) folio page -> Tracked as MM0 (2) App2 faults in a folio page -> Tracked as MM1 (3) App3 faults in a folio page -> No slot available, tracked as "unknown" (4) App1 and App2 unmap all folio pages -> We will detect "maybe mapped shared". Make use of __always_inline to keep possible performance degradation when (un)mapping large folios to a minimum. Note: by squeezing the two flags into the "unsigned long" that stores the MM ids, we can use non-atomic __bit_spin_unlock() and non-atomic setting/clearing of the "maybe mapped shared" bit, effectively not adding any new atomics on the hot path when updating the large mapcount + new metadata, which further helps reduce the runtime overhead in micro-benchmarks. Link: https://lkml.kernel.org/r/20250303163014.1128035-13-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A.
Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/mm/transhuge.rst | 8 ++ include/linux/mm_types.h | 49 ++++++++++ include/linux/page-flags.h | 4 + include/linux/rmap.h | 165 +++++++++++++++++++++++++++++++++ kernel/fork.c | 36 +++++++ mm/Kconfig | 4 + mm/internal.h | 5 + mm/page_alloc.c | 10 ++ 8 files changed, 281 insertions(+) diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index a2cd8800d527..baa17d718a76 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -120,11 +120,19 @@ pages: and also increment/decrement folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount goes from -1 to 0 or 0 to -1. + We also maintain the two slots for tracking MM owners (MM ID and + corresponding mapcount), and the current status ("maybe mapped shared" vs. + "mapped exclusively"). + - map/unmap of individual pages with PTE entry increment/decrement page->_mapcount, increment/decrement folio->_large_mapcount and also increment/decrement folio->_nr_pages_mapped when page->_mapcount goes from -1 to 0 or 0 to -1 as this counts the number of pages mapped by PTE. + We also maintain the two slots for tracking MM owners (MM ID and + corresponding mapcount), and the current status ("maybe mapped shared" vs. + "mapped exclusively"). + split_huge_page internally has to distribute the refcounts in the head page to the tail pages before clearing all PG_head/tail bits from the page structures. It can be done easily for refcounts taken by page table diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9499eb8e8e66..aac7c87b04e1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -292,6 +292,44 @@ typedef struct { #define NR_PAGES_IN_LARGE_FOLIO #endif +/* + * On 32bit, we can cut the required metadata in half, because: + * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have, + * so we can limit MM IDs to 15 bit (32767). + * (b) We don't expect folios where even a single complete PTE mapping by + * one MM would exceed 15 bits (order-15). + */ +#ifdef CONFIG_64BIT +typedef int mm_id_mapcount_t; +#define MM_ID_MAPCOUNT_MAX INT_MAX +typedef unsigned int mm_id_t; +#else /* !CONFIG_64BIT */ +typedef short mm_id_mapcount_t; +#define MM_ID_MAPCOUNT_MAX SHRT_MAX +typedef unsigned short mm_id_t; +#endif /* CONFIG_64BIT */ + +/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */ +#define MM_ID_DUMMY 0 +#define MM_ID_MIN (MM_ID_DUMMY + 1) + +/* + * We leave the highest bit of each MM id unused, so we can store a flag + * in the highest bit of each folio->_mm_id[]. + */ +#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1) +#define MM_ID_MASK ((1U << MM_ID_BITS) - 1) +#define MM_ID_MAX MM_ID_MASK + +/* + * In order to use bit_spin_lock(), which requires an unsigned long, we + * operate on folio->_mm_ids when working on flags. + */ +#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS +#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM) +#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1) +#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM) + /** * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. @@ -318,6 +356,9 @@ typedef struct { * @_nr_pages_mapped: Do not use outside of rmap and debug code. 
* @_pincount: Do not use directly, call folio_maybe_dma_pinned(). * @_nr_pages: Do not use directly, call folio_nr_pages(). + * @_mm_id: Do not use outside of rmap code. + * @_mm_ids: Do not use outside of rmap code. + * @_mm_id_mapcount: Do not use outside of rmap code. * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h. * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h. * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h. @@ -390,6 +431,11 @@ struct folio { atomic_t _entire_mapcount; atomic_t _pincount; #endif /* CONFIG_64BIT */ + mm_id_mapcount_t _mm_id_mapcount[2]; + union { + mm_id_t _mm_id[2]; + unsigned long _mm_ids; + }; /* private: the union with struct page is transitional */ }; unsigned long _usable_1[4]; @@ -1114,6 +1160,9 @@ struct mm_struct { #endif } lru_gen; #endif /* CONFIG_LRU_GEN_WALKS_MMU */ +#ifdef CONFIG_MM_ID + mm_id_t mm_id; +#endif /* CONFIG_MM_ID */ } __randomize_layout; /* diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index cab382bd965e..f26ce54c7aa4 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1185,6 +1185,10 @@ static inline int folio_has_private(const struct folio *folio) return !!(folio->flags & PAGE_FLAGS_PRIVATE); } +static inline bool folio_test_large_maybe_mapped_shared(const struct folio *folio) +{ + return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids); +} #undef PF_ANY #undef PF_HEAD #undef PF_NO_TAIL diff --git a/include/linux/rmap.h b/include/linux/rmap.h index d1e888cc97a5..c131b0efff0f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -13,6 +13,7 @@ #include #include #include +#include /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -173,6 +174,169 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, struct anon_vma *folio_get_anon_vma(const struct folio *folio); +#ifdef CONFIG_MM_ID +static __always_inline void folio_lock_large_mapcount(struct folio *folio) +{ + bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); +} + +static __always_inline void folio_unlock_large_mapcount(struct folio *folio) +{ + __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids); +} + +static inline unsigned int folio_mm_id(const struct folio *folio, int idx) +{ + VM_WARN_ON_ONCE(idx != 0 && idx != 1); + return folio->_mm_id[idx] & MM_ID_MASK; +} + +static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id) +{ + VM_WARN_ON_ONCE(idx != 0 && idx != 1); + folio->_mm_id[idx] &= ~MM_ID_MASK; + folio->_mm_id[idx] |= id; +} + +static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio, + int diff, mm_id_t mm_id) +{ + VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio)); + VM_WARN_ON_ONCE(diff <= 0); + VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX); + + /* + * Make sure we can detect at least one complete PTE mapping of the + * folio in a single MM as "exclusively mapped". This is primarily + * a check on 32bit, where we currently reduce the size of the per-MM + * mapcount to a short. 
+ */ + VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio)); + VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX); + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY && + folio->_mm_id_mapcount[0] != -1); + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY && + folio->_mm_id_mapcount[0] < 0); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY && + folio->_mm_id_mapcount[1] != -1); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY && + folio->_mm_id_mapcount[1] < 0); + VM_WARN_ON_ONCE(!folio_mapped(folio) && + folio_test_large_maybe_mapped_shared(folio)); +} + +static __always_inline void folio_set_large_mapcount(struct folio *folio, + int mapcount, struct vm_area_struct *vma) +{ + __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id); + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY); + VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY); + + /* Note: mapcounts start at -1. */ + atomic_set(&folio->_large_mapcount, mapcount - 1); + folio->_mm_id_mapcount[0] = mapcount - 1; + folio_set_mm_id(folio, 0, vma->vm_mm->mm_id); +} + +static __always_inline void folio_add_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + const mm_id_t mm_id = vma->vm_mm->mm_id; + int new_mapcount_val; + + folio_lock_large_mapcount(folio); + __folio_large_mapcount_sanity_checks(folio, diff, mm_id); + + new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff; + atomic_set(&folio->_large_mapcount, new_mapcount_val); + + /* + * If a folio is mapped more than once into an MM on 32bit, we + * can in theory overflow the per-MM mapcount (although only for + * fairly large folios), turning it negative. In that case, just + * free up the slot and mark the folio "mapped shared", otherwise + * we might be in trouble when unmapping pages later. + */ + if (folio_mm_id(folio, 0) == mm_id) { + folio->_mm_id_mapcount[0] += diff; + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) { + folio->_mm_id_mapcount[0] = -1; + folio_set_mm_id(folio, 0, MM_ID_DUMMY); + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + } else if (folio_mm_id(folio, 1) == mm_id) { + folio->_mm_id_mapcount[1] += diff; + if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) { + folio->_mm_id_mapcount[1] = -1; + folio_set_mm_id(folio, 1, MM_ID_DUMMY); + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) { + folio_set_mm_id(folio, 0, mm_id); + folio->_mm_id_mapcount[0] = diff - 1; + /* We might have other mappings already. */ + if (new_mapcount_val != diff - 1) + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) { + folio_set_mm_id(folio, 1, mm_id); + folio->_mm_id_mapcount[1] = diff - 1; + /* Slot 0 certainly has mappings as well. 
*/ + folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; + } + folio_unlock_large_mapcount(folio); +} + +static __always_inline void folio_sub_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + const mm_id_t mm_id = vma->vm_mm->mm_id; + int new_mapcount_val; + + folio_lock_large_mapcount(folio); + __folio_large_mapcount_sanity_checks(folio, diff, mm_id); + + new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff; + atomic_set(&folio->_large_mapcount, new_mapcount_val); + + /* + * There are valid corner cases where we might underflow a per-MM + * mapcount (some mappings added when no slot was free, some mappings + * added once a slot was free), so we always set it to -1 once we go + * negative. + */ + if (folio_mm_id(folio, 0) == mm_id) { + folio->_mm_id_mapcount[0] -= diff; + if (folio->_mm_id_mapcount[0] >= 0) + goto out; + folio->_mm_id_mapcount[0] = -1; + folio_set_mm_id(folio, 0, MM_ID_DUMMY); + } else if (folio_mm_id(folio, 1) == mm_id) { + folio->_mm_id_mapcount[1] -= diff; + if (folio->_mm_id_mapcount[1] >= 0) + goto out; + folio->_mm_id_mapcount[1] = -1; + folio_set_mm_id(folio, 1, MM_ID_DUMMY); + } + + /* + * If one MM slot owns all mappings, the folio is mapped exclusively. + * Note that if the folio is now unmapped (new_mapcount_val == -1), both + * slots must be free (mapcount == -1), and we'll also mark it as + * exclusive. + */ + if (folio->_mm_id_mapcount[0] == new_mapcount_val || + folio->_mm_id_mapcount[1] == new_mapcount_val) + folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT; +out: + folio_unlock_large_mapcount(folio); +} +#else /* !CONFIG_MM_ID */ +/* + * See __folio_rmap_sanity_checks(), we might map large folios even without + * CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now. + */ static inline void folio_set_large_mapcount(struct folio *folio, int mapcount, struct vm_area_struct *vma) { @@ -191,6 +355,7 @@ static inline void folio_sub_large_mapcount(struct folio *folio, { atomic_sub(diff, &folio->_large_mapcount); } +#endif /* CONFIG_MM_ID */ #define folio_inc_large_mapcount(folio, vma) \ folio_add_large_mapcount(folio, 1, vma) diff --git a/kernel/fork.c b/kernel/fork.c index 364b2d4fd3ef..f9cf0f056eb6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -802,6 +802,36 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ +#ifdef CONFIG_MM_ID +static DEFINE_IDA(mm_ida); + +static inline int mm_alloc_id(struct mm_struct *mm) +{ + int ret; + + ret = ida_alloc_range(&mm_ida, MM_ID_MIN, MM_ID_MAX, GFP_KERNEL); + if (ret < 0) + return ret; + mm->mm_id = ret; + return 0; +} + +static inline void mm_free_id(struct mm_struct *mm) +{ + const mm_id_t id = mm->mm_id; + + mm->mm_id = MM_ID_DUMMY; + if (id == MM_ID_DUMMY) + return; + if (WARN_ON_ONCE(id < MM_ID_MIN || id > MM_ID_MAX)) + return; + ida_free(&mm_ida, id); +} +#else /* !CONFIG_MM_ID */ +static inline int mm_alloc_id(struct mm_struct *mm) { return 0; } +static inline void mm_free_id(struct mm_struct *mm) {} +#endif /* CONFIG_MM_ID */ + static void check_mm(struct mm_struct *mm) { int i; @@ -905,6 +935,7 @@ void __mmdrop(struct mm_struct *mm) WARN_ON_ONCE(mm == current->active_mm); mm_free_pgd(mm); + mm_free_id(mm); destroy_context(mm); mmu_notifier_subscriptions_destroy(mm); check_mm(mm); @@ -1289,6 +1320,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, if (mm_alloc_pgd(mm)) goto fail_nopgd; + if (mm_alloc_id(mm)) + goto fail_noid; + if (init_new_context(p, mm)) goto fail_nocontext; @@ 
-1308,6 +1342,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, fail_cid: destroy_context(mm); fail_nocontext: + mm_free_id(mm); +fail_noid: mm_free_pgd(mm); fail_nopgd: free_mm(mm); diff --git a/mm/Kconfig b/mm/Kconfig index 4c1640a197a0..a25c5476b3ad 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -815,11 +815,15 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_WANTS_THP_SWAP def_bool n +config MM_ID + def_bool n + menuconfig TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT select COMPACTION select XARRAY_MULTI + select MM_ID help Transparent Hugepages allows the kernel to use huge pages and huge tlb transparently to the applications whenever possible. diff --git a/mm/internal.h b/mm/internal.h index fcf0aeae3934..04724971379c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -763,6 +763,11 @@ static inline void prep_compound_head(struct page *page, unsigned int order) folio_set_order(folio, order); atomic_set(&folio->_large_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); + if (IS_ENABLED(CONFIG_MM_ID)) { + folio->_mm_ids = 0; + folio->_mm_id_mapcount[0] = -1; + folio->_mm_id_mapcount[1] = -1; + } if (IS_ENABLED(CONFIG_64BIT) || order > 1) { atomic_set(&folio->_pincount, 0); atomic_set(&folio->_entire_mapcount, -1); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e456a43811fd..c8daa3e64266 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -955,6 +955,16 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) bad_page(page, "nonzero nr_pages_mapped"); goto out; } + if (IS_ENABLED(CONFIG_MM_ID)) { + if (unlikely(folio->_mm_id_mapcount[0] != -1)) { + bad_page(page, "nonzero mm mapcount 0"); + goto out; + } + if (unlikely(folio->_mm_id_mapcount[1] != -1)) { + bad_page(page, "nonzero mm mapcount 1"); + goto out; + } + } if (IS_ENABLED(CONFIG_64BIT)) { if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) { bad_page(page, "nonzero entire_mapcount"); From 1da190f4d0a604ac919e63731441201bd08ef4ac Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:06 +0100 Subject: [PATCH 315/431] mm: Copy-on-Write (COW) reuse support for PTE-mapped THP Currently, we never end up reusing PTE-mapped THPs after fork. This wasn't really a problem with PMD-sized THPs, because they would have to be PTE-mapped first, but it's getting a problem with smaller THP sizes that are effectively always PTE-mapped. With our new "mapped exclusively" vs "maybe mapped shared" logic for large folios, implementing CoW reuse for PTE-mapped THPs is straight forward: if exclusively mapped, make sure that all references are from these (our) mappings. Add some helpful comments to explain the details. CONFIG_TRANSPARENT_HUGEPAGE selects CONFIG_MM_ID. If we spot an anon large folio without CONFIG_TRANSPARENT_HUGEPAGE in that code, something is seriously messed up. There are plenty of things we can optimize in the future: For example, we could remember that the folio is fully exclusive so we could speedup the next fault further. Also, we could try "faulting around", turning surrounding PTEs that map the same folio writable. But especially the latter might increase COW latency, so it would need further investigation. Link: https://lkml.kernel.org/r/20250303163014.1128035-14-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. 
Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- mm/memory.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 76 insertions(+), 9 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index f745f5e28f0c..5d5a2d81f05a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3727,18 +3727,85 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) return ret; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static bool __wp_can_reuse_large_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + bool exclusive = false; + + /* Let's just free up a large folio if only a single page is mapped. */ + if (folio_large_mapcount(folio) <= 1) + return false; + + /* + * The assumption for anonymous folios is that each page can only get + * mapped once into each MM. The only exception are KSM folios, which + * are always small. + * + * Each taken mapcount must be paired with exactly one taken reference, + * whereby the refcount must be incremented before the mapcount when + * mapping a page, and the refcount must be decremented after the + * mapcount when unmapping a page. + * + * If all folio references are from mappings, and all mappings are in + * the page tables of this MM, then this folio is exclusive to this MM. + */ + if (folio_test_large_maybe_mapped_shared(folio)) + return false; + + VM_WARN_ON_ONCE(folio_test_ksm(folio)); + VM_WARN_ON_ONCE(folio_mapcount(folio) > folio_nr_pages(folio)); + VM_WARN_ON_ONCE(folio_entire_mapcount(folio)); + + if (unlikely(folio_test_swapcache(folio))) { + /* + * Note: freeing up the swapcache will fail if some PTEs are + * still swap entries. + */ + if (!folio_trylock(folio)) + return false; + folio_free_swap(folio); + folio_unlock(folio); + } + + if (folio_large_mapcount(folio) != folio_ref_count(folio)) + return false; + + /* Stabilize the mapcount vs. refcount and recheck. */ + folio_lock_large_mapcount(folio); + VM_WARN_ON_ONCE(folio_large_mapcount(folio) < folio_ref_count(folio)); + + if (folio_test_large_maybe_mapped_shared(folio)) + goto unlock; + if (folio_large_mapcount(folio) != folio_ref_count(folio)) + goto unlock; + + VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id && + folio_mm_id(folio, 1) != vma->vm_mm->mm_id); + + /* + * Do we need the folio lock? Likely not. If there would have been + * references from page migration/swapout, we would have detected + * an additional folio reference and never ended up here. + */ + exclusive = true; +unlock: + folio_unlock_large_mapcount(folio); + return exclusive; +} +#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ +static bool __wp_can_reuse_large_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + static bool wp_can_reuse_anon_folio(struct folio *folio, struct vm_area_struct *vma) { - /* - * We could currently only reuse a subpage of a large folio if no - * other subpages of the large folios are still mapped. However, - * let's just consistently not reuse subpages even if we could - * reuse in that scenario, and give back a large folio a bit - * sooner. 
- */ - if (folio_test_large(folio)) - return false; + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio)) + return __wp_can_reuse_large_anon_folio(folio, vma); /* * We have to verify under folio lock: these early checks are From 003fde4492c88ac3a1fee3d97b3834a679780af3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:07 +0100 Subject: [PATCH 316/431] mm: convert folio_likely_mapped_shared() to folio_maybe_mapped_shared() Let's reuse our new MM ownership tracking infrastructure for large folios to make folio_likely_mapped_shared() never return false negatives -- never indicating "not mapped shared" although the folio *is* mapped shared. With that, we can rename it to folio_maybe_mapped_shared() and get rid of the dependency on the mapcount of the first folio page. The semantics are now arguably clearer: no mixture of "false negatives" and "false positives", only the remaining possibility for "false positives". Thoroughly document the new semantics. We might now detect that a large folio is "maybe mapped shared" although it *no longer* is -- but once was. Now, if more than two MMs mapped a folio at the same time, and the MM mapping the folio exclusively at the end is not one tracked in the two folio MM slots, we will detect the folio as "maybe mapped shared". For anonymous folios, usually (except weird corner cases) all PTEs that target a "maybe mapped shared" folio are R/O. As soon as a child process would write to them (iow, actively use them), we would CoW and effectively replace these PTEs. Most cases (below) are not expected to really matter with large anonymous folios for this reason. Most importantly, there will be no change at all for: * small folios * hugetlb folios * PMD-mapped PMD-sized THPs (single mapping) This change has the potential to affect existing callers of folio_likely_mapped_shared() -> folio_maybe_mapped_shared(): (1) fs/proc/task_mmu.c: no change (hugetlb) (2) khugepaged counts PTEs that target shared folios towards max_ptes_shared (default: HPAGE_PMD_NR / 2), meaning we could skip a collapse where we would have previously collapsed. This only applies to anonymous folios and is not expected to matter in practice. Worth noting that this change sorts out case (A) documented in commit 1bafe96e89f0 ("mm/khugepaged: replace page_mapcount() check by folio_likely_mapped_shared()") by removing the possibility for "false negatives". (3) MADV_COLD / MADV_PAGEOUT / MADV_FREE will not try splitting PTE-mapped THPs that are considered shared but not fully covered by the requested range, consequently not processing them. PMD-mapped PMD-sized THP are not affected, or when all PTEs are covered. These functions are usually only called on anon/file folios that are exclusively mapped most of the time (no other file mappings or no fork()), so the "false negatives" are not expected to matter in practice. (4) mbind() / migrate_pages() / move_pages() will refuse to migrate shared folios unless MPOL_MF_MOVE_ALL is effective (requires CAP_SYS_NICE). We will now reject some folios that could be migrated. Similar to (3), especially with MPOL_MF_MOVE_ALL, so this is not expected to matter in practice. Note that cpuset_migrate_mm_workfn() calls do_migrate_pages() with MPOL_MF_MOVE_ALL. (5) NUMA hinting mm/migrate.c:migrate_misplaced_folio_prepare() will skip file folios that are probably shared libraries (-> "mapped shared" and executable). 
This check would have detected it as a shared library at some point (at least 3 MMs mapping it), so detecting it afterwards does not sound wrong (still a shared library). Not expected to matter. mm/memory.c:numa_migrate_check() will indicate TNF_SHARED in MAP_SHARED file mappings when encountering a shared folio. Similar reasoning, not expected to matter. mm/mprotect.c:change_pte_range() will skip folios detected as shared in CoW mappings. Similarly, this is not expected to matter in practice, but if it would ever be a problem we could relax that check a bit (e.g., basing it on the average page-mapcount in a folio), because it was only an optimization when many (e.g., 288) processes were mapping the same folios -- see commit 859d4adc3415 ("mm: numa: do not trap faults on shared data section pages.") (6) mm/rmap.c:folio_referenced_one() will skip exclusive swapbacked folios in dying processes. Applies to anonymous folios only. Without "false negatives", we'll now skip all actually shared ones. Skipping ones that are actually exclusive won't really matter, it's a pure optimization, and is not expected to matter in practice. In theory, one can detect the problematic scenario: folio_mapcount() > 0 and no folio MM slot is occupied ("state unknown"). One could reset the MM slots while doing an rmap walk, which migration / folio split already do when setting everything up. Further, when batching PTEs we might naturally learn about a owner (e.g., folio_mapcount() == nr_ptes) and could update the owner. However, we'll defer that until the scenarios where it would really matter are clear. Link: https://lkml.kernel.org/r/20250303163014.1128035-15-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 4 ++-- include/linux/mm.h | 43 ++++++++++++++++++++++--------------------- mm/huge_memory.c | 2 +- mm/khugepaged.c | 8 +++----- mm/madvise.c | 6 +++--- mm/memory.c | 2 +- mm/mempolicy.c | 8 ++++---- mm/migrate.c | 7 +++---- mm/mprotect.c | 2 +- mm/rmap.c | 2 +- 10 files changed, 41 insertions(+), 43 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b0f189815512..d811b24db65b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1023,7 +1023,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, if (folio) { /* We treat non-present entries as "maybe shared". 
*/ - if (!present || folio_likely_mapped_shared(folio) || + if (!present || folio_maybe_mapped_shared(folio) || hugetlb_pmd_shared(pte)) mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); else @@ -1882,7 +1882,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, if (!folio_test_anon(folio)) flags |= PM_FILE; - if (!folio_likely_mapped_shared(folio) && + if (!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(ptep)) flags |= PM_MMAP_EXCLUSIVE; diff --git a/include/linux/mm.h b/include/linux/mm.h index f366c180f2b6..82776b409391 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2251,23 +2251,18 @@ static inline size_t folio_size(const struct folio *folio) } /** - * folio_likely_mapped_shared - Estimate if the folio is mapped into the page - * tables of more than one MM + * folio_maybe_mapped_shared - Whether the folio is mapped into the page + * tables of more than one MM * @folio: The folio. * - * This function checks if the folio is currently mapped into more than one - * MM ("mapped shared"), or if the folio is only mapped into a single MM - * ("mapped exclusively"). + * This function checks if the folio maybe currently mapped into more than one + * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single + * MM ("mapped exclusively"). * * For KSM folios, this function also returns "mapped shared" when a folio is * mapped multiple times into the same MM, because the individual page mappings * are independent. * - * As precise information is not easily available for all folios, this function - * estimates the number of MMs ("sharers") that are currently mapping a folio - * using the number of times the first page of the folio is currently mapped - * into page tables. - * * For small anonymous folios and anonymous hugetlb folios, the return * value will be exactly correct: non-KSM folios can only be mapped at most once * into an MM, and they cannot be partially mapped. KSM folios are @@ -2275,8 +2270,8 @@ static inline size_t folio_size(const struct folio *folio) * * For other folios, the result can be fuzzy: * #. For partially-mappable large folios (THP), the return value can wrongly - * indicate "mapped exclusively" (false negative) when the folio is - * only partially mapped into at least one MM. + * indicate "mapped shared" (false positive) if a folio was mapped by + * more than two MMs at one point in time. * #. For pagecache folios (including hugetlb), the return value can wrongly * indicate "mapped shared" (false positive) when two VMAs in the same MM * cover the same file range. @@ -2293,7 +2288,7 @@ static inline size_t folio_size(const struct folio *folio) * * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline bool folio_likely_mapped_shared(struct folio *folio) +static inline bool folio_maybe_mapped_shared(struct folio *folio) { int mapcount = folio_mapcount(folio); @@ -2301,16 +2296,22 @@ static inline bool folio_likely_mapped_shared(struct folio *folio) if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio))) return mapcount > 1; - /* A single mapping implies "mapped exclusively". */ - if (mapcount <= 1) - return false; - - /* If any page is mapped more than once we treat it "mapped shared". */ - if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio)) + /* + * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ... + * simply assume "mapped shared", nobody should really care + * about this for arbitrary kernel allocations. 
+ */ + if (!IS_ENABLED(CONFIG_MM_ID)) return true; - /* Let's guess based on the first subpage. */ - return atomic_read(&folio->_mapcount) > 0; + /* + * A single mapping implies "mapped exclusively", even if the + * folio flag says something different: it's easier to handle this + * case here instead of on the RMAP hot path. + */ + if (mapcount <= 1) + return false; + return folio_test_large_maybe_mapped_shared(folio); } #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 22e4e1194e9e..7433369d5d1f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2155,7 +2155,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) goto out; if (!folio_trylock(folio)) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5f0be134141e..cc945c6ab3bd 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -607,7 +607,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio); /* See hpage_collapse_scan_pmd(). */ - if (folio_likely_mapped_shared(folio)) { + if (folio_maybe_mapped_shared(folio)) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { @@ -1359,11 +1359,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, /* * We treat a single page as shared if any part of the THP - * is shared. "False negatives" from - * folio_likely_mapped_shared() are not expected to matter - * much in practice. + * is shared. */ - if (folio_likely_mapped_shared(folio)) { + if (folio_maybe_mapped_shared(folio)) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { diff --git a/mm/madvise.c b/mm/madvise.c index e01e93e179a8..388dc289b5d1 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -387,7 +387,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pmd_folio(orig_pmd); /* Do not interfere with other mappings of this folio */ - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -486,7 +486,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, if (nr < folio_nr_pages(folio)) { int err; - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; @@ -721,7 +721,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (nr < folio_nr_pages(folio)) { int err; - if (folio_likely_mapped_shared(folio)) + if (folio_maybe_mapped_shared(folio)) continue; if (!folio_trylock(folio)) continue; diff --git a/mm/memory.c b/mm/memory.c index 5d5a2d81f05a..8873b7a4962c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5698,7 +5698,7 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, * Flag if the folio is shared between multiple address spaces. 
This * is later used when determining whether to group tasks together */ - if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) + if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) *flags |= TNF_SHARED; /* * For memory tiering mode, cpupid of slow memory page is used diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bbaadbeeb291..530e71fe9147 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -642,11 +642,11 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * See folio_likely_mapped_shared() on possible imprecision when we + * See folio_maybe_mapped_shared() on possible imprecision when we * cannot easily detect if a folio is shared. */ if ((flags & MPOL_MF_MOVE_ALL) || - (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) + (!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) if (!folio_isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: @@ -1033,10 +1033,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * See folio_likely_mapped_shared() on possible imprecision when we + * See folio_maybe_mapped_shared() on possible imprecision when we * cannot easily detect if a folio is shared. */ - if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) { + if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, diff --git a/mm/migrate.c b/mm/migrate.c index a991d3691bda..c0adea67cd62 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2226,7 +2226,7 @@ static int __add_folio_for_migration(struct folio *folio, int node, if (folio_nid(folio) == node) return 0; - if (folio_likely_mapped_shared(folio) && !migrate_all) + if (folio_maybe_mapped_shared(folio) && !migrate_all) return -EACCES; if (folio_test_hugetlb(folio)) { @@ -2651,11 +2651,10 @@ int migrate_misplaced_folio_prepare(struct folio *folio, * processes with execute permissions as they are probably * shared libraries. * - * See folio_likely_mapped_shared() on possible imprecision + * See folio_maybe_mapped_shared() on possible imprecision * when we cannot easily detect if a folio is shared. 
*/ - if ((vma->vm_flags & VM_EXEC) && - folio_likely_mapped_shared(folio)) + if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio)) return -EACCES; /* diff --git a/mm/mprotect.c b/mm/mprotect.c index 1444878f7aeb..62c1f7945741 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -133,7 +133,7 @@ static long change_pte_range(struct mmu_gather *tlb, /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && (folio_maybe_dma_pinned(folio) || - folio_likely_mapped_shared(folio))) + folio_maybe_mapped_shared(folio))) continue; /* diff --git a/mm/rmap.c b/mm/rmap.c index c9922928616e..8de415157bc8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -889,7 +889,7 @@ static bool folio_referenced_one(struct folio *folio, if ((!atomic_read(&vma->vm_mm->mm_users) || check_stable_address_space(vma->vm_mm)) && folio_test_anon(folio) && folio_test_swapbacked(folio) && - !folio_likely_mapped_shared(folio)) { + !folio_maybe_mapped_shared(folio)) { pra->referenced = -1; page_vma_mapped_walk_done(&pvmw); return false; From e63ee43e3edaacfca04454b34aee8d5e303953df Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:08 +0100 Subject: [PATCH 317/431] mm: CONFIG_NO_PAGE_MAPCOUNT to prepare for not maintain per-page mapcounts in large folios We're close to the finishing line: let's introduce a new CONFIG_NO_PAGE_MAPCOUNT config option where we will incrementally remove any dependencies on per-page mapcounts in large folios. Once that's done, we'll stop maintaining the per-page mapcounts with this config option enabled. CONFIG_NO_PAGE_MAPCOUNT will be EXPERIMENTAL for now, as we'll have to learn about some of the real world impact of some of the implications. As writing "!CONFIG_NO_PAGE_MAPCOUNT" is really nasty, let's introduce a helper config option "CONFIG_PAGE_MAPCOUNT" that expresses the negation. Link: https://lkml.kernel.org/r/20250303163014.1128035-16-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- mm/Kconfig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/mm/Kconfig b/mm/Kconfig index a25c5476b3ad..4a4e7b63d30a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -887,8 +887,25 @@ config READ_ONLY_THP_FOR_FS support of file THPs will be developed in the next few release cycles. +config NO_PAGE_MAPCOUNT + bool "No per-page mapcount (EXPERIMENTAL)" + help + Do not maintain per-page mapcounts for pages part of larger + allocations, such as transparent huge pages. + + When this config option is enabled, some interfaces that relied on + this information will rely on less-precise per-allocation information + instead: for example, using the average per-page mapcount in such + a large allocation instead of the per-page mapcount. + + EXPERIMENTAL because the impact of some changes is still unclear. 
+ endif # TRANSPARENT_HUGEPAGE +# simple helper to make the code a bit easier to read +config PAGE_MAPCOUNT + def_bool !NO_PAGE_MAPCOUNT + # # The architecture supports pgtable leaves that is larger than PAGE_SIZE # From ae4192b7691cbd18aa6e286f4ccaf5ab574fc9cf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:09 +0100 Subject: [PATCH 318/431] fs/proc/page: remove per-page mapcount dependency for /proc/kpagecount (CONFIG_NO_PAGE_MAPCOUNT) Let's implement an alternative when per-page mapcounts in large folios are no longer maintained -- soon with CONFIG_NO_PAGE_MAPCOUNT. For large folios, we'll return the per-page average mapcount within the folio, whereby we round to the closest integer when calculating the average: however, we'll always return at least 1 if the folio is mapped. So assuming a folio with 512 pages, the average would be: * 0 if no pages are mapped * 1 if there are 1 .. 767 per-page mappings * 2 if there are 768 .. 1279 per-page mappings ... For hugetlb folios and for large folios that are fully mapped into all address spaces, there is no change. We'll make use of this helper in other contexts next. As an alternative, we could simply return 0 for non-hugetlb large folios, or disable this legacy interface with CONFIG_NO_PAGE_MAPCOUNT. But the information exposed by this interface can still be valuable, and frequently we deal with fully-mapped large folios where the average corresponds to the actual page mapcount. So we'll leave it like this for now and document the new behavior. Note: this interface is likely not very relevant for performance. If ever required, we could try doing a rather expensive rmap walk to collect precisely how often this folio page is mapped. Link: https://lkml.kernel.org/r/20250303163014.1128035-17-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/pagemap.rst | 7 ++++- fs/proc/internal.h | 35 ++++++++++++++++++++++++ fs/proc/page.c | 11 ++++++-- 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst index a297e824f990..d6647daca912 100644 --- a/Documentation/admin-guide/mm/pagemap.rst +++ b/Documentation/admin-guide/mm/pagemap.rst @@ -43,7 +43,12 @@ There are four components to pagemap: skip over unmapped regions. * ``/proc/kpagecount``. This file contains a 64-bit count of the number of - times each page is mapped, indexed by PFN. + times each page is mapped, indexed by PFN. Some kernel configurations do + not track the precise number of times a page part of a larger allocation + (e.g., THP) is mapped. In these configurations, the average number of + mappings per page in this larger allocation is returned instead. However, + if any page of the large allocation is mapped, the returned value will + be at least 1. The page-types tool in the tools/mm directory can be used to query the number of times a page is mapped.
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 77a517f91821..5f5271852b53 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -188,6 +188,41 @@ static inline int folio_precise_page_mapcount(struct folio *folio, return mapcount; } +/** + * folio_average_page_mapcount() - Average number of mappings per page in this + * folio + * @folio: The folio. + * + * The average number of user page table entries that reference each page in + * this folio as tracked via the RMAP: either referenced directly (PTE) or + * as part of a larger area that covers this page (e.g., PMD). + * + * The average is calculated by rounding to the nearest integer; however, + * to avoid duplicated code in current callers, the average is at least + * 1 if any page of the folio is mapped. + * + * Returns: The average number of mappings per page in this folio. + */ +static inline int folio_average_page_mapcount(struct folio *folio) +{ + int mapcount, entire_mapcount, avg; + + if (!folio_test_large(folio)) + return atomic_read(&folio->_mapcount) + 1; + + mapcount = folio_large_mapcount(folio); + if (unlikely(mapcount <= 0)) + return 0; + entire_mapcount = folio_entire_mapcount(folio); + if (mapcount <= entire_mapcount) + return entire_mapcount; + mapcount -= entire_mapcount; + + /* Round to closest integer ... */ + avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio); + /* ... but return at least 1. */ + return max_t(int, avg + entire_mapcount, 1); +} /* * array.c */ diff --git a/fs/proc/page.c b/fs/proc/page.c index a55f5acefa97..23fc771100ae 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -67,9 +67,14 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, * memmaps that were actually initialized. */ page = pfn_to_online_page(pfn); - if (page) - mapcount = folio_precise_page_mapcount(page_folio(page), - page); + if (page) { + struct folio *folio = page_folio(page); + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + mapcount = folio_precise_page_mapcount(folio, page); + else + mapcount = folio_average_page_mapcount(folio); + } if (put_user(mapcount, out)) { ret = -EFAULT; From eb16876971ea8f26c5bf839120ff308246d9cc7b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:10 +0100 Subject: [PATCH 319/431] fs/proc/task_mmu: remove per-page mapcount dependency for PM_MMAP_EXCLUSIVE (CONFIG_NO_PAGE_MAPCOUNT) Let's implement an alternative when per-page mapcounts in large folios are no longer maintained -- soon with CONFIG_NO_PAGE_MAPCOUNT. PM_MMAP_EXCLUSIVE will now be cleared if folio_likely_mapped_shared() is true -- when the folio is considered "mapped shared", including when it once was "mapped shared" but no longer is, as documented. This might result in an under-indication of "exclusively mapped", which is considered better than over-indicating it: under-estimating the USS (Unique Set Size) is better than over-estimating it. As an alternative, we could simply remove that flag with CONFIG_NO_PAGE_MAPCOUNT completely, but there might be value to it. So, let's keep it like that and document the behavior. Link: https://lkml.kernel.org/r/20250303163014.1128035-18-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A.
Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/pagemap.rst | 11 +++++++++++ fs/proc/task_mmu.c | 11 +++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst index d6647daca912..afce291649dd 100644 --- a/Documentation/admin-guide/mm/pagemap.rst +++ b/Documentation/admin-guide/mm/pagemap.rst @@ -38,6 +38,17 @@ There are four components to pagemap: precisely which pages are mapped (or in swap) and comparing mapped pages between processes. + Traditionally, bit 56 indicates that a page is mapped exactly once and bit + 56 is clear when a page is mapped multiple times, even when mapped in the + same process multiple times. In some kernel configurations, the semantics + for pages part of a larger allocation (e.g., THP) can differ: bit 56 is set + if all pages part of the corresponding large allocation are *certainly* + mapped in the same process, even if the page is mapped multiple times in that + process. Bit 56 is clear when any page page of the larger allocation + is *maybe* mapped in a different process. In some cases, a large allocation + might be treated as "maybe mapped by multiple processes" even though this + is no longer the case. + Efficient users of this interface will use ``/proc/pid/maps`` to determine which areas of memory are actually mapped and llseek to skip over unmapped regions. diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index d811b24db65b..8192cbe4f356 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1652,6 +1652,13 @@ static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm) return 0; } +static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page) +{ + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + return folio_precise_page_mapcount(folio, page) == 1; + return !folio_maybe_mapped_shared(folio); +} + static int pagemap_pte_hole(unsigned long start, unsigned long end, __always_unused int depth, struct mm_walk *walk) { @@ -1742,7 +1749,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (!folio_test_anon(folio)) flags |= PM_FILE; if ((flags & PM_PRESENT) && - folio_precise_page_mapcount(folio, page) == 1) + __folio_page_mapped_exclusively(folio, page)) flags |= PM_MMAP_EXCLUSIVE; } if (vma->vm_flags & VM_SOFTDIRTY) @@ -1817,7 +1824,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, pagemap_entry_t pme; if (folio && (flags & PM_PRESENT) && - folio_precise_page_mapcount(folio, page + idx) == 1) + __folio_page_mapped_exclusively(folio, page)) cur_flags |= PM_MMAP_EXCLUSIVE; pme = make_pme(frame, cur_flags); From 7a34ae14491e03b64c6002abd23a8920144ae7d8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:11 +0100 Subject: [PATCH 320/431] fs/proc/task_mmu: remove per-page mapcount dependency for "mapmax" (CONFIG_NO_PAGE_MAPCOUNT) Let's implement an alternative when per-page mapcounts in large folios are no longer maintained -- soon with CONFIG_NO_PAGE_MAPCOUNT. For calculating "mapmax", we now use the average per-page mapcount in a large folio instead of the per-page mapcount. For hugetlb folios and folios that are not partially mapped into MMs, there is no change. 
Likely, this change will not matter much in practice, and an alternative might be to simply remove this stat with CONFIG_NO_PAGE_MAPCOUNT. However, there might be value to it, so let's keep it like that and document the behavior. Link: https://lkml.kernel.org/r/20250303163014.1128035-19-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 5 +++++ fs/proc/task_mmu.c | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 09f0aed5a08b..1aa190017f79 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -686,6 +686,11 @@ Where: node locality page counters (N0 == node0, N1 == node1, ...) and the kernel page size, in KB, that is backing the mapping up. +Note that some kernel configurations do not track the precise number of times +a page part of a larger allocation (e.g., THP) is mapped. In these +configurations, "mapmax" might correspond to the average number of mappings +per page in such a larger allocation instead. + 1.2 Kernel data --------------- diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 8192cbe4f356..dc4f819d1549 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -2863,7 +2863,12 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, unsigned long nr_pages) { struct folio *folio = page_folio(page); - int count = folio_precise_page_mapcount(folio, page); + int count; + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + count = folio_precise_page_mapcount(folio, page); + else + count = folio_average_page_mapcount(folio); md->pages += nr_pages; if (pte_dirty || folio_test_dirty(folio)) From 6dd55dd1c55553f42605ebf0bdc8def5f2fd9309 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:12 +0100 Subject: [PATCH 321/431] fs/proc/task_mmu: remove per-page mapcount dependency for smaps/smaps_rollup (CONFIG_NO_PAGE_MAPCOUNT) Let's implement an alternative when per-page mapcounts in large folios are no longer maintained -- soon with CONFIG_NO_PAGE_MAPCOUNT. When computing the output for smaps / smaps_rollups, in particular when calculating the USS (Unique Set Size) and the PSS (Proportional Set Size), we still rely on per-page mapcounts. To determine private vs. shared, we'll use folio_likely_mapped_shared(), similar to how we handle PM_MMAP_EXCLUSIVE. Similarly, we might now under-estimate the USS and count pages towards "shared" that are actually "private" ("exclusively mapped"). When calculating the PSS, we'll now also use the average per-page mapcount for large folios: this can result in both an over-estimation and an under-estimation of the PSS. The difference is not expected to matter much in practice, but we'll have to learn as we go. We can now provide folio_precise_page_mapcount() only with CONFIG_PAGE_MAPCOUNT, and remove one of the last users of per-page mapcounts when CONFIG_NO_PAGE_MAPCOUNT is enabled. Document the new behavior.
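To illustrate why the average-based PSS can deviate in either direction, here is a hedged userspace-only sketch (it is not the kernel's smaps_account(); the per-page mapcounts are made-up example data). It compares the PSS of one 4-page folio computed from precise per-page mapcounts with the PSS computed from the folio-wide average:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Illustrative only: PSS from precise per-page mapcounts. */
    static unsigned long pss_precise(const int *mapcount, int nr_pages)
    {
        unsigned long pss = 0;

        for (int i = 0; i < nr_pages; i++)
            if (mapcount[i])
                pss += PAGE_SIZE / mapcount[i];
        return pss;
    }

    /* Illustrative only: PSS from the average mapcount (rounded, at least 1). */
    static unsigned long pss_average(const int *mapcount, int nr_pages)
    {
        int sum = 0, avg;

        for (int i = 0; i < nr_pages; i++)
            sum += mapcount[i];
        if (!sum)
            return 0;
        avg = (sum + nr_pages / 2) / nr_pages;
        if (!avg)
            avg = 1;
        return (unsigned long)nr_pages * PAGE_SIZE / avg;
    }

    int main(void)
    {
        /* One page mapped by four processes, three pages mapped once. */
        int mapcount[] = { 4, 1, 1, 1 };

        printf("precise PSS: %lu\n", pss_precise(mapcount, 4));
        printf("average PSS: %lu\n", pss_average(mapcount, 4));
        return 0;
    }

With this example data the average-based PSS (8192) under-estimates the precise PSS (13312); a distribution such as { 1, 1, 1, 2 } over-estimates it instead, because the rounded average drops below the harmonic mean.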
Link: https://lkml.kernel.org/r/20250303163014.1128035-20-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 22 +++++++++++++++++++--- fs/proc/internal.h | 8 ++++++++ fs/proc/task_mmu.c | 17 +++++++++++++++-- 3 files changed, 42 insertions(+), 5 deletions(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 1aa190017f79..c9e62e8e0685 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -502,9 +502,25 @@ process, its PSS will be 1500. "Pss_Dirty" is the portion of PSS which consists of dirty pages. ("Pss_Clean" is not included, but it can be calculated by subtracting "Pss_Dirty" from "Pss".) -Note that even a page which is part of a MAP_SHARED mapping, but has only -a single pte mapped, i.e. is currently used by only one process, is accounted -as private and not as shared. +Traditionally, a page is accounted as "private" if it is mapped exactly once, +and a page is accounted as "shared" when mapped multiple times, even when +mapped in the same process multiple times. Note that this accounting is +independent of MAP_SHARED. + +In some kernel configurations, the semantics of pages part of a larger +allocation (e.g., THP) can differ: a page is accounted as "private" if all +pages part of the corresponding large allocation are *certainly* mapped in the +same process, even if the page is mapped multiple times in that process. A +page is accounted as "shared" if any page page of the larger allocation +is *maybe* mapped in a different process. In some cases, a large allocation +might be treated as "maybe mapped by multiple processes" even though this +is no longer the case. + +Some kernel configurations do not track the precise number of times a page part +of a larger allocation is mapped. In this case, when calculating the PSS, the +average number of mappings per page in this larger allocation might be used +as an approximation for the number of mappings of a page. The PSS calculation +will be imprecise in this case. "Referenced" indicates the amount of memory currently marked as referenced or accessed. diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5f5271852b53..96122e91c645 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -157,6 +157,7 @@ unsigned name_to_int(const struct qstr *qstr); /* Worst case buffer size needed for holding an integer. */ #define PROC_NUMBUF 13 +#ifdef CONFIG_PAGE_MAPCOUNT /** * folio_precise_page_mapcount() - Number of mappings of this folio page. * @folio: The folio. 
@@ -187,6 +188,13 @@ static inline int folio_precise_page_mapcount(struct folio *folio, return mapcount; } +#else /* !CONFIG_PAGE_MAPCOUNT */ +static inline int folio_precise_page_mapcount(struct folio *folio, + struct page *page) +{ + BUILD_BUG(); +} +#endif /* CONFIG_PAGE_MAPCOUNT */ /** * folio_average_page_mapcount() - Average number of mappings per page in this diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dc4f819d1549..994cde10e3f4 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -707,6 +707,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, struct folio *folio = page_folio(page); int i, nr = compound ? compound_nr(page) : 1; unsigned long size = nr * PAGE_SIZE; + bool exclusive; + int mapcount; /* * First accumulate quantities that depend only on |size| and the type @@ -747,18 +749,29 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, dirty, locked, present); return; } + + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + mapcount = folio_average_page_mapcount(folio); + exclusive = !folio_maybe_mapped_shared(folio); + } + /* * We obtain a snapshot of the mapcount. Without holding the folio lock * this snapshot can be slightly wrong as we cannot always read the * mapcount atomically. */ for (i = 0; i < nr; i++, page++) { - int mapcount = folio_precise_page_mapcount(folio, page); unsigned long pss = PAGE_SIZE << PSS_SHIFT; + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) { + mapcount = folio_precise_page_mapcount(folio, page); + exclusive = mapcount < 2; + } + if (mapcount >= 2) pss /= mapcount; smaps_page_accumulate(mss, folio, PAGE_SIZE, pss, - dirty, locked, mapcount < 2); + dirty, locked, exclusive); } } From 749492229e3bd6222dda7267b8244135229d1fd8 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:30:13 +0100 Subject: [PATCH 322/431] mm: stop maintaining the per-page mapcount of large folios (CONFIG_NO_PAGE_MAPCOUNT) Everything is in place to stop using the per-page mapcounts in large folios: the mapcount of tail pages will always be logically 0 (-1 value), just like it currently is for hugetlb folios already, and the page mapcount of the head page is either 0 (-1 value) or contains a page type (e.g., hugetlb). Maintaining _nr_pages_mapped without per-page mapcounts is impossible, so that one also has to go with CONFIG_NO_PAGE_MAPCOUNT. There are two remaining implications: (1) Per-node, per-cgroup and per-lruvec stats of "NR_ANON_MAPPED" ("mapped anonymous memory") and "NR_FILE_MAPPED" ("mapped file memory"): As soon as any page of the folio is mapped -- folio_mapped() -- we now account the complete folio as mapped. Once the last page is unmapped -- !folio_mapped() -- we account the complete folio as unmapped. This implies that ... * "AnonPages" and "Mapped" in /proc/meminfo and /sys/devices/system/node/*/meminfo * cgroup v2: "anon" and "file_mapped" in "memory.stat" and "memory.numa_stat" * cgroup v1: "rss" and "mapped_file" in "memory.stat" and "memory.numa_stat ... can now appear higher than before. But note that these folios do consume that memory, simply not all pages are actually currently mapped. It's worth nothing that other accounting in the kernel (esp. cgroup charging on allocation) is not affected by this change. [why oh why is "anon" called "rss" in cgroup v1] (2) Detecting partial mappings Detecting whether anon THPs are partially mapped gets a bit more unreliable. As long as a single MM maps such a large folio ("exclusively mapped"), we can reliably detect it. 
Especially before fork() / after a short-lived child process quit, we will detect partial mappings reliably, which is the common case. In essence, if the average per-page mapcount in an anon THP is < 1, we know for sure that we have a partial mapping. However, as soon as multiple MMs are involved, we might miss detecting partial mappings: this might be relevant with long-lived child processes. If we have a fully-mapped anon folio before fork(), once our child processes and our parent all unmap (zap/COW) the same pages (but not the complete folio), we might not detect the partial mapping. However, once the child processes quit we would detect the partial mapping. How relevant this case is in practice remains to be seen. Swapout/migration will likely mitigate this. In the future, RMAP walkers could check for that for that case (e.g., when collecting access bits during reclaim) and simply flag them for deferred-splitting. Link: https://lkml.kernel.org/r/20250303163014.1128035-21-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- .../admin-guide/cgroup-v1/memory.rst | 4 + Documentation/admin-guide/cgroup-v2.rst | 10 ++- Documentation/filesystems/proc.rst | 10 ++- Documentation/mm/transhuge.rst | 31 +++++-- include/linux/rmap.h | 35 ++++++-- mm/internal.h | 5 +- mm/page_alloc.c | 3 +- mm/rmap.c | 80 +++++++++++++++++-- 8 files changed, 150 insertions(+), 28 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 286d16fc22eb..53cf081b22e8 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -609,6 +609,10 @@ memory.stat file includes following statistics: 'rss + mapped_file" will give you resident set size of cgroup. + Note that some kernel configurations might account complete larger + allocations (e.g., THP) towards 'rss' and 'mapped_file', even if + only some, but not all that memory is mapped. + (Note: file and shmem may be shared among other cgroups. In that case, mapped_file is accounted only when the memory cgroup is owner of page cache.) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index cb1b4e759b7e..f8a894a16307 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1440,7 +1440,10 @@ The following nested keys are defined. anon Amount of memory used in anonymous mappings such as - brk(), sbrk(), and mmap(MAP_ANONYMOUS) + brk(), sbrk(), and mmap(MAP_ANONYMOUS). Note that + some kernel configurations might account complete larger + allocations (e.g., THP) if only some, but not all the + memory of such an allocation is mapped anymore. file Amount of memory used to cache filesystem data, @@ -1483,7 +1486,10 @@ The following nested keys are defined. Amount of application memory swapped out to zswap. file_mapped - Amount of cached filesystem data mapped with mmap() + Amount of cached filesystem data mapped with mmap(). Note + that some kernel configurations might account complete + larger allocations (e.g., THP) if only some, but not + not all the memory of such an allocation is mapped. 
file_dirty Amount of cached filesystem data that was modified but diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index c9e62e8e0685..3c37b248fc4f 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1153,9 +1153,15 @@ Dirty Writeback Memory which is actively being written back to the disk AnonPages - Non-file backed pages mapped into userspace page tables + Non-file backed pages mapped into userspace page tables. Note that + some kernel configurations might consider all pages part of a + larger allocation (e.g., THP) as "mapped", as soon as a single + page is mapped. Mapped - files which have been mmapped, such as libraries + files which have been mmapped, such as libraries. Note that some + kernel configurations might consider all pages part of a larger + allocation (e.g., THP) as "mapped", as soon as a single page is + mapped. Shmem Total memory used by shared memory (shmem) and tmpfs KReclaimable diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index baa17d718a76..0e7f8e4cd2e3 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -116,23 +116,28 @@ pages: succeeds on tail pages. - map/unmap of a PMD entry for the whole THP increment/decrement - folio->_entire_mapcount, increment/decrement folio->_large_mapcount - and also increment/decrement folio->_nr_pages_mapped by ENTIRELY_MAPPED - when _entire_mapcount goes from -1 to 0 or 0 to -1. + folio->_entire_mapcount and folio->_large_mapcount. We also maintain the two slots for tracking MM owners (MM ID and corresponding mapcount), and the current status ("maybe mapped shared" vs. "mapped exclusively"). + With CONFIG_PAGE_MAPCOUNT, we also increment/decrement + folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount goes + from -1 to 0 or 0 to -1. + - map/unmap of individual pages with PTE entry increment/decrement - page->_mapcount, increment/decrement folio->_large_mapcount and also - increment/decrement folio->_nr_pages_mapped when page->_mapcount goes - from -1 to 0 or 0 to -1 as this counts the number of pages mapped by PTE. + folio->_large_mapcount. We also maintain the two slots for tracking MM owners (MM ID and corresponding mapcount), and the current status ("maybe mapped shared" vs. "mapped exclusively"). + With CONFIG_PAGE_MAPCOUNT, we also increment/decrement + page->_mapcount and increment/decrement folio->_nr_pages_mapped when + page->_mapcount goes from -1 to 0 or 0 to -1 as this counts the number + of pages mapped by PTE. + split_huge_page internally has to distribute the refcounts in the head page to the tail pages before clearing all PG_head/tail bits from the page structures. It can be done easily for refcounts taken by page table @@ -159,8 +164,8 @@ clear where references should go after split: it will stay on the head page. Note that split_huge_pmd() doesn't have any limitations on refcounting: pmd can be split at any point and never fails. -Partial unmap and deferred_split_folio() -======================================== +Partial unmap and deferred_split_folio() (anon THP only) +======================================================== Unmapping part of THP (with munmap() or other way) is not going to free memory immediately. Instead, we detect that a subpage of THP is not in use @@ -175,3 +180,13 @@ a THP crosses a VMA boundary. The function deferred_split_folio() is used to queue a folio for splitting. 
The splitting itself will happen when we get memory pressure via shrinker interface. + +With CONFIG_PAGE_MAPCOUNT, we reliably detect partial mappings based on +folio->_nr_pages_mapped. + +With CONFIG_NO_PAGE_MAPCOUNT, we detect partial mappings based on the +average per-page mapcount in a THP: if the average is < 1, an anon THP is +certainly partially mapped. As long as only a single process maps a THP, +this detection is reliable. With long-running child processes, there can +be scenarios where partial mappings can currently not be detected, and +might need asynchronous detection during memory reclaim in the future. diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c131b0efff0f..6b82b618846e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -240,7 +240,7 @@ static __always_inline void folio_set_large_mapcount(struct folio *folio, folio_set_mm_id(folio, 0, vma->vm_mm->mm_id); } -static __always_inline void folio_add_large_mapcount(struct folio *folio, +static __always_inline int folio_add_return_large_mapcount(struct folio *folio, int diff, struct vm_area_struct *vma) { const mm_id_t mm_id = vma->vm_mm->mm_id; @@ -286,9 +286,11 @@ static __always_inline void folio_add_large_mapcount(struct folio *folio, folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT; } folio_unlock_large_mapcount(folio); + return new_mapcount_val + 1; } +#define folio_add_large_mapcount folio_add_return_large_mapcount -static __always_inline void folio_sub_large_mapcount(struct folio *folio, +static __always_inline int folio_sub_return_large_mapcount(struct folio *folio, int diff, struct vm_area_struct *vma) { const mm_id_t mm_id = vma->vm_mm->mm_id; @@ -331,7 +333,9 @@ static __always_inline void folio_sub_large_mapcount(struct folio *folio, folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT; out: folio_unlock_large_mapcount(folio); + return new_mapcount_val + 1; } +#define folio_sub_large_mapcount folio_sub_return_large_mapcount #else /* !CONFIG_MM_ID */ /* * See __folio_rmap_sanity_checks(), we might map large folios even without @@ -350,17 +354,33 @@ static inline void folio_add_large_mapcount(struct folio *folio, atomic_add(diff, &folio->_large_mapcount); } +static inline int folio_add_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + BUILD_BUG(); +} + static inline void folio_sub_large_mapcount(struct folio *folio, int diff, struct vm_area_struct *vma) { atomic_sub(diff, &folio->_large_mapcount); } + +static inline int folio_sub_return_large_mapcount(struct folio *folio, + int diff, struct vm_area_struct *vma) +{ + BUILD_BUG(); +} #endif /* CONFIG_MM_ID */ #define folio_inc_large_mapcount(folio, vma) \ folio_add_large_mapcount(folio, 1, vma) +#define folio_inc_return_large_mapcount(folio, vma) \ + folio_add_return_large_mapcount(folio, 1, vma) #define folio_dec_large_mapcount(folio, vma) \ folio_sub_large_mapcount(folio, 1, vma) +#define folio_dec_return_large_mapcount(folio, vma) \ + folio_sub_return_large_mapcount(folio, 1, vma) /* RMAP flags, currently only relevant for some anon rmap operations. 
*/ typedef int __bitwise rmap_t; @@ -538,9 +558,11 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, break; } - do { - atomic_inc(&page->_mapcount); - } while (page++, --nr_pages > 0); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) { + do { + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + } folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; case RMAP_LEVEL_PMD: @@ -638,7 +660,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, do { if (PageAnonExclusive(page)) ClearPageAnonExclusive(page); - atomic_inc(&page->_mapcount); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); folio_add_large_mapcount(folio, orig_nr_pages, dst_vma); break; diff --git a/mm/internal.h b/mm/internal.h index 04724971379c..558c8e2a3d94 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -125,6 +125,8 @@ void page_writeback_init(void); */ static inline int folio_nr_pages_mapped(const struct folio *folio) { + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) + return -1; return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; } @@ -762,7 +764,8 @@ static inline void prep_compound_head(struct page *page, unsigned int order) folio_set_order(folio, order); atomic_set(&folio->_large_mapcount, -1); - atomic_set(&folio->_nr_pages_mapped, 0); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, 0); if (IS_ENABLED(CONFIG_MM_ID)) { folio->_mm_ids = 0; folio->_mm_id_mapcount[0] = -1; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c8daa3e64266..2c6ae7e5aaad 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -951,7 +951,8 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) bad_page(page, "nonzero large_mapcount"); goto out; } - if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) && + unlikely(atomic_read(&folio->_nr_pages_mapped))) { bad_page(page, "nonzero nr_pages_mapped"); goto out; } diff --git a/mm/rmap.c b/mm/rmap.c index 8de415157bc8..67bb273dfb80 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1258,6 +1258,16 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, break; } + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); + if (nr == orig_nr_pages) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + do { first += atomic_inc_and_test(&page->_mapcount); } while (page++, --nr_pages > 0); @@ -1271,6 +1281,18 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: first = atomic_inc_and_test(&folio->_entire_mapcount); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + if (level == RMAP_LEVEL_PMD && first) + *nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_inc_return_large_mapcount(folio, vma); + if (nr == 1) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { @@ -1436,13 +1458,23 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, break; } } + + VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && + atomic_read(&folio->_mapcount) > 0, folio); for (i = 0; i < nr_pages; i++) { struct page *cur_page = page + i; - /* While PTE-mapping a THP we have a PMD and a PTE mapping. 
*/ - VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || - (folio_test_large(folio) && - folio_entire_mapcount(folio) > 1)) && + VM_WARN_ON_FOLIO(folio_test_large(folio) && + folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(cur_page), folio); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) + continue; + + /* + * While PTE-mapping a THP we have a PMD and a PTE + * mapping. + */ + VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 && PageAnonExclusive(cur_page), folio); } @@ -1548,20 +1580,23 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); - /* increment count (starts at -1) */ - atomic_set(&page->_mapcount, 0); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + /* increment count (starts at -1) */ + atomic_set(&page->_mapcount, 0); if (exclusive) SetPageAnonExclusive(page); } folio_set_large_mapcount(folio, nr, vma); - atomic_set(&folio->_nr_pages_mapped, nr); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, nr); } else { nr = folio_large_nr_pages(folio); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_set_large_mapcount(folio, 1, vma); - atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); if (exclusive) SetPageAnonExclusive(&folio->page); nr_pmdmapped = nr; @@ -1665,6 +1700,19 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, break; } + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); + if (!nr) { + /* Now completely unmapped. */ + nr = folio_nr_pages(folio); + } else { + partially_mapped = nr < folio_large_nr_pages(folio) && + !folio_entire_mapcount(folio); + nr = 0; + } + break; + } + folio_sub_large_mapcount(folio, nr_pages, vma); do { last += atomic_add_negative(-1, &page->_mapcount); @@ -1678,6 +1726,22 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, break; case RMAP_LEVEL_PMD: case RMAP_LEVEL_PUD: + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + last = atomic_add_negative(-1, &folio->_entire_mapcount); + if (level == RMAP_LEVEL_PMD && last) + nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_dec_return_large_mapcount(folio, vma); + if (!nr) { + /* Now completely unmapped. */ + nr = folio_large_nr_pages(folio); + } else { + partially_mapped = last && + nr < folio_large_nr_pages(folio); + nr = 0; + } + break; + } + folio_dec_large_mapcount(folio, vma); last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { From ab71d2d301211a45420b1fb085072c79ea6a8027 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:26 -0800 Subject: [PATCH 323/431] mm/damon/sysfs-schemes: let damon_sysfs_scheme_set_filters() be used for different named directories Patch series "mm/damon: add sysfs dirs for managing DAMOS filters based on handling layers". DAMOS filters are categorized into two groups based on their handling layers, namely core and operations layers. The categorization affects when each filter is evaluated. Core layer handled filters are evaluated first. The order meant nothing before, but introduction of allow filters changed that. DAMOS sysfs interface provides single directory for filters, namely 'filters'. Users can install any filters in any order there. DAMON will internally categorize those into core and operations layer handled ones, and apply the evaluation order rule. 
The ordering rule is clearly documented. But the interface could still confuse users, since it allows installing filters in the directory in mixed ways. Add two sysfs directories for managing filters by handling layers, namely 'core_filters' and 'ops_filters', for filters that are handled by the core and operations layers, respectively. Those directories cannot be used for installing filters that are not handled by the assumed layers. For backward compatibility, keep the 'filters' directory with its current behavior. Filters installed in the directory will be added to DAMON after those of 'core_filters' and 'ops_filters' directories, with the automatic categorizations. Also, on the usage documents, recommend users to use the new directories, noting that the 'filters' directory could be deprecated in the future. Note that the new directories provide all features that were provided with 'filters', just in a clearer way. Deprecating 'filters' in the future will hence not cause an irreversible feature loss. This patch (of 8): damon_sysfs_scheme_set_filters() is using a hard-coded directory name, "filters". Refactor it to work with generally named directories of the same file hierarchy, so that upcoming changes can use it for adding sibling directories that have the same files as "filters", named "core_filters" and "ops_filters". [arnd@arndb.de: avoid Wformat-security warning] Link: https://lkml.kernel.org/r/20250310135142.4176976-1-arnd@kernel.org Link: https://lkml.kernel.org/r/20250305222733.59089-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250305222733.59089-2-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Arnd Bergmann Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 66a1c46cee84..d769b24f90a4 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -1604,7 +1604,9 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme) return err; } -static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme) +static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme, + const char *name, + struct damon_sysfs_scheme_filters **filters_ptr) { struct damon_sysfs_scheme_filters *filters = damon_sysfs_scheme_filters_alloc(); @@ -1614,11 +1616,11 @@ static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme) return -ENOMEM; err = kobject_init_and_add(&filters->kobj, &damon_sysfs_scheme_filters_ktype, &scheme->kobj, - "filters"); + "%s", name); if (err) kobject_put(&filters->kobj); else - scheme->filters = filters; + *filters_ptr = filters; return err; } @@ -1670,7 +1672,8 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) err = damon_sysfs_scheme_set_watermarks(scheme); if (err) goto put_quotas_access_pattern_out; - err = damon_sysfs_scheme_set_filters(scheme); + err = damon_sysfs_scheme_set_filters(scheme, "filters", + &scheme->filters); if (err) goto put_watermarks_quotas_access_pattern_out; err = damon_sysfs_scheme_set_stats(scheme); From db2e76ceb40b85f5d6302b05f40dd99955b938f4 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:27 -0800 Subject: [PATCH 324/431] mm/damon/sysfs-schemes: implement core_filters and ops_filters directories Implement two DAMOS sysfs directories for managing core and operations layer handled filters separately. Those are named 'core_filters' and 'ops_filters', and have the same file hierarchy as 'filters'.
This commit is only populating and cleaning up the directories, not really connecting the files with DAMON. Following changes will make the connections. Link: https://lkml.kernel.org/r/20250305222733.59089-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index d769b24f90a4..30a7f288bd4c 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -1504,6 +1504,8 @@ struct damon_sysfs_scheme { unsigned long apply_interval_us; struct damon_sysfs_quotas *quotas; struct damon_sysfs_watermarks *watermarks; + struct damon_sysfs_scheme_filters *core_filters; + struct damon_sysfs_scheme_filters *ops_filters; struct damon_sysfs_scheme_filters *filters; struct damon_sysfs_stats *stats; struct damon_sysfs_scheme_regions *tried_regions; @@ -1624,6 +1626,33 @@ static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme, return err; } +static int damos_sysfs_set_filter_dirs(struct damon_sysfs_scheme *scheme) +{ + int err; + + err = damon_sysfs_scheme_set_filters(scheme, "filters", + &scheme->filters); + if (err) + return err; + err = damon_sysfs_scheme_set_filters(scheme, "core_filters", + &scheme->core_filters); + if (err) + goto put_filters_out; + err = damon_sysfs_scheme_set_filters(scheme, "ops_filters", + &scheme->ops_filters); + if (err) + goto put_core_filters_out; + return 0; + +put_core_filters_out: + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; +put_filters_out: + kobject_put(&scheme->filters->kobj); + scheme->filters = NULL; + return err; +} + static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme) { struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc(); @@ -1672,8 +1701,7 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) err = damon_sysfs_scheme_set_watermarks(scheme); if (err) goto put_quotas_access_pattern_out; - err = damon_sysfs_scheme_set_filters(scheme, "filters", - &scheme->filters); + err = damos_sysfs_set_filter_dirs(scheme); if (err) goto put_watermarks_quotas_access_pattern_out; err = damon_sysfs_scheme_set_stats(scheme); @@ -1688,6 +1716,10 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) kobject_put(&scheme->tried_regions->kobj); scheme->tried_regions = NULL; put_filters_watermarks_quotas_access_pattern_out: + kobject_put(&scheme->ops_filters->kobj); + scheme->ops_filters = NULL; + kobject_put(&scheme->core_filters->kobj); + scheme->core_filters = NULL; kobject_put(&scheme->filters->kobj); scheme->filters = NULL; put_watermarks_quotas_access_pattern_out: @@ -1711,6 +1743,10 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme) kobject_put(&scheme->watermarks->kobj); damon_sysfs_scheme_filters_rm_dirs(scheme->filters); kobject_put(&scheme->filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->core_filters); + kobject_put(&scheme->core_filters->kobj); + damon_sysfs_scheme_filters_rm_dirs(scheme->ops_filters); + kobject_put(&scheme->ops_filters->kobj); kobject_put(&scheme->stats->kobj); damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions); kobject_put(&scheme->tried_regions->kobj); From 968cbea1bb0e4f608f4c87a3c7d67ef9fd720c33 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:28 -0800 Subject: [PATCH 325/431] mm/damon/sysfs-schemes: commit filters in 
{core,ops}_filters directories Connect user inputs for files under core_filters and ops_filters with DAMON, so that the files can really function. Because {core,ops}_filters are easier to manage in terms of the expected filter evaluation order, add filters in {core,ops}_filters before those in the 'filters' directory. Link: https://lkml.kernel.org/r/20250305222733.59089-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 30a7f288bd4c..65c6091cd879 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -2143,8 +2143,6 @@ static struct damos *damon_sysfs_mk_scheme( struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas; struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights; struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks; - struct damon_sysfs_scheme_filters *sysfs_filters = - sysfs_scheme->filters; struct damos *scheme; int err; @@ -2184,7 +2182,17 @@ static struct damos *damon_sysfs_mk_scheme( return NULL; } - err = damon_sysfs_add_scheme_filters(scheme, sysfs_filters); + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->core_filters); + if (err) { + damon_destroy_scheme(scheme); + return NULL; + } + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->ops_filters); + if (err) { + damon_destroy_scheme(scheme); + return NULL; + } + err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->filters); if (err) { damon_destroy_scheme(scheme); return NULL; From f7f0d88b7d6da29d2b073740aa130685f0e18609 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:29 -0800 Subject: [PATCH 326/431] mm/damon/core: expose damos_filter_for_ops() to DAMON kernel API callers damos_filter_for_ops() can be useful for avoiding putting the wrong type of filters in the wrong place. Expose it to DAMON kernel API callers. Link: https://lkml.kernel.org/r/20250305222733.59089-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 1 + mm/damon/core.c | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 52559475dbe7..eed008b64a23 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -894,6 +894,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed, struct damos_filter *damos_new_filter(enum damos_filter_type type, bool matching, bool allow); void damos_add_filter(struct damos *s, struct damos_filter *f); +bool damos_filter_for_ops(enum damos_filter_type type); void damos_destroy_filter(struct damos_filter *f); struct damos_quota_goal *damos_new_quota_goal( diff --git a/mm/damon/core.c b/mm/damon/core.c index 511c464adcc5..ebbb22840435 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -281,7 +281,14 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return filter; } -static bool damos_filter_for_ops(enum damos_filter_type type) +/** + * damos_filter_for_ops() - Return if the filter is ops-handled one. + * @type: type of the filter. + * + * Return: true if the filter of @type needs to be handled by ops layer, false + * otherwise.
+ */ +bool damos_filter_for_ops(enum damos_filter_type type) { switch (type) { case DAMOS_FILTER_TYPE_ADDR: From 9f643a9854df682548bbcdf68cbd89e74cbfd6dc Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:30 -0800 Subject: [PATCH 327/431] mm/damon/sysfs-schemes: record filters of which layer should be added to the given filters directory Unlike their name and assumed purposes, {core,ops}_filters DAMOS sysfs directories are allowing installing any type of filters. As a first step for preventing such wrong installments, add information about filters that handled by what layer should the installed to the given filters directory in the DAMOS sysfs internal data structures. Link: https://lkml.kernel.org/r/20250305222733.59089-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 46 +++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 65c6091cd879..e4e36c34ec0e 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -309,8 +309,18 @@ static const struct kobj_type damon_sysfs_stats_ktype = { * filter directory */ +/* + * enum damos_sysfs_filter_handle_layer - Layers handling filters of a dir. + */ +enum damos_sysfs_filter_handle_layer { + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, +}; + struct damon_sysfs_scheme_filter { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; enum damos_filter_type type; bool matching; bool allow; @@ -320,9 +330,15 @@ struct damon_sysfs_scheme_filter { int target_idx; }; -static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void) +static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc( + enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + struct damon_sysfs_scheme_filter *filter; + + filter = kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + if (filter) + filter->handle_layer = layer; + return filter; } /* Should match with enum damos_filter_type */ @@ -595,14 +611,20 @@ static const struct kobj_type damon_sysfs_scheme_filter_ktype = { struct damon_sysfs_scheme_filters { struct kobject kobj; + enum damos_sysfs_filter_handle_layer handle_layer; struct damon_sysfs_scheme_filter **filters_arr; int nr; }; static struct damon_sysfs_scheme_filters * -damon_sysfs_scheme_filters_alloc(void) +damon_sysfs_scheme_filters_alloc(enum damos_sysfs_filter_handle_layer layer) { - return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + struct damon_sysfs_scheme_filters *filters; + + filters = kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + if (filters) + filters->handle_layer = layer; + return filters; } static void damon_sysfs_scheme_filters_rm_dirs( @@ -635,7 +657,8 @@ static int damon_sysfs_scheme_filters_add_dirs( filters->filters_arr = filters_arr; for (i = 0; i < nr_filters; i++) { - filter = damon_sysfs_scheme_filter_alloc(); + filter = damon_sysfs_scheme_filter_alloc( + filters->handle_layer); if (!filter) { damon_sysfs_scheme_filters_rm_dirs(filters); return -ENOMEM; @@ -1607,11 +1630,11 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme) } static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme, - const char *name, + enum damos_sysfs_filter_handle_layer layer, 
const char *name, struct damon_sysfs_scheme_filters **filters_ptr) { struct damon_sysfs_scheme_filters *filters = - damon_sysfs_scheme_filters_alloc(); + damon_sysfs_scheme_filters_alloc(layer); int err; if (!filters) @@ -1630,15 +1653,18 @@ static int damos_sysfs_set_filter_dirs(struct damon_sysfs_scheme *scheme) { int err; - err = damon_sysfs_scheme_set_filters(scheme, "filters", + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, "filters", &scheme->filters); if (err) return err; - err = damon_sysfs_scheme_set_filters(scheme, "core_filters", + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, "core_filters", &scheme->core_filters); if (err) goto put_filters_out; - err = damon_sysfs_scheme_set_filters(scheme, "ops_filters", + err = damon_sysfs_scheme_set_filters(scheme, + DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, "ops_filters", &scheme->ops_filters); if (err) goto put_core_filters_out; From ae8fd5b6666b0d99f485748b2b2259de1a7458dc Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:31 -0800 Subject: [PATCH 328/431] mm/damon/sysfs-schemes: return error when for attempts to install filters on wrong sysfs directory Return error if the user tries to install a DAMOS filter on DAMOS filters sysfs directory that assumed to be used for filters that handled by a DAMON layer that not same to that for the installing filter. Link: https://lkml.kernel.org/r/20250305222733.59089-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index e4e36c34ec0e..1895d2d2c295 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -362,6 +362,23 @@ static ssize_t type_show(struct kobject *kobj, damon_sysfs_scheme_filter_type_strs[filter->type]); } +static bool damos_sysfs_scheme_filter_valid_type( + enum damos_sysfs_filter_handle_layer layer, + enum damos_filter_type type) +{ + switch (layer) { + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH: + return true; + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE: + return !damos_filter_for_ops(type); + case DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS: + return damos_filter_for_ops(type); + default: + break; + } + return false; +} + static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { @@ -373,6 +390,9 @@ static ssize_t type_store(struct kobject *kobj, for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) { if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[ type])) { + if (!damos_sysfs_scheme_filter_valid_type( + filter->handle_layer, type)) + break; filter->type = type; ret = count; break; From 899e4c14afa640b8f7374e162f2336b67f07123d Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:32 -0800 Subject: [PATCH 329/431] Docs/ABI/damon: document {core,ops}_filters directories Document the new DAMOS filters sysfs directories on ABI doc. 
Link: https://lkml.kernel.org/r/20250305222733.59089-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-damon | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index 76da77d7f7b6..293197f180ad 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -409,6 +409,22 @@ Description: Writing 'Y' or 'N' to this file sets whether to allow or reject applying the scheme's action to the memory that satisfies the 'type' and the 'matching' of the directory. +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//core_filters +Date: Feb 2025 +Contact: SeongJae Park +Description: Directory for DAMON core layer-handled DAMOS filters. Files + under this directory works same to those of + /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters + directory. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//ops_filters +Date: Feb 2025 +Contact: SeongJae Park +Description: Directory for DAMON operations set layer-handled DAMOS filters. + Files under this directory works same to those of + /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters + directory. + What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//stats/nr_tried Date: Mar 2022 Contact: SeongJae Park From 114b480877698f7835a5ba95c6fbd97b63b119f6 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 5 Mar 2025 14:27:33 -0800 Subject: [PATCH 330/431] Docs/admin-guide/mm/damon/usage: update for {core,ops}_filters directories Document {core,ops}_filters directories on usage document. Link: https://lkml.kernel.org/r/20250305222733.59089-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 31 ++++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index de549dd18107..ced2013db3df 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -83,7 +83,7 @@ comma (","). │ │ │ │ │ │ │ │ :ref:`goals `/nr_goals │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value │ │ │ │ │ │ │ :ref:`watermarks `/metric,interval_us,high,mid,low - │ │ │ │ │ │ │ :ref:`filters `/nr_filters + │ │ │ │ │ │ │ :ref:`{core_,ops_,}filters `/nr_filters │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes @@ -307,9 +307,10 @@ to ``N-1``. Each directory represents each DAMON-based operation scheme. schemes// ------------ -In each scheme directory, five directories (``access_pattern``, ``quotas``, -``watermarks``, ``filters``, ``stats``, and ``tried_regions``) and three files -(``action``, ``target_nid`` and ``apply_interval``) exist. +In each scheme directory, seven directories (``access_pattern``, ``quotas``, +``watermarks``, ``core_filters``, ``ops_filters``, ``filters``, ``stats``, and +``tried_regions``) and three files (``action``, ``target_nid`` and +``apply_interval``) exist. The ``action`` file is for setting and getting the scheme's :ref:`action `. 
The keywords that can be written to and read @@ -420,13 +421,24 @@ The ``interval`` should written in microseconds unit. .. _sysfs_filters: -schemes//filters/ --------------------- +schemes//{core\_,ops\_,}filters/ +----------------------------------- -The directory for the :ref:`filters ` of the given +Directories for :ref:`filters ` of the given DAMON-based operation scheme. -In the beginning, this directory has only one file, ``nr_filters``. Writing a +``core_filters`` and ``ops_filters`` directories are for the filters handled by +the DAMON core layer and operations set layer, respectively. ``filters`` +directory can be used for installing filters regardless of their handled +layers. Filters that requested by ``core_filters`` and ``ops_filters`` will be +installed before those of ``filters``. All three directories have same files. + +Use of ``filters`` directory can make expecting evaluation orders of given +filters with the files under directory bit confusing. Users are hence +recommended to use ``core_filters`` and ``ops_filters`` directories. The +``filters`` directory could be deprecated in future. + +In the beginning, the directory has only one file, ``nr_filters``. Writing a number (``N``) to the file creates the number of child directories named ``0`` to ``N-1``. Each directory represents each filter. The filters are evaluated in the numeric order. @@ -435,7 +447,7 @@ Each filter directory contains nine files, namely ``type``, ``matching``, ``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, ``min``, ``max`` and ``target_idx``. To ``type`` file, you can write the type of the filter. Refer to :ref:`the design doc ` for available type -names and their meanings. +names, their meaning and on what layer those are handled. For ``memcg`` type, you can specify the memory cgroup of the interest by writing the path of the memory cgroup from the cgroups mount point to @@ -455,6 +467,7 @@ the ``type`` and ``matching`` should be allowed or not. For example, below restricts a DAMOS action to be applied to only non-anonymous pages of all memory cgroups except ``/having_care_already``.:: + # cd ops_filters/0/ # echo 2 > nr_filters # # disallow anonymous pages echo anon > 0/type From 2273dea6b1e1fcdd06d207048a2cd563ed80111a Mon Sep 17 00:00:00 2001 From: Liu Shixin Date: Wed, 5 Mar 2025 11:54:09 +0800 Subject: [PATCH 331/431] mm/hugetlb: update nr_huge_pages and surplus_huge_pages together In alloc_surplus_hugetlb_folio(), we increase nr_huge_pages and surplus_huge_pages separately. In the middle window, if we set nr_hugepages to smaller and satisfy count < persistent_huge_pages(h), the surplus_huge_pages will be increased by adjust_pool_surplus(). After adding delay in the middle window, we can reproduce the problem easily by following step: 1. echo 3 > /proc/sys/vm/nr_overcommit_hugepages 2. mmap two hugepages. When nr_huge_pages=2 and surplus_huge_pages=1, goto step 3. 3. echo 0 > /proc/sys/vm/nr_huge_pages Finally, nr_huge_pages is less than surplus_huge_pages. To fix the problem, call only_alloc_fresh_hugetlb_folio() instead and move down __prep_account_new_huge_page() into the hugetlb_lock. 
Link: https://lkml.kernel.org/r/20250305035409.2391344-1-liushixin2@huawei.com Fixes: 0c397daea1d4 ("mm, hugetlb: further simplify hugetlb allocation API") Signed-off-by: Liu Shixin Acked-by: Peter Xu Acked-by: Oscar Salvador Cc: David Hildenbrand Cc: Kefeng Wang Cc: Liu Shixin Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 438de55dd38d..af9b8c1fca67 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2259,11 +2259,20 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, goto out_unlock; spin_unlock_irq(&hugetlb_lock); - folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask); + folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); if (!folio) return NULL; + hugetlb_vmemmap_optimize_folio(h, folio); + spin_lock_irq(&hugetlb_lock); + /* + * nr_huge_pages needs to be adjusted within the same lock cycle + * as surplus_pages, otherwise it might confuse + * persistent_huge_pages() momentarily. + */ + __prep_account_new_huge_page(h, nid); + /* * We could have raced with the pool size change. * Double check that and simply deallocate the new page From ff22f9299d7b2c7874b560993c21543708b7e1b6 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Thu, 6 Mar 2025 12:50:10 -0800 Subject: [PATCH 332/431] page_io: zswap: do not crash the kernel on decompression failure Currently, we crash the kernel when a decompression failure occurs in zswap (either because of memory corruption, or a bug in the compression algorithm). This is overkill. We should only SIGBUS the unfortunate process asking for the zswap entry on zswap load, and skip the corrupted entry in zswap writeback. See [1] for a recent upstream discussion about this. The zswap writeback case is relatively straightforward to fix. For the zswap_load() case, we change the return behavior: * Return 0 on success. * Return -ENOENT (with the folio locked) if zswap does not own the swapped out content. * Return -EIO if zswap owns the swapped out content, but encounters a decompression failure for some reasons. The folio will be unlocked, but not be marked up-to-date, which will eventually cause the process requesting the page to SIGBUS (see the handling of not-up-to-date folio in do_swap_page() in mm/memory.c), without crashing the kernel. * Return -EINVAL if we encounter a large folio, as large folio should not be swapped in while zswap is being used. Similar to the -EIO case, we also unlock the folio but do not mark it as up-to-date to SIGBUS the faulting process. As a side effect, we require one extra zswap tree traversal in the load and writeback paths. 
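Schematically, the new contract seen by a caller such as swap_read_folio() looks like the sketch below. This is an illustration of the return codes listed above, not the verbatim hunk applied further down; handle_zswap_load() is a name made up for the example:

  /* Sketch of the caller-side handling of zswap_load()'s return codes. */
  static void handle_zswap_load(struct folio *folio)
  {
          switch (zswap_load(folio)) {
          case 0:
                  /* Decompressed into the folio: unlocked and up-to-date. */
                  break;
          case -EIO:
          case -EINVAL:
                  /*
                   * zswap owns the entry but cannot load it (decompression
                   * failure, or an unsupported large folio).  The folio is
                   * unlocked but left !uptodate, so do_swap_page() reports
                   * an I/O error and the faulting task gets SIGBUS.
                   */
                  break;
          case -ENOENT:
                  /*
                   * Not owned by zswap.  The folio stays locked; fall back
                   * to reading from the backing swap device.
                   */
                  break;
          }
  }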
Quick benchmarking on a kernel build test shows no performance difference: With the new scheme: real: mean: 125.1s, stdev: 0.12s user: mean: 3265.23s, stdev: 9.62s sys: mean: 2156.41s, stdev: 13.98s The old scheme: real: mean: 125.78s, stdev: 0.45s user: mean: 3287.18s, stdev: 5.95s sys: mean: 2177.08s, stdev: 26.52s [nphamcs@gmail.com: fix documentation of zswap_load()] Link: https://lkml.kernel.org/r/20250306222453.1269456-1-nphamcs@gmail.com Link: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/ [1] Link: https://lkml.kernel.org/r/20250306205011.784787-1-nphamcs@gmail.com Signed-off-by: Nhat Pham Suggested-by: Matthew Wilcox Suggested-by: Yosry Ahmed Suggested-by: Johannes Weiner Reviewed-by: Chengming Zhou Acked-by: Johannes Weiner Signed-off-by: Andrew Morton --- include/linux/zswap.h | 6 +-- mm/page_io.c | 6 +-- mm/zswap.c | 119 +++++++++++++++++++++++++++++------------- 3 files changed, 88 insertions(+), 43 deletions(-) diff --git a/include/linux/zswap.h b/include/linux/zswap.h index d961ead91bf1..30c193a1207e 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -26,7 +26,7 @@ struct zswap_lruvec_state { unsigned long zswap_total_pages(void); bool zswap_store(struct folio *folio); -bool zswap_load(struct folio *folio); +int zswap_load(struct folio *folio); void zswap_invalidate(swp_entry_t swp); int zswap_swapon(int type, unsigned long nr_pages); void zswap_swapoff(int type); @@ -44,9 +44,9 @@ static inline bool zswap_store(struct folio *folio) return false; } -static inline bool zswap_load(struct folio *folio) +static inline int zswap_load(struct folio *folio) { - return false; + return -ENOENT; } static inline void zswap_invalidate(swp_entry_t swp) {} diff --git a/mm/page_io.c b/mm/page_io.c index 9b983de351f9..4bce19df557b 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -638,11 +638,11 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug) if (swap_read_folio_zeromap(folio)) { folio_unlock(folio); goto finish; - } else if (zswap_load(folio)) { - folio_unlock(folio); - goto finish; } + if (zswap_load(folio) != -ENOENT) + goto finish; + /* We have to read from slower devices. Increase zswap protection. 
*/ zswap_folio_swapin(folio); diff --git a/mm/zswap.c b/mm/zswap.c index 5f0e62289444..0dcc54eab58b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail; static u64 zswap_reject_compress_fail; /* Compressed page was too big for the allocator to (optimally) store */ static u64 zswap_reject_compress_poor; +/* Load or writeback failed due to decompression failure */ +static u64 zswap_decompress_fail; /* Store failed because underlying allocator could not get memory */ static u64 zswap_reject_alloc_fail; /* Store failed because the entry metadata could not be allocated (rare) */ @@ -985,11 +987,12 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry, return comp_ret == 0 && alloc_ret == 0; } -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio) +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio) { struct zpool *zpool = entry->pool->zpool; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; + int decomp_ret, dlen; u8 *src, *obj; acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool); @@ -1012,11 +1015,21 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio) sg_init_table(&output, 1); sg_set_folio(&output, folio, PAGE_SIZE, 0); acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE); - BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait)); - BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE); + decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); + dlen = acomp_ctx->req->dlen; zpool_obj_read_end(zpool, entry->handle, obj); acomp_ctx_put_unlock(acomp_ctx); + + if (!decomp_ret && dlen == PAGE_SIZE) + return true; + + zswap_decompress_fail++; + pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n", + swp_type(entry->swpentry), + swp_offset(entry->swpentry), + entry->pool->tfm_name, entry->length, dlen); + return false; } /********************************* @@ -1046,6 +1059,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, }; + int ret = 0; /* try to allocate swap cache folio */ si = get_swap_device(swpentry); @@ -1067,8 +1081,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry, * and freed when invalidated by the concurrent shrinker anyway. */ if (!folio_was_allocated) { - folio_put(folio); - return -EEXIST; + ret = -EEXIST; + goto out; } /* @@ -1081,14 +1095,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry, * be dereferenced. 
*/ tree = swap_zswap_tree(swpentry); - if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) { - delete_from_swap_cache(folio); - folio_unlock(folio); - folio_put(folio); - return -ENOMEM; + if (entry != xa_load(tree, offset)) { + ret = -ENOMEM; + goto out; } - zswap_decompress(entry, folio); + if (!zswap_decompress(entry, folio)) { + ret = -EIO; + goto out; + } + + xa_erase(tree, offset); count_vm_event(ZSWPWB); if (entry->objcg) @@ -1104,9 +1121,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry, /* start writeback */ __swap_writepage(folio, &wbc); - folio_put(folio); - return 0; +out: + if (ret && ret != -EEXIST) { + delete_from_swap_cache(folio); + folio_unlock(folio); + } + folio_put(folio); + return ret; } /********************************* @@ -1606,7 +1628,27 @@ bool zswap_store(struct folio *folio) return ret; } -bool zswap_load(struct folio *folio) +/** + * zswap_load() - load a folio from zswap + * @folio: folio to load + * + * Return: 0 on success, with the folio unlocked and marked up-to-date, or one + * of the following error codes: + * + * -EIO: if the swapped out content was in zswap, but could not be loaded + * into the page due to a decompression failure. The folio is unlocked, but + * NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page() + * will SIGBUS). + * + * -EINVAL: if the swapped out content was in zswap, but the page belongs + * to a large folio, which is not supported by zswap. The folio is unlocked, + * but NOT marked up-to-date, so that an IO error is emitted (e.g. + * do_swap_page() will SIGBUS). + * + * -ENOENT: if the swapped out content was not in zswap. The folio remains + * locked on return. + */ +int zswap_load(struct folio *folio) { swp_entry_t swp = folio->swap; pgoff_t offset = swp_offset(swp); @@ -1617,18 +1659,32 @@ bool zswap_load(struct folio *folio) VM_WARN_ON_ONCE(!folio_test_locked(folio)); if (zswap_never_enabled()) - return false; + return -ENOENT; /* * Large folios should not be swapped in while zswap is being used, as * they are not properly handled. Zswap does not properly load large * folios, and a large folio may only be partially in zswap. - * - * Return true without marking the folio uptodate so that an IO error is - * emitted (e.g. do_swap_page() will sigbus). */ - if (WARN_ON_ONCE(folio_test_large(folio))) - return true; + if (WARN_ON_ONCE(folio_test_large(folio))) { + folio_unlock(folio); + return -EINVAL; + } + + entry = xa_load(tree, offset); + if (!entry) + return -ENOENT; + + if (!zswap_decompress(entry, folio)) { + folio_unlock(folio); + return -EIO; + } + + folio_mark_uptodate(folio); + + count_vm_event(ZSWPIN); + if (entry->objcg) + count_objcg_events(entry->objcg, ZSWPIN, 1); /* * When reading into the swapcache, invalidate our entry. The @@ -1642,27 +1698,14 @@ bool zswap_load(struct folio *folio) * files, which reads into a private page and may free it if * the fault fails. We remain the primary owner of the entry.) 
*/ - if (swapcache) - entry = xa_erase(tree, offset); - else - entry = xa_load(tree, offset); - - if (!entry) - return false; - - zswap_decompress(entry, folio); - - count_vm_event(ZSWPIN); - if (entry->objcg) - count_objcg_events(entry->objcg, ZSWPIN, 1); - if (swapcache) { - zswap_entry_free(entry); folio_mark_dirty(folio); + xa_erase(tree, offset); + zswap_entry_free(entry); } - folio_mark_uptodate(folio); - return true; + folio_unlock(folio); + return 0; } void zswap_invalidate(swp_entry_t swp) @@ -1757,6 +1800,8 @@ static int zswap_debugfs_init(void) zswap_debugfs_root, &zswap_reject_compress_fail); debugfs_create_u64("reject_compress_poor", 0444, zswap_debugfs_root, &zswap_reject_compress_poor); + debugfs_create_u64("decompress_fail", 0444, + zswap_debugfs_root, &zswap_decompress_fail); debugfs_create_u64("written_back_pages", 0444, zswap_debugfs_root, &zswap_written_back_pages); debugfs_create_file("pool_total_size", 0444, From e11079dd25c525aaf238d81287851ef16a521ef3 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:51 +0200 Subject: [PATCH 333/431] arm: mem_init: use memblock_phys_free() to free DMA memory on SA1111 Patch series "arch, mm: reduce code duplication in mem_init()", v2. Every architecture has implementation of mem_init() function and some even more than one. All these release free memory to the buddy allocator, most of them set high_memory to the end of directly addressable memory and many of them set max_mapnr for FLATMEM case. These patches pull the commonalities into the generic code and refactor some of the mem_init() implementations so that many of them can be just dropped. This patch (of 13): This will help to pull out memblock_free_all() to generic code. Link: https://lkml.kernel.org/r/20250313135003.836600-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20250313135003.836600-2-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: "Mike Rapoport (IBM)" Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm/mm/init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 5345d218899a..9aec1cb2386f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -277,14 +277,14 @@ void __init mem_init(void) set_max_mapnr(pfn_to_page(max_pfn) - mem_map); - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL); + memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif + /* this will put all unused low memory onto the freelists */ + memblock_free_all(); + free_highpages(); /* From 2b1d532e106ee63acb61a8e11608fafd75e52c4d Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:52 +0200 Subject: [PATCH 334/431] csky: move setup_initrd() to setup.c Memory used by initrd should be reserved as soon as possible before there any memblock allocations that might overwrite that memory. This will also help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-3-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Guo Ren (csky) Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/csky/kernel/setup.c | 43 ++++++++++++++++++++++++++++++++++++++++ arch/csky/mm/init.c | 43 ---------------------------------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c index fe715b707fd0..e0d6ca86ea8c 100644 --- a/arch/csky/kernel/setup.c +++ b/arch/csky/kernel/setup.c @@ -12,6 +12,45 @@ #include #include +#ifdef CONFIG_BLK_DEV_INITRD +static void __init setup_initrd(void) +{ + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_err("initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + + if (memblock_is_region_reserved(__pa(initrd_start), size)) { + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", + __pa(initrd_start), size); + goto disable; + } + + memblock_reserve(__pa(initrd_start), size); + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + + initrd_below_start_ok = 1; + + return; + +disable: + initrd_start = initrd_end = 0; + + pr_err(" - disabling initrd\n"); +} +#endif + static void __init csky_memblock_init(void) { unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); @@ -40,6 +79,10 @@ static void __init csky_memblock_init(void) max_low_pfn = min_low_pfn + sseg_size; } +#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; mmu_init(min_low_pfn, max_low_pfn); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index bde7cabd23df..ab51acbc19b2 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -42,45 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); -#ifdef CONFIG_BLK_DEV_INITRD -static void __init setup_initrd(void) -{ - unsigned long size; - - if (initrd_start >= initrd_end) { - pr_err("initrd not found or empty"); - goto disable; - } - - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { - pr_err("initrd extends beyond end of memory"); - goto disable; - } - - size = initrd_end - initrd_start; - - if (memblock_is_region_reserved(__pa(initrd_start), size)) { - pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", - __pa(initrd_start), size); - goto disable; - } - - memblock_reserve(__pa(initrd_start), size); - - pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)(initrd_start), size); - - initrd_below_start_ok = 1; - - return; - -disable: - initrd_start = initrd_end = 0; - - pr_err(" - disabling initrd\n"); -} -#endif - void __init mem_init(void) { #ifdef CONFIG_HIGHMEM @@ -92,10 +53,6 @@ void __init mem_init(void) #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); -#ifdef CONFIG_BLK_DEV_INITRD - setup_initrd(); -#endif - memblock_free_all(); #ifdef CONFIG_HIGHMEM From 30686816214b6062246e4918f3eadd1f55382425 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:53 
+0200 Subject: [PATCH 335/431] hexagon: move initialization of init_mm.context init to paging_init() This will help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-4-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/hexagon/mm/init.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 3458f39ca2ac..508bb6a8dcc9 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -59,14 +59,6 @@ void __init mem_init(void) * To-Do: someone somewhere should wipe out the bootmem map * after we're done? */ - - /* - * This can be moved to some more virtual-memory-specific - * initialization hook at some point. Set the init_mm - * descriptors "context" value to point to the initial - * kernel segment table's physical address. - */ - init_mm.context.ptbase = __pa(init_mm.pgd); } void sync_icache_dcache(pte_t pte) @@ -103,6 +95,12 @@ static void __init paging_init(void) free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */ + /* + * Set the init_mm descriptors "context" value to point to the + * initial kernel segment table's physical address. + */ + init_mm.context.ptbase = __pa(init_mm.pgd); + /* * Start of high memory area. Will probably need something more * fancy if we... get more fancy. From 67e7a600869ca557e259f1ce20f8b89bb95ca97d Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:54 +0200 Subject: [PATCH 336/431] MIPS: consolidate mem_init() for NUMA machines Both MIPS systems that support numa (loongsoon3 and sgi-ip27) have identical mem_init() for NUMA case. Move that into arch/mips/mm/init.c and drop duplicate per-machine definitions. Link: https://lkml.kernel.org/r/20250313135003.836600-5-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/mips/loongson64/numa.c | 7 ------- arch/mips/mm/init.c | 7 +++++++ arch/mips/sgi-ip27/ip27-memory.c | 9 --------- 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 8388400d052f..95d5f553ce19 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -164,13 +164,6 @@ void __init paging_init(void) free_area_init(zones_size); } -void __init mem_init(void) -{ - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); - memblock_free_all(); - setup_zero_pages(); /* This comes from node 0 */ -} - /* All PCI device belongs to logical Node-0 */ int pcibus_to_node(struct pci_bus *bus) { diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 4583d1a2a73e..3db6082c611e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -482,6 +482,13 @@ void __init mem_init(void) 0x80000000 - 4, KCORE_TEXT); #endif } +#else /* CONFIG_NUMA */ +void __init mem_init(void) +{ + high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); + memblock_free_all(); + setup_zero_pages(); /* This comes from node 0 */ +} #endif /* !CONFIG_NUMA */ void free_init_pages(const char *what, unsigned long begin, unsigned long end) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 1963313f55d8..2b3e46e2e607 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -406,8 +406,6 @@ void __init prom_meminit(void) } } -extern void setup_zero_pages(void); - void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0, }; @@ -416,10 +414,3 @@ void __init paging_init(void) zones_size[ZONE_NORMAL] = max_low_pfn; free_area_init(zones_size); } - -void __init mem_init(void) -{ - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); - memblock_free_all(); - setup_zero_pages(); /* This comes from node 0 */ -} From e74e2b8eb424c26dff35727d242437edb87684aa Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:55 +0200 Subject: [PATCH 337/431] MIPS: make setup_zero_pages() use memblock Allocating the zero pages from memblock is simpler because the memory is already reserved. This will also help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-6-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/mips/include/asm/mmzone.h | 2 -- arch/mips/mm/init.c | 18 +++++------------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index 14226ea42036..602a21aee9d4 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -20,6 +20,4 @@ #define nid_to_addrbase(nid) 0 #endif -extern void setup_zero_pages(void); - #endif /* _ASM_MMZONE_H_ */ diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 3db6082c611e..820e35a59d4d 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -59,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask); /* * Not static inline because used by IP27 special magic initialization code */ -void setup_zero_pages(void) +static void __init setup_zero_pages(void) { - unsigned int order, i; - struct page *page; + unsigned int order; if (cpu_has_vce) order = 3; else order = 0; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!empty_zero_page) - panic("Oh boy, that early out of memory?"); - - page = virt_to_page((void *)empty_zero_page); - split_page(page, order); - for (i = 0; i < (1 << order); i++, page++) - mark_page_reserved(page); + empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE); zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } @@ -470,9 +462,9 @@ void __init mem_init(void) BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT)); maar_init(); - memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ mem_init_free_highmem(); + memblock_free_all(); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) @@ -486,8 +478,8 @@ void __init mem_init(void) void __init mem_init(void) { high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); - memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ + memblock_free_all(); } #endif /* !CONFIG_NUMA */ From be971f957a80e2bbd7747a295886df1472803ff1 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:56 +0200 Subject: [PATCH 338/431] nios2: move pr_debug() about memory start and end to setup_arch() This will help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-7-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/nios2/kernel/setup.c | 2 ++ arch/nios2/mm/init.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index da122a5fa43b..a4cffbfc1399 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -149,6 +149,8 @@ void __init setup_arch(char **cmdline_p) memory_start = memblock_start_of_DRAM(); memory_end = memblock_end_of_DRAM(); + pr_debug("%s: start=%lx, end=%lx\n", __func__, memory_start, memory_end); + setup_initial_init_mm(_stext, _etext, _edata, _end); init_task.thread.kregs = &fake_regs; diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index a2278485de19..aa692ad30044 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -65,8 +65,6 @@ void __init mem_init(void) unsigned long end_mem = memory_end; /* this must not include kernel stack at top */ - pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end); - end_mem &= PAGE_MASK; high_memory = __va(end_mem); From 54ccf66f99d6d2630895eb13156be19a033d4566 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:57 +0200 Subject: [PATCH 339/431] s390: make setup_zero_pages() use memblock Allocating the zero pages from memblock is simpler because the memory is already reserved. This will also help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-8-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Heiko Carstens Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/s390/mm/init.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d88cb1c13f7d..2b41dc9b1fa3 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -73,8 +73,6 @@ static void __init setup_zero_pages(void) { unsigned long total_pages = memblock_estimated_nr_free_pages(); unsigned int order; - struct page *page; - int i; /* Latest machines require a mapping granularity of 512KB */ order = 7; @@ -83,16 +81,7 @@ static void __init setup_zero_pages(void) while (order > 2 && (total_pages >> 10) < (1UL << order)) order--; - empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); - if (!empty_zero_page) - panic("Out of memory in setup_zero_pages"); - - page = virt_to_page((void *) empty_zero_page); - split_page(page, order); - for (i = 1 << order; i > 0; i--) { - mark_page_reserved(page); - page++; - } + empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE); zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; } @@ -176,9 +165,10 @@ void __init mem_init(void) pv_init(); kfence_split_mapping(); + setup_zero_pages(); /* Setup zeroed pages. */ + /* this will put all low memory onto the freelists */ memblock_free_all(); - setup_zero_pages(); /* Setup zeroed pages. */ } unsigned long memory_block_size_bytes(void) From d319c8b4918d24aea6fd90bd39cd5bc9fcf40859 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:58 +0200 Subject: [PATCH 340/431] xtensa: split out printing of virtual memory layout to a function This will help with pulling out memblock_free_all() to the generic code and reducing code duplication in arch::mem_init(). Link: https://lkml.kernel.org/r/20250313135003.836600-9-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Reviewed-by: Max Filippov Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: David S. Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Mark Brown Cc: Matt Turner Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/xtensa/mm/init.c | 107 ++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 52 deletions(-) diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index b2587a1a7c46..01577d33e602 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -66,59 +66,8 @@ void __init bootmem_init(void) memblock_dump_all(); } - -void __init zones_init(void) +static void __init print_vm_layout(void) { - /* All pages are DMA-able, so we put them all in the DMA zone. 
*/ - unsigned long max_zone_pfn[MAX_NR_ZONES] = { - [ZONE_NORMAL] = max_low_pfn, -#ifdef CONFIG_HIGHMEM - [ZONE_HIGHMEM] = max_pfn, -#endif - }; - free_area_init(max_zone_pfn); -} - -static void __init free_highpages(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn; - phys_addr_t range_start, range_end; - u64 i; - - /* set highmem page free */ - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, - &range_start, &range_end, NULL) { - unsigned long start = PFN_UP(range_start); - unsigned long end = PFN_DOWN(range_end); - - /* Ignore complete lowmem entries */ - if (end <= max_low) - continue; - - /* Truncate partial highmem entries */ - if (start < max_low) - start = max_low; - - for (; start < end; start++) - free_highmem_page(pfn_to_page(start)); - } -#endif -} - -/* - * Initialize memory pages. - */ - -void __init mem_init(void) -{ - free_highpages(); - - max_mapnr = max_pfn - ARCH_PFN_OFFSET; - high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); - - memblock_free_all(); - pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" @@ -167,6 +116,60 @@ void __init mem_init(void) (unsigned long)(__bss_stop - __bss_start) >> 10); } +void __init zones_init(void) +{ + /* All pages are DMA-able, so we put them all in the DMA zone. */ + unsigned long max_zone_pfn[MAX_NR_ZONES] = { + [ZONE_NORMAL] = max_low_pfn, +#ifdef CONFIG_HIGHMEM + [ZONE_HIGHMEM] = max_pfn, +#endif + }; + free_area_init(max_zone_pfn); + print_vm_layout(); +} + +static void __init free_highpages(void) +{ +#ifdef CONFIG_HIGHMEM + unsigned long max_low = max_low_pfn; + phys_addr_t range_start, range_end; + u64 i; + + /* set highmem page free */ + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &range_start, &range_end, NULL) { + unsigned long start = PFN_UP(range_start); + unsigned long end = PFN_DOWN(range_end); + + /* Ignore complete lowmem entries */ + if (end <= max_low) + continue; + + /* Truncate partial highmem entries */ + if (start < max_low) + start = max_low; + + for (; start < end; start++) + free_highmem_page(pfn_to_page(start)); + } +#endif +} + +/* + * Initialize memory pages. + */ + +void __init mem_init(void) +{ + free_highpages(); + + max_mapnr = max_pfn - ARCH_PFN_OFFSET; + high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); + + memblock_free_all(); +} + static void __init parse_memmap_one(char *p) { char *oldp; From 8268af309d07d1c6279080b4e6fd16ec75cc977c Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:49:59 +0200 Subject: [PATCH 341/431] arch, mm: set max_mapnr when allocating memory map for FLATMEM max_mapnr is essentially the size of the memory map for systems that use FLATMEM. There is no reason to calculate it in each and every architecture when it's anyway calculated in alloc_node_mem_map(). Drop setting of max_mapnr from architecture code and set it once in alloc_node_mem_map(). While on it, move definition of mem_map and max_mapnr to mm/mm_init.c so there won't be two copies for MMU and !MMU variants. Link: https://lkml.kernel.org/r/20250313135003.836600-10-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Dave Hansen [x86] Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/alpha/mm/init.c | 1 - arch/arc/mm/init.c | 5 ----- arch/arm/mm/init.c | 2 -- arch/csky/mm/init.c | 4 ---- arch/loongarch/mm/init.c | 1 - arch/microblaze/mm/init.c | 4 ---- arch/mips/mm/init.c | 8 -------- arch/nios2/kernel/setup.c | 1 - arch/nios2/mm/init.c | 2 +- arch/openrisc/mm/init.c | 1 - arch/parisc/mm/init.c | 1 - arch/powerpc/kernel/setup-common.c | 2 -- arch/riscv/mm/init.c | 1 - arch/s390/mm/init.c | 1 - arch/sh/mm/init.c | 1 - arch/sparc/mm/init_32.c | 1 - arch/um/include/shared/mem_user.h | 1 - arch/um/kernel/physmem.c | 12 ------------ arch/um/kernel/um_arch.c | 1 - arch/x86/mm/init_32.c | 3 --- arch/xtensa/mm/init.c | 1 - include/asm-generic/memory_model.h | 5 +++-- include/linux/mm.h | 11 ----------- mm/memory.c | 8 -------- mm/mm_init.c | 25 +++++++++++++++++-------- mm/nommu.c | 4 ---- 26 files changed, 21 insertions(+), 86 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 61c2198b1359..ec0eeae9c653 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -276,7 +276,6 @@ srm_paging_stop (void) void __init mem_init(void) { - set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); } diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 6a71b23f1383..7ef883d58dc1 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -154,11 +154,6 @@ void __init setup_arch_memory(void) arch_pfn_offset = min(min_low_pfn, min_high_pfn); kmap_init(); - -#else /* CONFIG_HIGHMEM */ - /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */ - max_mapnr = max_low_pfn - min_low_pfn; - #endif /* CONFIG_HIGHMEM */ free_area_init(max_zone_pfn); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9aec1cb2386f..d4bcc745a044 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -275,8 +275,6 @@ void __init mem_init(void) swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE); #endif - set_max_mapnr(pfn_to_page(max_pfn) - mem_map); - #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index ab51acbc19b2..ba6694d6170a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -46,10 +46,6 @@ void __init mem_init(void) { #ifdef CONFIG_HIGHMEM unsigned long tmp; - - set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET); -#else - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index ca5aa5f46a9f..00449df50db1 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -78,7 +78,6 @@ void __init paging_init(void) void __init mem_init(void) { - max_mapnr = max_low_pfn; high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 4520c5741579..857cd2b44bcf 100644 --- a/arch/microblaze/mm/init.c +++ 
b/arch/microblaze/mm/init.c @@ -104,17 +104,13 @@ void __init setup_memory(void) * * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn - * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ - /* RAM is assumed contiguous */ - max_mapnr = memory_size >> PAGE_SHIFT; max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; - pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr); pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn); diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 820e35a59d4d..eb61a73520a0 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -415,15 +415,7 @@ void __init paging_init(void) " %ldk highmem ignored\n", (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; - - max_mapnr = max_low_pfn; - } else if (highend_pfn) { - max_mapnr = highend_pfn; - } else { - max_mapnr = max_low_pfn; } -#else - max_mapnr = max_low_pfn; #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index a4cffbfc1399..2a40150142c3 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -158,7 +158,6 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = boot_command_line; find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); - max_mapnr = max_low_pfn; memblock_reserve(__pa_symbol(_stext), _end - _stext); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index aa692ad30044..3cafa87ead9e 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -51,7 +51,7 @@ void __init paging_init(void) pagetable_init(); pgd_current = swapper_pg_dir; - max_zone_pfn[ZONE_NORMAL] = max_mapnr; + max_zone_pfn[ZONE_NORMAL] = max_low_pfn; /* pass the memory from the bootmem allocator to the main allocator */ free_area_init(max_zone_pfn); diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index d0cb1a0126f9..9093c336e158 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -193,7 +193,6 @@ void __init mem_init(void) { BUG_ON(!mem_map); - max_mapnr = max_low_pfn; high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* clear the zero-page */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 61c0a2477072..2cdfc0b1195c 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -563,7 +563,6 @@ void __init mem_init(void) #endif high_memory = __va((max_pfn << PAGE_SHIFT)); - set_max_mapnr(max_low_pfn); memblock_free_all(); #ifdef CONFIG_PA11 diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index a08b0ede4e64..68d47c53876c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -957,8 +957,6 @@ void __init setup_arch(char **cmdline_p) /* Parse memory topology */ mem_topology_setup(); - /* Set max_mapnr before paging_init() */ - set_max_mapnr(max_pfn); high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 15b2eda4c364..157c9ca51541 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -298,7 +298,6 @@ static void __init setup_bootmem(void) high_memory = (void 
*)(__va(PFN_PHYS(max_low_pfn))); dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); - set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); reserve_initrd_mem(); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 2b41dc9b1fa3..ad567e2100b7 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -159,7 +159,6 @@ void __init mem_init(void) cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); cpumask_set_cpu(0, mm_cpumask(&init_mm)); - set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); pv_init(); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 289a2fecebef..72aea5cd1b85 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -290,7 +290,6 @@ void __init paging_init(void) */ max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = __MEMORY_START >> PAGE_SHIFT; - set_max_mapnr(max_low_pfn - min_low_pfn); nodes_clear(node_online_map); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index d96a14ffceeb..6b58da14edc6 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -275,7 +275,6 @@ void __init mem_init(void) taint_real_pages(); - max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); diff --git a/arch/um/include/shared/mem_user.h b/arch/um/include/shared/mem_user.h index adfa08062f88..d4727efcf23d 100644 --- a/arch/um/include/shared/mem_user.h +++ b/arch/um/include/shared/mem_user.h @@ -47,7 +47,6 @@ extern int iomem_size; #define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1)) extern unsigned long find_iomem(char *driver, unsigned long *len_out); -extern void mem_total_pages(unsigned long physmem, unsigned long iomem); extern void setup_physmem(unsigned long start, unsigned long usable, unsigned long len); extern void map_memory(unsigned long virt, unsigned long phys, diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index a74f17b033c4..af02b5f9911d 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -22,18 +22,6 @@ static int physmem_fd = -1; unsigned long high_physmem; EXPORT_SYMBOL(high_physmem); -void __init mem_total_pages(unsigned long physmem, unsigned long iomem) -{ - unsigned long phys_pages, iomem_pages, total_pages; - - phys_pages = physmem >> PAGE_SHIFT; - iomem_pages = iomem >> PAGE_SHIFT; - - total_pages = phys_pages + iomem_pages; - - max_mapnr = total_pages; -} - void map_memory(unsigned long virt, unsigned long phys, unsigned long len, int r, int w, int x) { diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 79ea97d4797e..6414cbf00572 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -419,7 +419,6 @@ void __init setup_arch(char **cmdline_p) stack_protections((unsigned long) init_task.stack); setup_physmem(uml_physmem, uml_reserved, physmem_size); - mem_total_pages(physmem_size, iomem_size); uml_dtb_init(); read_initrd(); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index ac41b1e0940d..6d2f8cb9451e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -650,9 +650,6 @@ void __init initmem_init(void) memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); -#ifdef CONFIG_FLATMEM - max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? 
highend_pfn : max_low_pfn; -#endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 01577d33e602..9f1b0d5fccc7 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -164,7 +164,6 @@ void __init mem_init(void) { free_highpages(); - max_mapnr = max_pfn - ARCH_PFN_OFFSET; high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 6d1fb6162ac1..a3b5029aebbd 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -19,11 +19,12 @@ #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ ARCH_PFN_OFFSET) +/* avoid include hell */ +extern unsigned long max_mapnr; + #ifndef pfn_valid static inline int pfn_valid(unsigned long pfn) { - /* avoid include hell */ - extern unsigned long max_mapnr; unsigned long pfn_offset = ARCH_PFN_OFFSET; return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr; diff --git a/include/linux/mm.h b/include/linux/mm.h index 82776b409391..8c4cb8c28507 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -46,17 +46,6 @@ extern int sysctl_page_lock_unfairness; void mm_core_init(void); void init_mm_internals(void); -#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */ -extern unsigned long max_mapnr; - -static inline void set_max_mapnr(unsigned long limit) -{ - max_mapnr = limit; -} -#else -static inline void set_max_mapnr(unsigned long limit) { } -#endif - extern atomic_long_t _totalram_pages; static inline unsigned long totalram_pages(void) { diff --git a/mm/memory.c b/mm/memory.c index 8873b7a4962c..a1d7664855f2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -95,14 +95,6 @@ #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif -#ifndef CONFIG_NUMA -unsigned long max_mapnr; -EXPORT_SYMBOL(max_mapnr); - -struct page *mem_map; -EXPORT_SYMBOL(mem_map); -#endif - static vm_fault_t do_fault(struct vm_fault *vmf); static vm_fault_t do_anonymous_page(struct vm_fault *vmf); static bool vmf_pte_changed(struct vm_fault *vmf); diff --git a/mm/mm_init.c b/mm/mm_init.c index 133640a93d1d..7fd48d2d5064 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -37,6 +37,14 @@ #include +#ifndef CONFIG_NUMA +unsigned long max_mapnr; +EXPORT_SYMBOL(max_mapnr); + +struct page *mem_map; +EXPORT_SYMBOL(mem_map); +#endif + #ifdef CONFIG_DEBUG_MEMORY_INIT int __meminitdata mminit_loglevel; @@ -1639,7 +1647,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); offset = pgdat->node_start_pfn - start; /* - * The zone's endpoints aren't required to be MAX_PAGE_ORDER + * The zone's endpoints aren't required to be MAX_PAGE_ORDER * aligned but the node_mem_map endpoints must be in order * for the buddy allocator to function correctly. 
*/ @@ -1655,14 +1663,15 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", __func__, pgdat->node_id, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map); -#ifndef CONFIG_NUMA + /* the global mem_map is just set as node 0's */ - if (pgdat == NODE_DATA(0)) { - mem_map = NODE_DATA(0)->node_mem_map; - if (page_to_pfn(mem_map) != pgdat->node_start_pfn) - mem_map -= offset; - } -#endif + WARN_ON(pgdat != NODE_DATA(0)); + + mem_map = pgdat->node_mem_map; + if (page_to_pfn(mem_map) != pgdat->node_start_pfn) + mem_map -= offset; + + max_mapnr = end - start; } #else static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } diff --git a/mm/nommu.c b/mm/nommu.c index 8b31d8396297..43751726f977 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -44,16 +44,12 @@ void *high_memory; EXPORT_SYMBOL(high_memory); -struct page *mem_map; -unsigned long max_mapnr; -EXPORT_SYMBOL(max_mapnr); unsigned long highest_memmap_pfn; int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; int heap_stack_gap = 0; atomic_long_t mmap_pages_allocated; -EXPORT_SYMBOL(mem_map); /* list of mapped, potentially shareable regions */ static struct kmem_cache *vm_region_jar; From e120d1bc12da5c1bb871c346f741296610fd6fcb Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:50:00 +0200 Subject: [PATCH 342/431] arch, mm: set high_memory in free_area_init() high_memory defines upper bound on the directly mapped memory. This bound is defined by the beginning of ZONE_HIGHMEM when a system has high memory and by the end of memory otherwise. All this is known to generic memory management initialization code that can set high_memory while initializing core mm structures. Add a generic calculation of high_memory to free_area_init() and remove per-architecture calculation except for the architectures that set and use high_memory earlier than that. Link: https://lkml.kernel.org/r/20250313135003.836600-11-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Dave Hansen [x86] Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/alpha/mm/init.c | 1 - arch/arc/mm/init.c | 2 -- arch/arm64/mm/init.c | 2 -- arch/csky/mm/init.c | 1 - arch/hexagon/mm/init.c | 6 ------ arch/loongarch/kernel/numa.c | 1 - arch/loongarch/mm/init.c | 2 -- arch/microblaze/mm/init.c | 2 -- arch/mips/mm/init.c | 2 -- arch/nios2/mm/init.c | 6 ------ arch/openrisc/mm/init.c | 2 -- arch/parisc/mm/init.c | 1 - arch/riscv/mm/init.c | 1 - arch/s390/mm/init.c | 2 -- arch/sh/mm/init.c | 7 ------- arch/sparc/mm/init_32.c | 1 - arch/sparc/mm/init_64.c | 2 -- arch/um/kernel/um_arch.c | 1 - arch/x86/kernel/setup.c | 2 -- arch/x86/mm/init_32.c | 3 --- arch/x86/mm/numa_32.c | 3 --- arch/xtensa/mm/init.c | 2 -- mm/memory.c | 8 -------- mm/mm_init.c | 30 ++++++++++++++++++++++++++++++ mm/nommu.c | 2 -- 25 files changed, 30 insertions(+), 62 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index ec0eeae9c653..3ab2d2f3c917 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -276,7 +276,6 @@ srm_paging_stop (void) void __init mem_init(void) { - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); } diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 7ef883d58dc1..05025122e965 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -150,8 +150,6 @@ void __init setup_arch_memory(void) */ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn; - high_memory = (void *)(min_high_pfn << PAGE_SHIFT); - arch_pfn_offset = min(min_low_pfn, min_high_pfn); kmap_init(); #endif /* CONFIG_HIGHMEM */ diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index ccdef53872a0..53a0b105890b 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -309,8 +309,6 @@ void __init arm64_memblock_init(void) } early_init_fdt_scan_reserved_mem(); - - high_memory = __va(memblock_end_of_DRAM() - 1) + 1; } void __init bootmem_init(void) diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index ba6694d6170a..a22801aa503a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -47,7 +47,6 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM unsigned long tmp; #endif - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 508bb6a8dcc9..d412c2314509 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -100,12 +100,6 @@ static void __init paging_init(void) * initial kernel segment table's physical address. */ init_mm.context.ptbase = __pa(init_mm.pgd); - - /* - * Start of high memory area. Will probably need something more - * fancy if we... get more fancy. 
- */ - high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT); } #ifndef DMA_RESERVE diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 84fe7f854820..8eb489725b1a 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -389,7 +389,6 @@ void __init paging_init(void) void __init mem_init(void) { - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 00449df50db1..6affa3609188 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -78,8 +78,6 @@ void __init paging_init(void) void __init mem_init(void) { - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); } #endif /* !CONFIG_NUMA */ diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 857cd2b44bcf..7e2e342e84c5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -120,8 +120,6 @@ void __init setup_memory(void) void __init mem_init(void) { - high_memory = (void *)__va(memory_start + lowmem_size - 1); - /* this will put all memory onto the freelists */ memblock_free_all(); #ifdef CONFIG_HIGHMEM diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index eb61a73520a0..ed9dde6a00f7 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -417,7 +417,6 @@ void __init paging_init(void) max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; } #endif - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); free_area_init(max_zone_pfns); } @@ -469,7 +468,6 @@ void __init mem_init(void) #else /* CONFIG_NUMA */ void __init mem_init(void) { - high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); setup_zero_pages(); /* This comes from node 0 */ memblock_free_all(); } diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 3cafa87ead9e..4ba8dfa0d238 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -62,12 +62,6 @@ void __init paging_init(void) void __init mem_init(void) { - unsigned long end_mem = memory_end; /* this must not include - kernel stack at top */ - - end_mem &= PAGE_MASK; - high_memory = __va(end_mem); - /* this will put all memory onto the freelists */ memblock_free_all(); } diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 9093c336e158..72c5952607ac 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -193,8 +193,6 @@ void __init mem_init(void) { BUG_ON(!mem_map); - high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); - /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 2cdfc0b1195c..4fbe354dc9b4 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -562,7 +562,6 @@ void __init mem_init(void) BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); #endif - high_memory = __va((max_pfn << PAGE_SHIFT)); memblock_free_all(); #ifdef CONFIG_PA11 diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 157c9ca51541..ac6d41e86243 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -295,7 +295,6 @@ static void __init setup_bootmem(void) phys_ram_end = memblock_end_of_DRAM(); min_low_pfn = PFN_UP(phys_ram_base); max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end); - high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn)); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ad567e2100b7..4bd6f316d71f 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -159,8 +159,6 @@ 
void __init mem_init(void) cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); cpumask_set_cpu(0, mm_cpumask(&init_mm)); - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - pv_init(); kfence_split_mapping(); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 72aea5cd1b85..6d459ffba4bc 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -330,13 +330,6 @@ unsigned int mem_init_done = 0; void __init mem_init(void) { - pg_data_t *pgdat; - - high_memory = NULL; - for_each_online_pgdat(pgdat) - high_memory = max_t(void *, high_memory, - __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT)); - memblock_free_all(); /* Set this up early, so we can take care of the zero page */ diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6b58da14edc6..81a468a9c223 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -275,7 +275,6 @@ void __init mem_init(void) taint_real_pages(); - high_memory = __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); for (i = 0; sp_banks[i].num_bytes != 0; i++) { diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 05882bca5b73..34d46adb9571 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2505,8 +2505,6 @@ static void __init register_page_bootmem_info(void) } void __init mem_init(void) { - high_memory = __va(last_valid_pfn << PAGE_SHIFT); - memblock_free_all(); /* diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 6414cbf00572..f24a3ce37ab7 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -385,7 +385,6 @@ int __init linux_main(int argc, char **argv, char **envp) high_physmem = uml_physmem + physmem_size; end_iomem = high_physmem + iomem_size; - high_memory = (void *) end_iomem; start_vm = VMALLOC_START; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ff8604007b08..74ac686d441a 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -972,8 +972,6 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = e820__end_of_low_ram_pfn(); else max_low_pfn = max_pfn; - - high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; #endif /* Find and reserve MPTABLE area */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 6d2f8cb9451e..801b659ead0c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -643,9 +643,6 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); - high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; -#else - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 65fda406e6f2..442ef3facff0 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -41,9 +41,6 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); - high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; -#else - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(max_low_pfn)); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 9f1b0d5fccc7..9b662477b3d4 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -164,8 +164,6 @@ void __init mem_init(void) { free_highpages(); - high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); } diff --git 
a/mm/memory.c b/mm/memory.c index a1d7664855f2..3900225d99c5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -113,14 +113,6 @@ static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) return pte_marker_uffd_wp(vmf->orig_pte); } -/* - * A number of key systems in x86 including ioremap() rely on the assumption - * that high_memory defines the upper bound on direct map memory, then end - * of ZONE_NORMAL. - */ -void *high_memory; -EXPORT_SYMBOL(high_memory); - /* * Randomize the address space (stacks, mmaps, brk, etc.). * diff --git a/mm/mm_init.c b/mm/mm_init.c index 7fd48d2d5064..bd7071c32a44 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -45,6 +45,13 @@ struct page *mem_map; EXPORT_SYMBOL(mem_map); #endif +/* + * high_memory defines the upper bound on direct map memory, then end + * of ZONE_NORMAL. + */ +void *high_memory; +EXPORT_SYMBOL(high_memory); + #ifdef CONFIG_DEBUG_MEMORY_INIT int __meminitdata mminit_loglevel; @@ -1778,6 +1785,27 @@ static bool arch_has_descending_max_zone_pfns(void) return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); } +static void set_high_memory(void) +{ + phys_addr_t highmem = memblock_end_of_DRAM(); + + /* + * Some architectures (e.g. ARM) set high_memory very early and + * use it in arch setup code. + * If an architecture already set high_memory don't overwrite it + */ + if (high_memory) + return; + +#ifdef CONFIG_HIGHMEM + if (arch_has_descending_max_zone_pfns() || + highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])) + highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]); +#endif + + high_memory = phys_to_virt(highmem - 1) + 1; +} + /** * free_area_init - Initialise all pg_data_t and zone data * @max_zone_pfn: an array of max PFNs for each zone @@ -1900,6 +1928,8 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* disable hash distribution for systems with a single node */ fixup_hashdist(); + + set_high_memory(); } /** diff --git a/mm/nommu.c b/mm/nommu.c index 43751726f977..15a396ce2553 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -42,8 +42,6 @@ #include #include "internal.h" -void *high_memory; -EXPORT_SYMBOL(high_memory); unsigned long highest_memmap_pfn; int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; int heap_stack_gap = 0; From 6faea3422e3b4e8de44a55aa3e6e843320da66d2 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:50:01 +0200 Subject: [PATCH 343/431] arch, mm: streamline HIGHMEM freeing All architectures that support HIGHMEM have their code that frees high memory pages to the buddy allocator while __free_memory_core() is limited to freeing only low memory. There is no actual reason for that. The memory map is completely ready by the time memblock_free_all() is called and high pages can be released to the buddy allocator along with low memory. Remove low memory limit from __free_memory_core() and drop per-architecture code that frees high memory pages. Link: https://lkml.kernel.org/r/20250313135003.836600-12-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Dave Hansen [x86] Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arc/mm/init.c | 6 +----- arch/arm/mm/init.c | 29 ----------------------------- arch/csky/mm/init.c | 14 -------------- arch/microblaze/mm/init.c | 16 ---------------- arch/mips/mm/init.c | 20 -------------------- arch/powerpc/mm/mem.c | 14 -------------- arch/sparc/mm/init_32.c | 25 ------------------------- arch/x86/include/asm/highmem.h | 3 --- arch/x86/include/asm/numa.h | 4 ---- arch/x86/include/asm/numa_32.h | 13 ------------- arch/x86/mm/Makefile | 2 -- arch/x86/mm/highmem_32.c | 34 ---------------------------------- arch/x86/mm/init_32.c | 28 ---------------------------- arch/xtensa/mm/init.c | 29 ----------------------------- include/linux/mm.h | 1 - mm/memblock.c | 3 +-- 16 files changed, 2 insertions(+), 239 deletions(-) delete mode 100644 arch/x86/include/asm/numa_32.h delete mode 100644 arch/x86/mm/highmem_32.c diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 05025122e965..11ce638731c9 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -160,11 +160,7 @@ void __init setup_arch_memory(void) static void __init highmem_init(void) { #ifdef CONFIG_HIGHMEM - unsigned long tmp; - memblock_phys_free(high_mem_start, high_mem_sz); - for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); #endif } @@ -176,8 +172,8 @@ static void __init highmem_init(void) */ void __init mem_init(void) { - memblock_free_all(); highmem_init(); + memblock_free_all(); BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE); BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d4bcc745a044..7bb5ce02b9b5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -237,33 +237,6 @@ static inline void poison_init_mem(void *s, size_t count) *p++ = 0xe7fddef0; } -static void __init free_highpages(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn; - phys_addr_t range_start, range_end; - u64 i; - - /* set highmem page free */ - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, - &range_start, &range_end, NULL) { - unsigned long start = PFN_UP(range_start); - unsigned long end = PFN_DOWN(range_end); - - /* Ignore complete lowmem entries */ - if (end <= max_low) - continue; - - /* Truncate partial highmem entries */ - if (start < max_low) - start = max_low; - - for (; start < end; start++) - free_highmem_page(pfn_to_page(start)); - } -#endif -} - /* * mem_init() marks the free areas in the mem_map and tells us how much * memory is free. This is done after various parts of the system have @@ -283,8 +256,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - free_highpages(); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. 
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index a22801aa503a..3914c2b873da 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -44,21 +44,7 @@ EXPORT_SYMBOL(empty_zero_page); void __init mem_init(void) { -#ifdef CONFIG_HIGHMEM - unsigned long tmp; -#endif - memblock_free_all(); - -#ifdef CONFIG_HIGHMEM - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - /* FIXME not sure about */ - if (!memblock_is_reserved(tmp << PAGE_SHIFT)) - free_highmem_page(page); - } -#endif } void free_initmem(void) diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 7e2e342e84c5..3e664e0efc33 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -52,19 +52,6 @@ static void __init highmem_init(void) map_page(PKMAP_BASE, 0, 0); /* XXX gross */ pkmap_page_table = virt_to_kpte(PKMAP_BASE); } - -static void __meminit highmem_setup(void) -{ - unsigned long pfn; - - for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { - struct page *page = pfn_to_page(pfn); - - /* FIXME not sure about */ - if (!memblock_is_reserved(pfn << PAGE_SHIFT)) - free_highmem_page(page); - } -} #endif /* CONFIG_HIGHMEM */ /* @@ -122,9 +109,6 @@ void __init mem_init(void) { /* this will put all memory onto the freelists */ memblock_free_all(); -#ifdef CONFIG_HIGHMEM - highmem_setup(); -#endif mem_init_done = 1; } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index ed9dde6a00f7..075177e817ac 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -425,25 +425,6 @@ void __init paging_init(void) static struct kcore_list kcore_kseg0; #endif -static inline void __init mem_init_free_highmem(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - if (cpu_has_dc_aliases) - return; - - for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { - struct page *page = pfn_to_page(tmp); - - if (!memblock_is_memory(PFN_PHYS(tmp))) - SetPageReserved(page); - else - free_highmem_page(page); - } -#endif -} - void __init mem_init(void) { /* @@ -454,7 +435,6 @@ void __init mem_init(void) maar_init(); setup_zero_pages(); /* Setup zeroed pages. 
*/ - mem_init_free_highmem(); memblock_free_all(); #ifdef CONFIG_64BIT diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index c7708c8fad29..1bc94bca9944 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -297,20 +297,6 @@ void __init mem_init(void) memblock_free_all(); -#ifdef CONFIG_HIGHMEM - { - unsigned long pfn, highmem_mapnr; - - highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; - for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { - phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; - struct page *page = pfn_to_page(pfn); - if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr)) - free_highmem_page(page); - } - } -#endif /* CONFIG_HIGHMEM */ - #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 81a468a9c223..043e9b6fadd0 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -232,18 +232,6 @@ static void __init taint_real_pages(void) } } -static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) -{ - unsigned long tmp; - -#ifdef CONFIG_DEBUG_HIGHMEM - printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); -#endif - - for (tmp = start_pfn; tmp < end_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); -} - void __init mem_init(void) { int i; @@ -276,19 +264,6 @@ void __init mem_init(void) taint_real_pages(); memblock_free_all(); - - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; - unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; - - if (end_pfn <= highstart_pfn) - continue; - - if (start_pfn < highstart_pfn) - start_pfn = highstart_pfn; - - map_high_region(start_pfn, end_pfn); - } } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 731ee7cc40a5..585bdadba47d 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h @@ -69,9 +69,6 @@ extern unsigned long highstart_pfn, highend_pfn; arch_flush_lazy_mmu_mode(); \ } while (0) -extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn); - #endif /* __KERNEL__ */ #endif /* _ASM_X86_HIGHMEM_H */ diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 5469d7a7c40f..53ba39ce010c 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h @@ -41,10 +41,6 @@ static inline int numa_cpu_node(int cpu) } #endif /* CONFIG_NUMA */ -#ifdef CONFIG_X86_32 -# include -#endif - #ifdef CONFIG_NUMA extern void numa_set_node(int cpu, int node); extern void numa_clear_node(int cpu); diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h deleted file mode 100644 index 9c8e9e85be77..000000000000 --- a/arch/x86/include/asm/numa_32.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_X86_NUMA_32_H -#define _ASM_X86_NUMA_32_H - -#ifdef CONFIG_HIGHMEM -extern void set_highmem_pages_init(void); -#else -static inline void set_highmem_pages_init(void) -{ -} -#endif - -#endif /* _ASM_X86_NUMA_32_H */ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index e0c99a8760ca..32035d5be5a0 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -42,8 +42,6 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PTDUMP) += dump_pagetables.o obj-$(CONFIG_PTDUMP_DEBUGFS) += debug_pagetables.o -obj-$(CONFIG_HIGHMEM) 
+= highmem_32.o - KASAN_SANITIZE_kasan_init_$(BITS).o := n obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c deleted file mode 100644 index d9efa35711ee..000000000000 --- a/arch/x86/mm/highmem_32.c +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include -#include -#include /* for totalram_pages */ -#include -#include - -void __init set_highmem_pages_init(void) -{ - struct zone *zone; - int nid; - - /* - * Explicitly reset zone->managed_pages because set_highmem_pages_init() - * is invoked before memblock_free_all() - */ - reset_all_zones_managed_pages(); - for_each_zone(zone) { - unsigned long zone_start_pfn, zone_end_pfn; - - if (!is_highmem(zone)) - continue; - - zone_start_pfn = zone->zone_start_pfn; - zone_end_pfn = zone_start_pfn + zone->spanned_pages; - - nid = zone_to_nid(zone); - printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", - zone->name, nid, zone_start_pfn, zone_end_pfn); - - add_highpages_with_active_regions(nid, zone_start_pfn, - zone_end_pfn); - } -} diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 801b659ead0c..9ee8ec2bc5d1 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -394,23 +394,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base) pkmap_page_table = virt_to_kpte(vaddr); } - -void __init add_highpages_with_active_regions(int nid, - unsigned long start_pfn, unsigned long end_pfn) -{ - phys_addr_t start, end; - u64 i; - - for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) { - unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), - start_pfn, end_pfn); - unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), - start_pfn, end_pfn); - for ( ; pfn < e_pfn; pfn++) - if (pfn_valid(pfn)) - free_highmem_page(pfn_to_page(pfn)); - } -} #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { @@ -715,17 +698,6 @@ void __init mem_init(void) #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif - /* - * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to - * be done before memblock_free_all(). Memblock use free low memory for - * temporary data (see find_range_array()) and for this purpose can use - * pages that was already passed to the buddy allocator, hence marked as - * not accessible in the page tables when compiled with - * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not - * important here. - */ - set_highmem_pages_init(); - /* this will put all low memory onto the freelists */ memblock_free_all(); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 9b662477b3d4..47ecbe28263e 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -129,41 +129,12 @@ void __init zones_init(void) print_vm_layout(); } -static void __init free_highpages(void) -{ -#ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn; - phys_addr_t range_start, range_end; - u64 i; - - /* set highmem page free */ - for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, - &range_start, &range_end, NULL) { - unsigned long start = PFN_UP(range_start); - unsigned long end = PFN_DOWN(range_end); - - /* Ignore complete lowmem entries */ - if (end <= max_low) - continue; - - /* Truncate partial highmem entries */ - if (start < max_low) - start = max_low; - - for (; start < end; start++) - free_highmem_page(pfn_to_page(start)); - } -#endif -} - /* * Initialize memory pages. 
*/ void __init mem_init(void) { - free_highpages(); - memblock_free_all(); } diff --git a/include/linux/mm.h b/include/linux/mm.h index 8c4cb8c28507..6c519a5098d4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3275,7 +3275,6 @@ extern void reserve_bootmem_region(phys_addr_t start, /* Free the reserved page into the buddy system, so it gets managed. */ void free_reserved_page(struct page *page); -#define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) { diff --git a/mm/memblock.c b/mm/memblock.c index 95af35fd1389..64ae678cd1d1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2164,8 +2164,7 @@ static unsigned long __init __free_memory_core(phys_addr_t start, phys_addr_t end) { unsigned long start_pfn = PFN_UP(start); - unsigned long end_pfn = min_t(unsigned long, - PFN_DOWN(end), max_low_pfn); + unsigned long end_pfn = PFN_DOWN(end); if (start_pfn >= end_pfn) return 0; From 0d98484ee3330c35d1dc0da0be0871b3596cbe0d Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:50:02 +0200 Subject: [PATCH 344/431] arch, mm: introduce arch_mm_preinit Currently, implementation of mem_init() in every architecture consists of one or more of the following: * initializations that must run before page allocator is active, for instance swiotlb_init() * a call to memblock_free_all() to release all the memory to the buddy allocator * initializations that must run after page allocator is ready and there is no arch-specific hook other than mem_init() for that, like for example register_page_bootmem_info() in x86 and sparc64 or simple setting of mem_init_done = 1 in several architectures * a bunch of semi-related stuff that apparently had no better place to live, for example a ton of BUILD_BUG_ON()s in parisc. Introduce arch_mm_preinit() that will be the first thing called from mm_core_init(). On architectures that have initializations that must happen before the page allocator is ready, move those into arch_mm_preinit() along with the code that does not depend on ordering with page allocator setup. On several architectures this results in reduction of mem_init() to a single call to memblock_free_all() that allows its consolidation next. Link: https://lkml.kernel.org/r/20250313135003.836600-13-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Dave Hansen [x86] Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arc/mm/init.c | 13 ++++++------- arch/arm/mm/init.c | 21 ++++++++++++--------- arch/arm64/mm/init.c | 21 ++++++++++++--------- arch/mips/mm/init.c | 11 +++++++---- arch/powerpc/mm/mem.c | 9 ++++++--- arch/riscv/mm/init.c | 8 ++++++-- arch/s390/mm/init.c | 5 ++++- arch/sparc/mm/init_32.c | 5 ++++- arch/um/kernel/mem.c | 7 +++++-- arch/x86/mm/init_32.c | 6 +++++- arch/x86/mm/init_64.c | 5 ++++- include/linux/mm.h | 1 + mm/mm_init.c | 5 +++++ 13 files changed, 77 insertions(+), 40 deletions(-) diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 11ce638731c9..90715b4a0bfa 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -157,11 +157,16 @@ void __init setup_arch_memory(void) free_area_init(max_zone_pfn); } -static void __init highmem_init(void) +void __init arch_mm_preinit(void) { #ifdef CONFIG_HIGHMEM memblock_phys_free(high_mem_start, high_mem_sz); #endif + + BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE); + BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE); + BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE); + BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE); } /* @@ -172,13 +177,7 @@ static void __init highmem_init(void) */ void __init mem_init(void) { - highmem_init(); memblock_free_all(); - - BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE); - BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE); - BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE); - BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE); } #ifdef CONFIG_HIGHMEM diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7bb5ce02b9b5..7222100b0631 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -237,12 +237,7 @@ static inline void poison_init_mem(void *s, size_t count) *p++ = 0xe7fddef0; } -/* - * mem_init() marks the free areas in the mem_map and tells us how much - * memory is free. This is done after various parts of the system have - * claimed their memory after the kernel image. - */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { #ifdef CONFIG_ARM_LPAE swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE); @@ -253,9 +248,6 @@ void __init mem_init(void) memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. @@ -271,6 +263,17 @@ void __init mem_init(void) #endif } +/* + * mem_init() marks the free areas in the mem_map and tells us how much + * memory is free. This is done after various parts of the system have + * claimed their memory after the kernel image. 
+ */ +void __init mem_init(void) +{ + /* this will put all unused low memory onto the freelists */ + memblock_free_all(); +} + #ifdef CONFIG_STRICT_KERNEL_RWX struct section_perm { const char *name; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 53a0b105890b..2312e3812043 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -357,12 +357,7 @@ void __init bootmem_init(void) memblock_dump_all(); } -/* - * mem_init() marks the free areas in the mem_map and tells us how much memory - * is free. This is done after various parts of the system have claimed their - * memory after the kernel image. - */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { unsigned int flags = SWIOTLB_VERBOSE; bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit); @@ -386,9 +381,6 @@ void __init mem_init(void) swiotlb_init(swiotlb, flags); swiotlb_update_mem_attributes(); - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. @@ -414,6 +406,17 @@ void __init mem_init(void) } } +/* + * mem_init() marks the free areas in the mem_map and tells us how much memory + * is free. This is done after various parts of the system have claimed their + * memory after the kernel image. + */ +void __init mem_init(void) +{ + /* this will put all unused low memory onto the freelists */ + memblock_free_all(); +} + void free_initmem(void) { void *lm_init_begin = lm_alias(__init_begin); diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 075177e817ac..eec38e7735dd 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -425,7 +425,7 @@ void __init paging_init(void) static struct kcore_list kcore_kseg0; #endif -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE @@ -435,7 +435,6 @@ void __init mem_init(void) maar_init(); setup_zero_pages(); /* Setup zeroed pages. 
*/ - memblock_free_all(); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) @@ -446,13 +445,17 @@ void __init mem_init(void) #endif } #else /* CONFIG_NUMA */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { setup_zero_pages(); /* This comes from node 0 */ - memblock_free_all(); } #endif /* !CONFIG_NUMA */ +void __init mem_init(void) +{ + memblock_free_all(); +} + void free_init_pages(const char *what, unsigned long begin, unsigned long end) { unsigned long pfn; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 1bc94bca9944..68efdaf14e58 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -273,7 +273,7 @@ void __init paging_init(void) mark_nonram_nosave(); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* * book3s is limited to 16 page sizes due to encoding this in @@ -295,8 +295,6 @@ void __init mem_init(void) kasan_late_init(); - memblock_free_all(); - #if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up @@ -329,6 +327,11 @@ void __init mem_init(void) #endif /* CONFIG_PPC32 */ } +void __init mem_init(void) +{ + memblock_free_all(); +} + void free_initmem(void) { ppc_md.progress = ppc_printk_progress; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index ac6d41e86243..9efadabf6be1 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -171,7 +171,7 @@ static void __init print_vm_layout(void) static void print_vm_layout(void) { } #endif /* CONFIG_DEBUG_VM */ -void __init mem_init(void) +void __init arch_mm_preinit(void) { bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit); #ifdef CONFIG_FLATMEM @@ -192,11 +192,15 @@ void __init mem_init(void) } swiotlb_init(swiotlb, SWIOTLB_VERBOSE); - memblock_free_all(); print_vm_layout(); } +void __init mem_init(void) +{ + memblock_free_all(); +} + /* Limit the memory size via mem. */ static phys_addr_t memory_limit; #ifdef CONFIG_XIP_KERNEL diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 4bd6f316d71f..e771b7458d8b 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -154,7 +154,7 @@ static void pv_init(void) swiotlb_update_mem_attributes(); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); cpumask_set_cpu(0, mm_cpumask(&init_mm)); @@ -163,7 +163,10 @@ void __init mem_init(void) kfence_split_mapping(); setup_zero_pages(); /* Setup zeroed pages. 
*/ +} +void __init mem_init(void) +{ /* this will put all low memory onto the freelists */ memblock_free_all(); } diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 043e9b6fadd0..e16c32c5728f 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -232,7 +232,7 @@ static void __init taint_real_pages(void) } } -void __init mem_init(void) +void __init arch_mm_preinit(void) { int i; @@ -262,7 +262,10 @@ void __init mem_init(void) memset(sparc_valid_addr_bitmap, 0, i << 2); taint_real_pages(); +} +void __init mem_init(void) +{ memblock_free_all(); } diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index befed230aac2..cce387438e60 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,7 @@ int kmalloc_ok = 0; /* Used during early boot */ static unsigned long brk_end; -void __init mem_init(void) +void __init arch_mm_preinit(void) { /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); @@ -66,10 +66,13 @@ void __init mem_init(void) map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); memblock_free((void *)brk_end, uml_reserved - brk_end); uml_reserved = brk_end; + max_pfn = max_low_pfn; +} +void __init mem_init(void) +{ /* this will put all low memory onto the freelists */ memblock_free_all(); - max_pfn = max_low_pfn; kmalloc_ok = 1; } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 9ee8ec2bc5d1..16664c5464b5 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -691,13 +691,17 @@ static void __init test_wp_bit(void) panic("Linux doesn't support CPUs with broken WP."); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif +} + +void __init mem_init(void) +{ /* this will put all low memory onto the freelists */ memblock_free_all(); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6e8e4ef5312a..a88f7db8089e 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1348,10 +1348,13 @@ static void __init preallocate_vmalloc_pages(void) panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl); } -void __init mem_init(void) +void __init arch_mm_preinit(void) { pci_iommu_alloc(); +} +void __init mem_init(void) +{ /* clear_bss() already clear the empty_zero_page */ /* this will put all memory onto the freelists */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 6c519a5098d4..c417e5634a58 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -43,6 +43,7 @@ struct folio_batch; extern int sysctl_page_lock_unfairness; +void arch_mm_preinit(void); void mm_core_init(void); void init_mm_internals(void); diff --git a/mm/mm_init.c b/mm/mm_init.c index bd7071c32a44..6844de516a50 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2734,11 +2734,16 @@ static void __init mem_init_print_info(void) ); } +void __init __weak arch_mm_preinit(void) +{ +} + /* * Set up kernel memory allocators */ void __init mm_core_init(void) { + arch_mm_preinit(); hugetlb_bootmem_alloc(); /* Initializations relying on SMP setup */ From 8afa901c147a41f92e83943cddf154bbb7995ee6 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Thu, 13 Mar 2025 15:50:03 +0200 Subject: [PATCH 345/431] arch, mm: make releasing of memory to page allocator more explicit The point where the memory is released from memblock to the buddy allocator is hidden inside arch-specific mem_init()s and the call to memblock_free_all() is needlessly duplicated in every artiste cure and after introduction of arch_mm_preinit() 
hook, mem_init() implementation on many architecture only contains the call to memblock_free_all(). Pull memblock_free_all() call into mm_core_init() and drop mem_init() on relevant architectures to make it more explicit where the free memory is released from memblock to the buddy allocator and to reduce code duplication in architecture specific code. Link: https://lkml.kernel.org/r/20250313135003.836600-14-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Acked-by: Dave Hansen [x86] Acked-by: Geert Uytterhoeven [m68k] Tested-by: Mark Brown Cc: Alexander Gordeev Cc: Andreas Larsson Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Borislav Betkov Cc: Catalin Marinas Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Guo Ren (csky) Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiaxun Yang Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Madhavan Srinivasan Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Richard Weinberger Cc: Russel King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleinxer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/alpha/mm/init.c | 6 ------ arch/arc/mm/init.c | 11 ----------- arch/arm/mm/init.c | 11 ----------- arch/arm64/mm/init.c | 11 ----------- arch/csky/mm/init.c | 5 ----- arch/hexagon/mm/init.c | 18 ------------------ arch/loongarch/kernel/numa.c | 5 ----- arch/loongarch/mm/init.c | 5 ----- arch/m68k/mm/init.c | 2 -- arch/microblaze/mm/init.c | 3 --- arch/mips/mm/init.c | 5 ----- arch/nios2/mm/init.c | 6 ------ arch/openrisc/mm/init.c | 3 --- arch/parisc/mm/init.c | 2 -- arch/powerpc/mm/mem.c | 5 ----- arch/riscv/mm/init.c | 5 ----- arch/s390/mm/init.c | 6 ------ arch/sh/mm/init.c | 2 -- arch/sparc/mm/init_32.c | 5 ----- arch/sparc/mm/init_64.c | 2 -- arch/um/kernel/mem.c | 2 -- arch/x86/mm/init_32.c | 3 --- arch/x86/mm/init_64.c | 2 -- arch/xtensa/mm/init.c | 9 --------- include/linux/memblock.h | 1 - mm/internal.h | 3 ++- mm/mm_init.c | 5 +++++ 27 files changed, 7 insertions(+), 136 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 3ab2d2f3c917..2d491b8cdab9 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -273,12 +273,6 @@ srm_paging_stop (void) } #endif -void __init -mem_init(void) -{ - memblock_free_all(); -} - static const pgprot_t protection_map[16] = { [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR), diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 90715b4a0bfa..a73cc94f806e 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -169,17 +169,6 @@ void __init arch_mm_preinit(void) BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE); } -/* - * mem_init - initializes memory - * - * Frees up bootmem - * Calculates and displays memory available/used - */ -void __init mem_init(void) -{ - memblock_free_all(); -} - #ifdef CONFIG_HIGHMEM int pfn_valid(unsigned long pfn) { diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7222100b0631..54bdca025c9f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -263,17 +263,6 @@ void __init arch_mm_preinit(void) #endif } -/* - * mem_init() marks the free areas in the mem_map and tells us how much - * memory is free. This is done after various parts of the system have - * claimed their memory after the kernel image. 
- */ -void __init mem_init(void) -{ - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); -} - #ifdef CONFIG_STRICT_KERNEL_RWX struct section_perm { const char *name; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 2312e3812043..4b966d5709d2 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -406,17 +406,6 @@ void __init arch_mm_preinit(void) } } -/* - * mem_init() marks the free areas in the mem_map and tells us how much memory - * is free. This is done after various parts of the system have claimed their - * memory after the kernel image. - */ -void __init mem_init(void) -{ - /* this will put all unused low memory onto the freelists */ - memblock_free_all(); -} - void free_initmem(void) { void *lm_init_begin = lm_alias(__init_begin); diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index 3914c2b873da..573da66b2543 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -42,11 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); -void __init mem_init(void) -{ - memblock_free_all(); -} - void free_initmem(void) { free_initmem_default(-1); diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index d412c2314509..34eb9d424b96 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -43,24 +43,6 @@ DEFINE_SPINLOCK(kmap_gen_lock); /* checkpatch says don't init this to 0. */ unsigned long long kmap_generation; -/* - * mem_init - initializes memory - * - * Frees up bootmem - * Fixes up more stuff for HIGHMEM - * Calculates and displays memory available/used - */ -void __init mem_init(void) -{ - /* No idea where this is actually declared. Seems to evade LXR. */ - memblock_free_all(); - - /* - * To-Do: someone somewhere should wipe out the bootmem map - * after we're done? 
- */ -} - void sync_icache_dcache(pte_t pte) { unsigned long addr; diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 8eb489725b1a..30a72fd528c0 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -387,11 +387,6 @@ void __init paging_init(void) free_area_init(zones_size); } -void __init mem_init(void) -{ - memblock_free_all(); -} - int pcibus_to_node(struct pci_bus *bus) { return dev_to_node(&bus->dev); diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 6affa3609188..fdb7f73ad160 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -75,11 +75,6 @@ void __init paging_init(void) free_area_init(max_zone_pfns); } - -void __init mem_init(void) -{ - memblock_free_all(); -} #endif /* !CONFIG_NUMA */ void __ref free_initmem(void) diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 8b11d0d545aa..488411af1b3f 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -121,7 +121,5 @@ static inline void init_pointer_tables(void) void __init mem_init(void) { - /* this will put all memory onto the freelists */ - memblock_free_all(); init_pointer_tables(); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 3e664e0efc33..65f0d1fb8a2a 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -107,9 +107,6 @@ void __init setup_memory(void) void __init mem_init(void) { - /* this will put all memory onto the freelists */ - memblock_free_all(); - mem_init_done = 1; } diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index eec38e7735dd..a673d3d68254 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -451,11 +451,6 @@ void __init arch_mm_preinit(void) } #endif /* !CONFIG_NUMA */ -void __init mem_init(void) -{ - memblock_free_all(); -} - void free_init_pages(const char *what, unsigned long begin, unsigned long end) { unsigned long pfn; diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 4ba8dfa0d238..94efa3de3933 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -60,12 +60,6 @@ void __init paging_init(void) (unsigned long)empty_zero_page + PAGE_SIZE); } -void __init mem_init(void) -{ - /* this will put all memory onto the freelists */ - memblock_free_all(); -} - void __init mmu_init(void) { flush_tlb_all(); diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 72c5952607ac..be1c2eb8bb94 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -196,9 +196,6 @@ void __init mem_init(void) /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); - /* this will put all low memory onto the freelists */ - memblock_free_all(); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 4fbe354dc9b4..14270715d754 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -562,8 +562,6 @@ void __init mem_init(void) BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000); #endif - memblock_free_all(); - #ifdef CONFIG_PA11 if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 68efdaf14e58..d8fe11b64259 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -327,11 +327,6 @@ void __init arch_mm_preinit(void) #endif /* CONFIG_PPC32 */ } -void __init mem_init(void) -{ - memblock_free_all(); -} - void free_initmem(void) { ppc_md.progress = 
ppc_printk_progress; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 9efadabf6be1..79b649f6de72 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -196,11 +196,6 @@ void __init arch_mm_preinit(void) print_vm_layout(); } -void __init mem_init(void) -{ - memblock_free_all(); -} - /* Limit the memory size via mem. */ static phys_addr_t memory_limit; #ifdef CONFIG_XIP_KERNEL diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index e771b7458d8b..5b7b7b281334 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -165,12 +165,6 @@ void __init arch_mm_preinit(void) setup_zero_pages(); /* Setup zeroed pages. */ } -void __init mem_init(void) -{ - /* this will put all low memory onto the freelists */ - memblock_free_all(); -} - unsigned long memory_block_size_bytes(void) { /* diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 6d459ffba4bc..99e302eeeec1 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -330,8 +330,6 @@ unsigned int mem_init_done = 0; void __init mem_init(void) { - memblock_free_all(); - /* Set this up early, so we can take care of the zero page */ cpu_cache_init(); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index e16c32c5728f..fdc93dd12c3e 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -264,11 +264,6 @@ void __init arch_mm_preinit(void) taint_real_pages(); } -void __init mem_init(void) -{ - memblock_free_all(); -} - void sparc_flush_page_to_ram(struct page *page) { unsigned long vaddr = (unsigned long)page_address(page); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 34d46adb9571..760818950464 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2505,8 +2505,6 @@ static void __init register_page_bootmem_info(void) } void __init mem_init(void) { - memblock_free_all(); - /* * Must be done after boot memory is put on freelist, because here we * might set fields in deferred struct pages that have not yet been diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index cce387438e60..379f33a1babf 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -71,8 +71,6 @@ void __init arch_mm_preinit(void) void __init mem_init(void) { - /* this will put all low memory onto the freelists */ - memblock_free_all(); kmalloc_ok = 1; } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 16664c5464b5..95b2758b4e4d 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -702,9 +702,6 @@ void __init arch_mm_preinit(void) void __init mem_init(void) { - /* this will put all low memory onto the freelists */ - memblock_free_all(); - after_bootmem = 1; x86_init.hyper.init_after_bootmem(); diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a88f7db8089e..d67f15386ea2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1357,8 +1357,6 @@ void __init mem_init(void) { /* clear_bss() already clear the empty_zero_page */ - /* this will put all memory onto the freelists */ - memblock_free_all(); after_bootmem = 1; x86_init.hyper.init_after_bootmem(); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 47ecbe28263e..cc52733a0649 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -129,15 +129,6 @@ void __init zones_init(void) print_vm_layout(); } -/* - * Initialize memory pages. 
- */ - -void __init mem_init(void) -{ - memblock_free_all(); -} - static void __init parse_memmap_one(char *p) { char *oldp; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e79eb6ac516f..ef5a1ecc6e59 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -133,7 +133,6 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size); -void memblock_free_all(void); void memblock_free(void *ptr, size_t size); void reset_all_zones_managed_pages(void); diff --git a/mm/internal.h b/mm/internal.h index 558c8e2a3d94..2f52a65272c1 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1475,7 +1475,8 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, } extern bool mirrored_kernelcore; -extern bool memblock_has_mirror(void); +bool memblock_has_mirror(void); +void memblock_free_all(void); static __always_inline void vma_set_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, diff --git a/mm/mm_init.c b/mm/mm_init.c index 6844de516a50..c82b0162f1cb 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2738,6 +2738,10 @@ void __init __weak arch_mm_preinit(void) { } +void __init __weak mem_init(void) +{ +} + /* * Set up kernel memory allocators */ @@ -2761,6 +2765,7 @@ void __init mm_core_init(void) report_meminit(); kmsan_init_shadow(); stack_depot_early_init(); + memblock_free_all(); mem_init(); kmem_cache_init(); /* From 4c9ea539ad59ec60676930dacee02b7adde2e0c0 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:58:56 -0800 Subject: [PATCH 346/431] mm/damon/sysfs: validate user inputs from damon_sysfs_commit_input() Patch series "mm/damon/sysfs: commit parameters online via damon_call()". Due to the lack of ways to synchronously access DAMON internal data, DAMON sysfs interface is using damon_callback hooks with its own synchronization mechanism. The mechanism is built on top of damon_callback hooks in an ineifficient and complicated way. Patch series "mm/damon: replace most damon_callback usages in sysfs with new core functions", which starts with commit e035320fd38e ("mm/damon/sysfs-schemes: remove unnecessary schemes existence check in damon_sysfs_schemes_clear_regions()") introduced two new DAMON kernel API functions that providing the synchronous access, replaced most damon_callback hooks usage in DAMON sysfs interface, and cleaned up unnecessary code. Continue the replacement and cleanup works. Update the last DAMON sysfs' usage of its own synchronization mechanism, namely online DAMON parameters commit, to use damon_call() instead of the damon_callback hooks and the hard-to-maintain core-external synchronization mechanism. Then remove the no more be used code due to the change, and more unused code that just not yet cleaned up. The first four patches (patches 1-4) of this series makes DAMON sysfs interface's online parameters commit to use damon_call(). Then, following three patches (patches 5-7) remove the DAMON sysfs interface's own synchronization mechanism and its usages, which is no more be used by anyone due to the first four patches. Finally, six patches (8-13) do more cleanup of outdated comment and unused code. This patch (of 13): Online DAMON parameters commit via DAMON sysfs interface can make kdamond stop. This behavior was made because it can make the implementation simpler. The implementation tries committing the parameter without validation. 
If it finds something wrong in the middle of the parameters update, it returns error without reverting the partially committed parameters back. It is safe though, since it immediately breaks kdamond main loop in the case of the error return. Users can make the wrong parameters by mistake, though. Stopping kdamond in the case is not very useful behavior. Also this makes it difficult to utilize damon_call() instead of damon_callback hook for online parameters update, since damon_call() cannot immediately break kdamond main loop in the middle. Validate the input parameters and return error when it fails before starting parameters updates. In case of mistakenly wrong parameters, kdamond can continue running with the old and valid parameters. Link: https://lkml.kernel.org/r/20250306175908.66300-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250306175908.66300-2-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index ccd435d234b9..87e4c6e3614e 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1449,11 +1449,11 @@ static struct damon_ctx *damon_sysfs_build_ctx( * damon_sysfs_commit_input() - Commit user inputs to a running kdamond. * @kdamond: The kobject wrapper for the associated kdamond. * - * If the sysfs input is wrong, the kdamond will be terminated. + * Returns error if the sysfs input is wrong. */ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) { - struct damon_ctx *param_ctx; + struct damon_ctx *param_ctx, *test_ctx; int err; if (!damon_sysfs_kdamond_running(kdamond)) @@ -1465,7 +1465,15 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]); if (IS_ERR(param_ctx)) return PTR_ERR(param_ctx); + test_ctx = damon_new_ctx(); + err = damon_commit_ctx(test_ctx, param_ctx); + if (err) { + damon_sysfs_destroy_targets(test_ctx); + damon_destroy_ctx(test_ctx); + goto out; + } err = damon_commit_ctx(kdamond->damon_ctx, param_ctx); +out: damon_sysfs_destroy_targets(param_ctx); damon_destroy_ctx(param_ctx); return err; From bf74bdfd2edb60ab44b48a6ef705207dc889dc3d Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:58:57 -0800 Subject: [PATCH 347/431] mm/damon/core: invoke kdamond_call() after merging is done if possible kdamond_call() callers may iterate the regions, so better to call it when the number of regions is as small as possible. It is when kdamond_merge_regions() is finished. Invoke it on the point. This change is also aimed to make future changes for carrying online parameters commit with damon_call() easier. The commit operation should be able to make sequence between other aggregation interval based operations including regioins merging and aggregation reset. Placing damon_call() invocation after the regions merging makes the sequence handling simpler. 
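As an illustration of why this ordering matters, below is a small self-contained userspace C toy; it is not DAMON code, the merge rule is deliberately simplified, and every name in it is a stand-in. It only shows that a handler which walks the region list does strictly less work once adjacent regions with similar access counts have been merged, which is the reason kdamond_call() is moved after kdamond_merge_regions().

/*
 * Toy model (userspace C, not kernel code): per-aggregation work that walks
 * the region list is proportional to the number of regions, and merging
 * shrinks that number, so region-walking handlers should run after merging.
 */
#include <stdio.h>

struct region { unsigned long start, end; unsigned int nr_accesses; };

/* merge adjacent regions whose access counts differ by at most 'thres' */
static int merge_regions(struct region *r, int nr, unsigned int thres)
{
	int i, out = 0;

	for (i = 1; i < nr; i++) {
		unsigned int a = r[out].nr_accesses, b = r[i].nr_accesses;

		if ((a > b ? a - b : b - a) <= thres && r[out].end == r[i].start)
			r[out].end = r[i].end;	/* fold into the previous region */
		else
			r[++out] = r[i];
	}
	return out + 1;
}

/* stand-in for a damon_call() handler that visits every region */
static void handler(const struct region *r, int nr)
{
	for (int i = 0; i < nr; i++)
		printf("region %d: [%lu, %lu) nr_accesses=%u\n",
		       i, r[i].start, r[i].end, r[i].nr_accesses);
}

int main(void)
{
	struct region r[] = {
		{ 0, 100, 3 }, { 100, 200, 4 }, { 200, 300, 9 }, { 300, 400, 10 },
	};
	int nr = merge_regions(r, 4, 1);	/* 4 regions become 2 */

	handler(r, nr);				/* the handler now walks only 2 */
	return 0;
}
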
Link: https://lkml.kernel.org/r/20250306175908.66300-3-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index ebbb22840435..b9a9db1a90b4 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2421,7 +2421,6 @@ static int kdamond_fn(void *data) if (ctx->callback.after_sampling && ctx->callback.after_sampling(ctx)) break; - kdamond_call(ctx, false); kdamond_usleep(sample_interval); ctx->passed_sample_intervals++; @@ -2439,9 +2438,10 @@ static int kdamond_fn(void *data) } /* - * do kdamond_apply_schemes() after kdamond_merge_regions() if - * possible, to reduce overhead + * do kdamond_call() and kdamond_apply_schemes() after + * kdamond_merge_regions() if possible, to reduce overhead */ + kdamond_call(ctx, false); if (!list_empty(&ctx->schemes)) kdamond_apply_schemes(ctx); else From 258d941e5877f5fd40d5e636540c0a00458b8825 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:58:58 -0800 Subject: [PATCH 348/431] mm/damon/core: make damon_set_attrs() be safe to be called from damon_call() Currently all DAMON kernel API callers do online DAMON parameters commit from damon_callback->after_aggregation because only those are safe place to call the DAMON monitoring attributes update function, namely damon_set_attrs(). Because damon_callback hooks provide no synchronization, the callers work in asynchronous ways or implement their own inefficient and complicated synchronization mechanisms. It also means online DAMON parameters commit can take up to one aggregation interval. On large systems having long aggregation intervals, that can be too slow. The synchronization can be done in more efficient and simple way while removing the latency constraint if it can be done using damon_call(). The fact that damon_call() can be executed in the middle of the aggregation makes damon_set_attrs() unsafe to be called from it, though. Two real problems can occur in the case. First, converting the not yet completely aggregated nr_accesses for new user-set intervals can arguably degrade the accuracy or at least make the logic complicated. Second, kdamond_reset_aggregated() will not be called after the monitoring results update, so next aggregation starts from unclean state. This can result in inconsistent and unexpected nr_accesses_bp. Make it safe as follows. Catch the middle-of-the-aggregation case from damon_set_attrs() by checking the passed_sample_intervals and next_aggregationsis of the context. And pass the information to nr_accesses conversion logic. The logic works as before if it is not the case (called after the current aggregation is completed). If it is the case (committing parameters in the middle of the aggregation), it drops the nr_accesses information that so far aggregated, and make the status same to the beginning of this aggregation, but as if the last aggregation was started with the updated sampling/aggregation intervals. The middle-of-aggregastion check introduce yet another edge case, though. This happens because kdamond_tune_intervals() can also call damon_set_attrs() with the middle-of-aggregation check. Consider damon_call() for parameters commit and kdamond_tune_intervals() are called in same iteration of kdamond main loop. Because kdamond_tune_interval() is called for aggregation intervals, it should be the end of the aggregation. 
The first damon_set_attrs() call from kdamond_call() understands it is the end of the aggregation and correctly handle it. But, because the damon_set_attrs() updated next_aggregation_sis of the context. Hence, the second damon_set_attrs() invocation from kdamond_tune_interval() believes it is called in the middle of the aggregation. It therefore resets aggregated information so far. After that, kdamond_reset_interval() is called and double-reset the aggregated information. Avoid this case, too, by setting the next_aggregation_sis before kdamond_tune_intervals() is invoked. Link: https://lkml.kernel.org/r/20250306175908.66300-4-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 56 +++++++++++++++++++++++++++++-------- mm/damon/tests/core-kunit.h | 6 ++-- 2 files changed, 48 insertions(+), 14 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index b9a9db1a90b4..2be4099e0666 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -603,11 +603,25 @@ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, } static void damon_update_monitoring_result(struct damon_region *r, - struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) + struct damon_attrs *old_attrs, struct damon_attrs *new_attrs, + bool aggregating) { - r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses, - old_attrs, new_attrs); - r->nr_accesses_bp = r->nr_accesses * 10000; + if (!aggregating) { + r->nr_accesses = damon_nr_accesses_for_new_attrs( + r->nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->nr_accesses * 10000; + } else { + /* + * if this is called in the middle of the aggregation, reset + * the aggregations we made so far for this aggregation + * interval. In other words, make the status like + * kdamond_reset_aggregated() is called. + */ + r->last_nr_accesses = damon_nr_accesses_for_new_attrs( + r->last_nr_accesses, old_attrs, new_attrs); + r->nr_accesses_bp = r->last_nr_accesses * 10000; + r->nr_accesses = 0; + } r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); } @@ -620,7 +634,7 @@ static void damon_update_monitoring_result(struct damon_region *r, * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs. */ static void damon_update_monitoring_results(struct damon_ctx *ctx, - struct damon_attrs *new_attrs) + struct damon_attrs *new_attrs, bool aggregating) { struct damon_attrs *old_attrs = &ctx->attrs; struct damon_target *t; @@ -635,7 +649,7 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx, damon_for_each_target(t, ctx) damon_for_each_region(r, t) damon_update_monitoring_result( - r, old_attrs, new_attrs); + r, old_attrs, new_attrs, aggregating); } /* @@ -662,10 +676,10 @@ static bool damon_valid_intervals_goal(struct damon_attrs *attrs) * @ctx: monitoring context * @attrs: monitoring attributes * - * This function should be called while the kdamond is not running, or an - * access check results aggregation is not ongoing (e.g., from - * &struct damon_callback->after_aggregation or - * &struct damon_callback->after_wmarks_check callbacks). + * This function should be called while the kdamond is not running, an access + * check results aggregation is not ongoing (e.g., from &struct + * damon_callback->after_aggregation or &struct + * damon_callback->after_wmarks_check callbacks), or from damon_call(). * * Every time interval is in micro-seconds. 
* @@ -676,6 +690,8 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) unsigned long sample_interval = attrs->sample_interval ? attrs->sample_interval : 1; struct damos *s; + bool aggregating = ctx->passed_sample_intervals < + ctx->next_aggregation_sis; if (!damon_valid_intervals_goal(attrs)) return -EINVAL; @@ -696,7 +712,7 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) ctx->next_ops_update_sis = ctx->passed_sample_intervals + attrs->ops_update_interval / sample_interval; - damon_update_monitoring_results(ctx, attrs); + damon_update_monitoring_results(ctx, attrs, aggregating); ctx->attrs = *attrs; damon_for_each_scheme(s, ctx) @@ -2453,6 +2469,24 @@ static int kdamond_fn(void *data) if (ctx->attrs.intervals_goal.aggrs && ctx->passed_sample_intervals >= ctx->next_intervals_tune_sis) { + /* + * ctx->next_aggregation_sis might be updated + * from kdamond_call(). In the case, + * damon_set_attrs() which will be called from + * kdamond_tune_interval() may wrongly think + * this is in the middle of the current + * aggregation, and make aggregation + * information reset for all regions. Then, + * following kdamond_reset_aggregated() call + * will make the region information invalid, + * particularly for ->nr_accesses_bp. + * + * Reset ->next_aggregation_sis to avoid that. + * It will anyway correctly updated after this + * if caluse. + */ + ctx->next_aggregation_sis = + next_aggregation_sis; ctx->next_intervals_tune_sis += ctx->attrs.aggr_samples * ctx->attrs.intervals_goal.aggrs; diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index 532c6a6f21f9..be0fea9ee5fc 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -348,19 +348,19 @@ static void damon_test_update_monitoring_result(struct kunit *test) new_attrs = (struct damon_attrs){ .sample_interval = 100, .aggr_interval = 10000,}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 15); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 1000}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 2); new_attrs = (struct damon_attrs){ .sample_interval = 1, .aggr_interval = 100}; - damon_update_monitoring_result(r, &old_attrs, &new_attrs); + damon_update_monitoring_result(r, &old_attrs, &new_attrs, false); KUNIT_EXPECT_EQ(test, r->nr_accesses, 150); KUNIT_EXPECT_EQ(test, r->age, 20); From 3301f1861d34f53911a30a8f5f41b9141bd8ed39 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:58:59 -0800 Subject: [PATCH 349/431] mm/damon/sysfs: handle commit command using damon_call() DAMON sysfs interface is using damon_callback->after_aggregation hook with its self-implemented synchronization mechanism for the hook. It is inefficient, complicated, and take up to one aggregation interval to complete, which can be long on some configs. Use damon_call() instead. It provides a synchronization mechanism that built inside DAMON's core layer, so more efficient than DAMON sysfs interface's own one. Also it isolates the implementation inside the core layer, and hence it makes the code easier to maintain. Finally, it takes up to one sampling interval, which is much shorter than the aggregation interval in common setups. 
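For reference, the command is routed through a thin wrapper that packs the
handler and its argument into a damon_call() request and runs it in the
kdamond context.  A minimal sketch, assuming a damon_call_control with
->fn and ->data fields as implied by how damon_call() is used in this
series (the exact upstream helper may differ):

"""
/*
 * Illustrative sketch only.  The damon_call_control field names (->fn,
 * ->data) are assumptions based on the damon_call() interface.
 */
static int damon_sysfs_damon_call(int (*fn)(void *data),
		struct damon_sysfs_kdamond *kdamond)
{
	struct damon_call_control control = {
		.fn = fn,
		.data = kdamond,
	};

	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_call(kdamond->damon_ctx, &control);
}
"""

With this, committing new parameters becomes a single synchronous call,
damon_sysfs_damon_call(damon_sysfs_commit_input, kdamond), which returns
once the kdamond thread has run the handler, typically within one
sampling interval.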
Link: https://lkml.kernel.org/r/20250306175908.66300-5-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 87e4c6e3614e..c55a2cee4b74 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1451,8 +1451,9 @@ static struct damon_ctx *damon_sysfs_build_ctx( * * Returns error if the sysfs input is wrong. */ -static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_commit_input(void *data) { + struct damon_sysfs_kdamond *kdamond = data; struct damon_ctx *param_ctx, *test_ctx; int err; @@ -1550,11 +1551,6 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, if (!kdamond || kdamond->damon_ctx != c) goto out; switch (damon_sysfs_cmd_request.cmd) { - case DAMON_SYSFS_CMD_COMMIT: - if (!after_aggregation) - goto out; - err = damon_sysfs_commit_input(kdamond); - break; default: break; } @@ -1712,11 +1708,7 @@ static int damon_sysfs_update_schemes_tried_regions( * @cmd: The command to handle. * @kdamond: The kobject wrapper for the associated kdamond. * - * This function handles a DAMON sysfs command for a kdamond. For commands - * that need to access running DAMON context-internal data, it requests - * handling of the command to the DAMON callback - * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled, - * or the context is completed. + * This function handles a DAMON sysfs command for a kdamond. * * Return: 0 on success, negative error code otherwise. */ @@ -1730,6 +1722,9 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_COMMIT: + return damon_sysfs_damon_call( + damon_sysfs_commit_input, kdamond); case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: return damon_sysfs_damon_call( damon_sysfs_commit_schemes_quota_goals, From 8b40db0edf3c4e02369509ace9c29f558f7b5cba Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:00 -0800 Subject: [PATCH 350/431] mm/damon/sysfs: remove damon_sysfs_cmd_request code from damon_sysfs_handle_cmd() damon_sysfs_handle_cmd() handles user requests that it can directly handle on its own. For requests that need to be handled from damon_callback hooks, it uses DAMON sysfs interface's own synchronous damon_callback hooks management mechanism, namely damon_sysfs_cmd_request. Now all user requests are handled without damon_callback hooks, so damon_sysfs_cmd_request client code in damon_sysfs_andle_cmd() does nothing in real. Remove the unnecessary code. 
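After the removal, the dispatcher reduces to a plain switch in which every
command maps either to a direct handler or to a damon_call()-backed
helper, as sketched below (illustrative shape only, not the literal
upstream function):

"""
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	case DAMON_SYSFS_CMD_COMMIT:
		return damon_sysfs_damon_call(damon_sysfs_commit_input,
				kdamond);
	/* ... the remaining commands follow the same pattern ... */
	default:
		return -EINVAL;
	}
}
"""

No waiting state is left behind, which is also what allows the later
patches in this series to drop damon_sysfs_cmd_request itself.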
Link: https://lkml.kernel.org/r/20250306175908.66300-6-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index c55a2cee4b74..166161f12c26 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1715,8 +1715,6 @@ static int damon_sysfs_update_schemes_tried_regions( static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, struct damon_sysfs_kdamond *kdamond) { - bool need_wait = true; - switch (cmd) { case DAMON_SYSFS_CMD_ON: return damon_sysfs_turn_damon_on(kdamond); @@ -1747,38 +1745,8 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_damon_call( damon_sysfs_upd_tuned_intervals, kdamond); default: - break; - } - - /* Pass the command to DAMON callback for safe DAMON context access */ - if (damon_sysfs_cmd_request.kdamond) - return -EBUSY; - if (!damon_sysfs_kdamond_running(kdamond)) return -EINVAL; - damon_sysfs_cmd_request.cmd = cmd; - damon_sysfs_cmd_request.kdamond = kdamond; - - /* - * wait until damon_sysfs_cmd_request_callback() handles the request - * from kdamond context - */ - mutex_unlock(&damon_sysfs_lock); - while (need_wait) { - schedule_timeout_idle(msecs_to_jiffies(100)); - if (!mutex_trylock(&damon_sysfs_lock)) - continue; - if (!damon_sysfs_cmd_request.kdamond) { - /* damon_sysfs_cmd_request_callback() handled */ - need_wait = false; - } else if (!damon_sysfs_kdamond_running(kdamond)) { - /* kdamond has already finished */ - need_wait = false; - damon_sysfs_cmd_request.kdamond = NULL; - } - mutex_unlock(&damon_sysfs_lock); } - mutex_lock(&damon_sysfs_lock); - return 0; } static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, From 311f34ff85d2a0fb36b3566ef45dc21ee5089476 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:01 -0800 Subject: [PATCH 351/431] mm/damon/sysfs: remove damon_sysfs_cmd_request_callback() and its callers damon_sysfs_cmd_request_callback() is the damon_callback hook functions that were used to handle user requests that need to read and/or write DAMON internal data. All the usages are now updated to use damon_call() or damos_walk(), though. Remove it and its callers. Link: https://lkml.kernel.org/r/20250306175908.66300-7-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 62 ------------------------------------------------ 1 file changed, 62 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 166161f12c26..e5bcf019086f 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1529,65 +1529,6 @@ static int damon_sysfs_upd_tuned_intervals(void *data) return 0; } -/* - * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests. - * @c: The DAMON context of the callback. - * @active: Whether @c is not deactivated due to watermarks. - * @after_aggr: Whether this is called from after_aggregation() callback. - * - * This function is periodically called back from the kdamond thread for @c. - * Then, it checks if there is a waiting DAMON sysfs request and handles it. 
- */ -static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, - bool after_aggregation) -{ - struct damon_sysfs_kdamond *kdamond; - int err = 0; - - /* avoid deadlock due to concurrent state_store('off') */ - if (!mutex_trylock(&damon_sysfs_lock)) - return 0; - kdamond = damon_sysfs_cmd_request.kdamond; - if (!kdamond || kdamond->damon_ctx != c) - goto out; - switch (damon_sysfs_cmd_request.cmd) { - default: - break; - } - /* Mark the request as invalid now. */ - damon_sysfs_cmd_request.kdamond = NULL; -out: - mutex_unlock(&damon_sysfs_lock); - return err; -} - -static int damon_sysfs_after_wmarks_check(struct damon_ctx *c) -{ - /* - * after_wmarks_check() is called back while the context is deactivated - * by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, false, false); -} - -static int damon_sysfs_after_sampling(struct damon_ctx *c) -{ - /* - * after_sampling() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, false); -} - -static int damon_sysfs_after_aggregation(struct damon_ctx *c) -{ - /* - * after_aggregation() is called back only while the context is not - * deactivated by watermarks. - */ - return damon_sysfs_cmd_request_callback(c, true, true); -} - static struct damon_ctx *damon_sysfs_build_ctx( struct damon_sysfs_context *sys_ctx) { @@ -1603,9 +1544,6 @@ static struct damon_ctx *damon_sysfs_build_ctx( return ERR_PTR(err); } - ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check; - ctx->callback.after_sampling = damon_sysfs_after_sampling; - ctx->callback.after_aggregation = damon_sysfs_after_aggregation; ctx->callback.before_terminate = damon_sysfs_before_terminate; return ctx; } From d682f5f643420aa5b06d34c679f3fb3fd60fbe14 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:02 -0800 Subject: [PATCH 352/431] mm/damon/sysfs: remove damon_sysfs_cmd_request and its readers damon_sysfs_cmd_request is DAMON sysfs interface's own synchronization mechanism for accessing DAMON internal data via damon_callback hooks. All the users are now migrated to damon_call() and damos_walk(), so nobody really uses it. No one writes to the data structure but reading code is still remained. Remove the reading code and the entire data structure. Link: https://lkml.kernel.org/r/20250306175908.66300-8-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index e5bcf019086f..1af6aff35d84 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1238,25 +1238,6 @@ static const char * const damon_sysfs_cmd_strs[] = { "update_tuned_intervals", }; -/* - * struct damon_sysfs_cmd_request - A request to the DAMON callback. - * @cmd: The command that needs to be handled by the callback. - * @kdamond: The kobject wrapper that associated to the kdamond thread. - * - * This structure represents a sysfs command request that need to access some - * DAMON context-internal data. Because DAMON context-internal data can be - * safely accessed from DAMON callbacks without additional synchronization, the - * request will be handled by the DAMON callback. None-``NULL`` @kdamond means - * the request is valid. - */ -struct damon_sysfs_cmd_request { - enum damon_sysfs_cmd cmd; - struct damon_sysfs_kdamond *kdamond; -}; - -/* Current DAMON callback request. Protected by damon_sysfs_lock. 
*/ -static struct damon_sysfs_cmd_request damon_sysfs_cmd_request; - static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -1555,8 +1536,6 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) if (damon_sysfs_kdamond_running(kdamond)) return -EBUSY; - if (damon_sysfs_cmd_request.kdamond == kdamond) - return -EBUSY; /* TODO: support multiple contexts per kdamond */ if (kdamond->contexts->nr != 1) return -EINVAL; @@ -1796,8 +1775,7 @@ static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds, int i; for (i = 0; i < nr_kdamonds; i++) { - if (damon_sysfs_kdamond_running(kdamonds[i]) || - damon_sysfs_cmd_request.kdamond == kdamonds[i]) + if (damon_sysfs_kdamond_running(kdamonds[i])) return true; } From 52f7c351fc3e0bc372b5e3da21244f7e068ba9ec Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:03 -0800 Subject: [PATCH 353/431] mm/damon/sysfs-schemes: remove obsolete comment for damon_sysfs_schemes_clear_regions() The comment on damon_sysfs_schemes_clear_regions() function is obsolete, since it has updated to directly called from DAMON sysfs interface code. Remove the outdated comment. Link: https://lkml.kernel.org/r/20250306175908.66300-9-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 1895d2d2c295..985cfc750a90 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -2341,7 +2341,6 @@ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, } } -/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ int damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes) { From 53058c762afff714ceaec14b87cf8fdce3b6d33e Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:04 -0800 Subject: [PATCH 354/431] mm/damon: remove damon_callback->private The field was added to let users keep their personal data to use inside of the callbacks. However, no one is actively using that now. Remove it. Link: https://lkml.kernel.org/r/20250306175908.66300-10-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index eed008b64a23..dab4bb0fe39d 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -609,12 +609,10 @@ struct damon_operations { * @after_aggregation: Called after each aggregation. * @before_damos_apply: Called before applying DAMOS action. * @before_terminate: Called before terminating the monitoring. - * @private: User private data. * * The monitoring thread (&damon_ctx.kdamond) calls @before_start and * @before_terminate just before starting and finishing the monitoring, - * respectively. Therefore, those are good places for installing and cleaning - * @private. + * respectively. * * The monitoring thread calls @after_wmarks_check after each DAMON-based * operation schemes' watermarks check. If users need to make changes to the @@ -630,8 +628,6 @@ struct damon_operations { * If any callback returns non-zero, monitoring stops. 
*/ struct damon_callback { - void *private; - int (*before_start)(struct damon_ctx *context); int (*after_wmarks_check)(struct damon_ctx *context); int (*after_sampling)(struct damon_ctx *context); From 07da21855b270c17b2a2d20e644c1419fcaafdd1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:05 -0800 Subject: [PATCH 355/431] mm/damon: remove ->before_start of damon_callback The function pointer field was added to be used as a place to do some initialization works just before DAMON starts working. However, nobody is using it now. Remove it. Link: https://lkml.kernel.org/r/20250306175908.66300-11-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 7 ++----- mm/damon/core.c | 2 -- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index dab4bb0fe39d..043de2408c65 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -603,16 +603,14 @@ struct damon_operations { /** * struct damon_callback - Monitoring events notification callbacks. * - * @before_start: Called before starting the monitoring. * @after_wmarks_check: Called after each schemes' watermarks check. * @after_sampling: Called after each sampling. * @after_aggregation: Called after each aggregation. * @before_damos_apply: Called before applying DAMOS action. * @before_terminate: Called before terminating the monitoring. * - * The monitoring thread (&damon_ctx.kdamond) calls @before_start and - * @before_terminate just before starting and finishing the monitoring, - * respectively. + * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just + * before finishing the monitoring. * * The monitoring thread calls @after_wmarks_check after each DAMON-based * operation schemes' watermarks check. If users need to make changes to the @@ -628,7 +626,6 @@ struct damon_operations { * If any callback returns non-zero, monitoring stops. */ struct damon_callback { - int (*before_start)(struct damon_ctx *context); int (*after_wmarks_check)(struct damon_ctx *context); int (*after_sampling)(struct damon_ctx *context); int (*after_aggregation)(struct damon_ctx *context); diff --git a/mm/damon/core.c b/mm/damon/core.c index 2be4099e0666..9175a49985d5 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2408,8 +2408,6 @@ static int kdamond_fn(void *data) if (ctx->ops.init) ctx->ops.init(ctx); - if (ctx->callback.before_start && ctx->callback.before_start(ctx)) - goto done; ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, sizeof(*ctx->regions_score_histogram), GFP_KERNEL); if (!ctx->regions_score_histogram) From cedee98f68875605dad644a95a63eae04de250b2 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:06 -0800 Subject: [PATCH 356/431] mm/damon: remove damon_callback->after_sampling The callback was used by DAMON sysfs interface for reading DAMON internal data. But it is no more being used, and damon_call() can do similar works in a better way. Remove it. Link: https://lkml.kernel.org/r/20250306175908.66300-12-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 11 ++++------- mm/damon/core.c | 3 --- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 043de2408c65..5aa277f4c948 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -604,7 +604,6 @@ struct damon_operations { * struct damon_callback - Monitoring events notification callbacks. 
* * @after_wmarks_check: Called after each schemes' watermarks check. - * @after_sampling: Called after each sampling. * @after_aggregation: Called after each aggregation. * @before_damos_apply: Called before applying DAMOS action. * @before_terminate: Called before terminating the monitoring. @@ -617,17 +616,15 @@ struct damon_operations { * attributes of the monitoring context while it's deactivated due to the * watermarks, this is the good place to do. * - * The monitoring thread calls @after_sampling and @after_aggregation for each - * of the sampling intervals and aggregation intervals, respectively. - * Therefore, users can safely access the monitoring results without additional - * protection. For the reason, users are recommended to use these callback for - * the accesses to the results. + * The monitoring thread calls @after_aggregation for each of the aggregation + * intervals. Therefore, users can safely access the monitoring results + * without additional protection. For the reason, users are recommended to use + * these callback for the accesses to the results. * * If any callback returns non-zero, monitoring stops. */ struct damon_callback { int (*after_wmarks_check)(struct damon_ctx *context); - int (*after_sampling)(struct damon_ctx *context); int (*after_aggregation)(struct damon_ctx *context); int (*before_damos_apply)(struct damon_ctx *context, struct damon_target *target, diff --git a/mm/damon/core.c b/mm/damon/core.c index 9175a49985d5..812b1c70c723 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2432,9 +2432,6 @@ static int kdamond_fn(void *data) if (ctx->ops.prepare_access_checks) ctx->ops.prepare_access_checks(ctx); - if (ctx->callback.after_sampling && - ctx->callback.after_sampling(ctx)) - break; kdamond_usleep(sample_interval); ctx->passed_sample_intervals++; From 99ce7c9c6d855716356f2f839c53905592fd780b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:07 -0800 Subject: [PATCH 357/431] mm/damon: remove damon_callback->before_damos_apply The hook was introduced to let DAMON kernel API users access DAMOS schemes-eligible regions in a safe way. Now it is no more used by anyone, and the functionality is provided in a better way by damos_walk(). Remove it. Link: https://lkml.kernel.org/r/20250306175908.66300-13-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 5 ----- mm/damon/core.c | 13 ++++--------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 5aa277f4c948..be7b281fb922 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -605,7 +605,6 @@ struct damon_operations { * * @after_wmarks_check: Called after each schemes' watermarks check. * @after_aggregation: Called after each aggregation. - * @before_damos_apply: Called before applying DAMOS action. * @before_terminate: Called before terminating the monitoring. 
* * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just @@ -626,10 +625,6 @@ struct damon_operations { struct damon_callback { int (*after_wmarks_check)(struct damon_ctx *context); int (*after_aggregation)(struct damon_ctx *context); - int (*before_damos_apply)(struct damon_ctx *context, - struct damon_target *target, - struct damon_region *region, - struct damos *scheme); void (*before_terminate)(struct damon_ctx *context); }; diff --git a/mm/damon/core.c b/mm/damon/core.c index 812b1c70c723..177716847f4e 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1731,7 +1731,6 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, struct timespec64 begin, end; unsigned long sz_applied = 0; unsigned long sz_ops_filter_passed = 0; - int err = 0; /* * We plan to support multiple context per kdamond, as DAMON sysfs * implies with 'nr_contexts' file. Nevertheless, only single context @@ -1771,14 +1770,10 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, if (damos_filter_out(c, t, r, s)) return; ktime_get_coarse_ts64(&begin); - if (c->callback.before_damos_apply) - err = c->callback.before_damos_apply(c, t, r, s); - if (!err) { - trace_damos_before_apply(cidx, sidx, tidx, r, - damon_nr_regions(t), do_trace); - sz_applied = c->ops.apply_scheme(c, t, r, s, - &sz_ops_filter_passed); - } + trace_damos_before_apply(cidx, sidx, tidx, r, + damon_nr_regions(t), do_trace); + sz_applied = c->ops.apply_scheme(c, t, r, s, + &sz_ops_filter_passed); damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - From 105f830fa35c49ada7db785a7f9b70386f193529 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 6 Mar 2025 09:59:08 -0800 Subject: [PATCH 358/431] mm/damon: remove damon_operations->reset_aggregated The operations layer hook was introduced to let operations set do any aggregation data reset if needed. But it is not really be used now. Remove it. Link: https://lkml.kernel.org/r/20250306175908.66300-14-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 7 +------ mm/damon/core.c | 2 -- mm/damon/paddr.c | 1 - mm/damon/vaddr.c | 1 - 4 files changed, 1 insertion(+), 10 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index be7b281fb922..3db4f77261f5 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -542,7 +542,6 @@ enum damon_ops_id { * @update: Update operations-related data structures. * @prepare_access_checks: Prepare next access check of target regions. * @check_accesses: Check the accesses to target regions. - * @reset_aggregated: Reset aggregated accesses monitoring results. * @get_scheme_score: Get the score of a region for a scheme. * @apply_scheme: Apply a DAMON-based operation scheme. * @target_valid: Determine if the target is valid. @@ -554,8 +553,7 @@ enum damon_ops_id { * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting * the monitoring, @update after each &damon_attrs.ops_update_interval, and * @check_accesses, @target_valid and @prepare_access_checks after each - * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after - * each &damon_attrs.aggr_interval. + * &damon_attrs.sample_interval. * * Each &struct damon_operations instance having valid @id can be registered * via damon_register_ops() and selected by damon_select_ops() later. 
@@ -570,8 +568,6 @@ enum damon_ops_id { * last preparation and update the number of observed accesses of each region. * It should also return max number of observed accesses that made as a result * of its update. The value will be used for regions adjustment threshold. - * @reset_aggregated should reset the access monitoring results that aggregated - * by @check_accesses. * @get_scheme_score should return the priority score of a region for a scheme * as an integer in [0, &DAMOS_MAX_SCORE]. * @apply_scheme is called from @kdamond when a region for user provided @@ -589,7 +585,6 @@ struct damon_operations { void (*update)(struct damon_ctx *context); void (*prepare_access_checks)(struct damon_ctx *context); unsigned int (*check_accesses)(struct damon_ctx *context); - void (*reset_aggregated)(struct damon_ctx *context); int (*get_scheme_score)(struct damon_ctx *context, struct damon_target *t, struct damon_region *r, struct damos *scheme); diff --git a/mm/damon/core.c b/mm/damon/core.c index 177716847f4e..fc1eba3da419 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2490,8 +2490,6 @@ static int kdamond_fn(void *data) kdamond_reset_aggregated(ctx); kdamond_split_regions(ctx); - if (ctx->ops.reset_aggregated) - ctx->ops.reset_aggregated(ctx); } if (ctx->passed_sample_intervals >= next_ops_update_sis) { diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index fba8b3c8ba30..b08847ef9b81 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -621,7 +621,6 @@ static int __init damon_pa_initcall(void) .update = NULL, .prepare_access_checks = damon_pa_prepare_access_checks, .check_accesses = damon_pa_check_accesses, - .reset_aggregated = NULL, .target_valid = NULL, .cleanup = NULL, .apply_scheme = damon_pa_apply_scheme, diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index a6174f725bd7..e6d99106a7f9 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -710,7 +710,6 @@ static int __init damon_va_initcall(void) .update = damon_va_update, .prepare_access_checks = damon_va_prepare_access_checks, .check_accesses = damon_va_check_accesses, - .reset_aggregated = NULL, .target_valid = damon_va_target_valid, .cleanup = NULL, .apply_scheme = damon_va_apply_scheme, From 11e88e9265ec192cff33fc2e43e36c211851b32c Mon Sep 17 00:00:00 2001 From: Dev Jain Date: Thu, 6 Mar 2025 20:13:15 +0530 Subject: [PATCH 359/431] mm: remove redundant return in set_huge_zero_folio() It is the responsibility of the caller to check pmd_none(); in any case, we are not achieving anything by returning since there is no return value to tell the caller that we succeeded or not. So remove this check. 
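A simplified sketch of the expected calling pattern, assumed from the huge
zero folio fault path (illustrative, not the literal upstream code), shows
why the check is redundant: the caller already tests pmd_none() under the
PMD lock before installing the entry.

"""
/* Illustrative caller; locking context and error handling trimmed. */
ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (pmd_none(*vmf->pmd))
	set_huge_zero_folio(pgtable, vma->vm_mm, vma, haddr, vmf->pmd,
			    zero_folio);
spin_unlock(ptl);
"""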
Link: https://lkml.kernel.org/r/20250306144315.21907-1-dev.jain@arm.com Signed-off-by: Dev Jain Reviewed-by: David Hildenbrand Cc: Matthew Wilcow (Oracle) Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7433369d5d1f..80cf15116ce7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1309,8 +1309,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm, struct folio *zero_folio) { pmd_t entry; - if (!pmd_none(*pmd)) - return; entry = mk_pmd(&zero_folio->page, vma->vm_page_prot); entry = pmd_mkhuge(entry); pgtable_trans_huge_deposit(mm, pmd, pgtable); From 9039b9096ea27a20f0349d1537537663c935c8ed Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Thu, 6 Mar 2025 17:44:50 -0500 Subject: [PATCH 360/431] mm: page_ext: add an iteration API for page extensions Patch series "mm: page_ext: Introduce new iteration API", v3. Introduction ============ [ Thanks to David Hildenbrand for identifying the root cause of this issue and proving guidance on how to fix it. The new API idea, bugs and misconceptions are all mine though ] Currently, trying to reserve 1G pages with page_owner=on and sparsemem causes a crash. The reproducer is very simple: 1. Build the kernel with CONFIG_SPARSEMEM=y and the table extensions 2. Pass 'default_hugepagesz=1 page_owner=on' in the kernel command-line 3. Reserve one 1G page at run-time, this should crash (see patch 1 for backtrace) [ A crash with page_table_check is also possible, but harder to trigger ] Apparently, starting with commit cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") we now pass the full allocation order to page extension clients and the page extension implementation assumes that all PFNs of an allocation range will be stored in the same memory section (which is not true for 1G pages). To fix this, this series introduces a new iteration API for page extension objects. The API checks if the next page extension object can be retrieved from the current section or if it needs to look up for it in another section. Please, find all details in patch 1. I tested this series on arm64 and x86 by reserving 1G pages at run-time and doing kernel builds (always with page_owner=on and page_table_check=on). This patch (of 3): The page extension implementation assumes that all page extensions of a given page order are stored in the same memory section. The function page_ext_next() relies on this assumption by adding an offset to the current object to return the next adjacent page extension. This behavior works as expected for flatmem but fails for sparsemem when using 1G pages. The commit cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") exposes this issue, making it possible for a crash when using page_owner or page_table_check page extensions. The problem is that for 1G pages, the page extensions may span memory section boundaries and be stored in different memory sections. This issue was not visible before commit cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") because alloc_contig_pages() never passed more than MAX_PAGE_ORDER to post_alloc_hook(). However, the series introducing mentioned commit changed this behavior allowing the full 1G page order to be passed. Reproducer: 1. Build the kernel with CONFIG_SPARSEMEM=y and table extensions support 2. Pass 'default_hugepagesz=1 page_owner=on' in the kernel command-line 3. 
Reserve one 1G page at run-time, this should crash (backtrace below) To address this issue, this commit introduces a new API for iterating through page extensions. The main iteration macro is for_each_page_ext() and it must be called with the RCU read lock taken. Here's an usage example: """ struct page_ext_iter iter; struct page_ext *page_ext; ... rcu_read_lock(); for_each_page_ext(page, 1 << order, page_ext, iter) { struct my_page_ext *obj = get_my_page_ext_obj(page_ext); ... } rcu_read_unlock(); """ The loop construct uses page_ext_iter_next() which checks to see if we have crossed sections in the iteration. In this case, page_ext_iter_next() retrieves the next page_ext object from another section. Thanks to David Hildenbrand for helping identify the root cause and providing suggestions on how to fix and optmize the solution (final implementation and bugs are all mine through). Lastly, here's the backtrace, without kasan you can get random crashes: [ 76.052526] BUG: KASAN: slab-out-of-bounds in __update_page_owner_handle+0x238/0x298 [ 76.060283] Write of size 4 at addr ffff07ff96240038 by task tee/3598 [ 76.066714] [ 76.068203] CPU: 88 UID: 0 PID: 3598 Comm: tee Kdump: loaded Not tainted 6.13.0-rep1 #3 [ 76.076202] Hardware name: WIWYNN Mt.Jade Server System B81.030Z1.0007/Mt.Jade Motherboard, BIOS 2.10.20220810 (SCP: 2.10.20220810) 2022/08/10 [ 76.088972] Call trace: [ 76.091411] show_stack+0x20/0x38 (C) [ 76.095073] dump_stack_lvl+0x80/0xf8 [ 76.098733] print_address_description.constprop.0+0x88/0x398 [ 76.104476] print_report+0xa8/0x278 [ 76.108041] kasan_report+0xa8/0xf8 [ 76.111520] __asan_report_store4_noabort+0x20/0x30 [ 76.116391] __update_page_owner_handle+0x238/0x298 [ 76.121259] __set_page_owner+0xdc/0x140 [ 76.125173] post_alloc_hook+0x190/0x1d8 [ 76.129090] alloc_contig_range_noprof+0x54c/0x890 [ 76.133874] alloc_contig_pages_noprof+0x35c/0x4a8 [ 76.138656] alloc_gigantic_folio.isra.0+0x2c0/0x368 [ 76.143616] only_alloc_fresh_hugetlb_folio.isra.0+0x24/0x150 [ 76.149353] alloc_pool_huge_folio+0x11c/0x1f8 [ 76.153787] set_max_huge_pages+0x364/0xca8 [ 76.157961] __nr_hugepages_store_common+0xb0/0x1a0 [ 76.162829] nr_hugepages_store+0x108/0x118 [ 76.167003] kobj_attr_store+0x3c/0x70 [ 76.170745] sysfs_kf_write+0xfc/0x188 [ 76.174492] kernfs_fop_write_iter+0x274/0x3e0 [ 76.178927] vfs_write+0x64c/0x8e0 [ 76.182323] ksys_write+0xf8/0x1f0 [ 76.185716] __arm64_sys_write+0x74/0xb0 [ 76.189630] invoke_syscall.constprop.0+0xd8/0x1e0 [ 76.194412] do_el0_svc+0x164/0x1e0 [ 76.197891] el0_svc+0x40/0xe0 [ 76.200939] el0t_64_sync_handler+0x144/0x168 [ 76.205287] el0t_64_sync+0x1ac/0x1b0 Link: https://lkml.kernel.org/r/cover.1741301089.git.luizcap@redhat.com Link: https://lkml.kernel.org/r/a45893880b7e1601082d39d2c5c8b50bcc096305.1741301089.git.luizcap@redhat.com Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") Signed-off-by: Luiz Capitulino Acked-by: David Hildenbrand Cc: Johannes Weiner Cc: Luiz Capitulino Cc: Muchun Song Cc: Pasha Tatashin Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/page_ext.h | 93 ++++++++++++++++++++++++++++++++++++++++ mm/page_ext.c | 13 ++++++ 2 files changed, 106 insertions(+) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e4b48a0dda24..76c817162d2f 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #define __LINUX_PAGE_EXT_H #include +#include #include struct pglist_data; @@ -69,16 +70,31 @@ extern void page_ext_init(void); static inline void 
page_ext_init_flatmem_late(void) { } + +static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn) +{ + /* + * page_ext is allocated per memory section. Once we cross a + * memory section, we have to fetch the new pointer. + */ + return next_pfn % PAGES_PER_SECTION; +} #else extern void page_ext_init_flatmem(void); extern void page_ext_init_flatmem_late(void); static inline void page_ext_init(void) { } + +static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn) +{ + return true; +} #endif extern struct page_ext *page_ext_get(const struct page *page); extern void page_ext_put(struct page_ext *page_ext); +extern struct page_ext *page_ext_lookup(unsigned long pfn); static inline void *page_ext_data(struct page_ext *page_ext, struct page_ext_operations *ops) @@ -93,6 +109,83 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr) return next; } +struct page_ext_iter { + unsigned long index; + unsigned long start_pfn; + struct page_ext *page_ext; +}; + +/** + * page_ext_iter_begin() - Prepare for iterating through page extensions. + * @iter: page extension iterator. + * @pfn: PFN of the page we're interested in. + * + * Must be called with RCU read lock taken. + * + * Return: NULL if no page_ext exists for this page. + */ +static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter, + unsigned long pfn) +{ + iter->index = 0; + iter->start_pfn = pfn; + iter->page_ext = page_ext_lookup(pfn); + + return iter->page_ext; +} + +/** + * page_ext_iter_next() - Get next page extension + * @iter: page extension iterator. + * + * Must be called with RCU read lock taken. + * + * Return: NULL if no next page_ext exists. + */ +static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter) +{ + unsigned long pfn; + + if (WARN_ON_ONCE(!iter->page_ext)) + return NULL; + + iter->index++; + pfn = iter->start_pfn + iter->index; + + if (page_ext_iter_next_fast_possible(pfn)) + iter->page_ext = page_ext_next(iter->page_ext); + else + iter->page_ext = page_ext_lookup(pfn); + + return iter->page_ext; +} + +/** + * page_ext_iter_get() - Get current page extension + * @iter: page extension iterator. + * + * Return: NULL if no page_ext exists for this iterator. + */ +static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter) +{ + return iter->page_ext; +} + +/** + * for_each_page_ext(): iterate through page_ext objects. + * @__page: the page we're interested in + * @__pgcount: how many pages to iterate through + * @__page_ext: struct page_ext pointer where the current page_ext + * object is returned + * @__iter: struct page_ext_iter object (defined in the stack) + * + * IMPORTANT: must be called with RCU read lock taken. + */ +#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \ + for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\ + __page_ext && __iter.index < __pgcount; \ + __page_ext = page_ext_iter_next(&__iter)) + #else /* !CONFIG_PAGE_EXTENSION */ struct page_ext; diff --git a/mm/page_ext.c b/mm/page_ext.c index 641d93f6af4c..c351fdfe9e9a 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -507,6 +507,19 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat) #endif +/** + * page_ext_lookup() - Lookup a page extension for a PFN. + * @pfn: PFN of the page we're interested in. + * + * Must be called with RCU read lock taken and @pfn must be valid. + * + * Return: NULL if no page_ext exists for this page. 
+ */ +struct page_ext *page_ext_lookup(unsigned long pfn) +{ + return lookup_page_ext(pfn_to_page(pfn)); +} + /** * page_ext_get() - Get the extended information for a page. * @page: The page we're interested in. From 4e30b94cdad659d8ec5d32b61159138ce8d4ad1b Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Thu, 6 Mar 2025 17:44:51 -0500 Subject: [PATCH 361/431] mm: page_table_check: use new iteration API The page_ext_next() function assumes that page extension objects for a page order allocation always reside in the same memory section, which may not be true and could lead to crashes. Use the new page_ext iteration API instead. Link: https://lkml.kernel.org/r/ca2d53a020fe1cd65c442627ff6c0c40d591cbd8.1741301089.git.luizcap@redhat.com Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") Signed-off-by: Luiz Capitulino Acked-by: David Hildenbrand Cc: Johannes Weiner Cc: Muchun Song Cc: Pasha Tatashin Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/page_table_check.c | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/mm/page_table_check.c b/mm/page_table_check.c index c2b3600429a0..68109ee93841 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext) */ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) { + struct page_ext_iter iter; struct page_ext *page_ext; struct page *page; - unsigned long i; bool anon; if (!pfn_valid(pfn)) return; page = pfn_to_page(pfn); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - BUG_ON(PageSlab(page)); anon = PageAnon(page); - for (i = 0; i < pgcnt; i++) { + rcu_read_lock(); + for_each_page_ext(page, pgcnt, page_ext, iter) { struct page_table_check *ptc = get_page_table_check(page_ext); if (anon) { @@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0); } - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } /* @@ -102,24 +97,20 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt) static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, bool rw) { + struct page_ext_iter iter; struct page_ext *page_ext; struct page *page; - unsigned long i; bool anon; if (!pfn_valid(pfn)) return; page = pfn_to_page(pfn); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - BUG_ON(PageSlab(page)); anon = PageAnon(page); - for (i = 0; i < pgcnt; i++) { + rcu_read_lock(); + for_each_page_ext(page, pgcnt, page_ext, iter) { struct page_table_check *ptc = get_page_table_check(page_ext); if (anon) { @@ -129,9 +120,8 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0); } - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } /* @@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt, */ void __page_table_check_zero(struct page *page, unsigned int order) { + struct page_ext_iter iter; struct page_ext *page_ext; - unsigned long i; BUG_ON(PageSlab(page)); - page_ext = page_ext_get(page); - - if (!page_ext) - return; - - for (i = 0; i < (1ul << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { struct page_table_check *ptc = 
get_page_table_check(page_ext); BUG_ON(atomic_read(&ptc->anon_map_count)); BUG_ON(atomic_read(&ptc->file_map_count)); - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte) From 3a812bed3d32ae8c7443d1dd82f20b9a7e503ed2 Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Thu, 6 Mar 2025 17:44:52 -0500 Subject: [PATCH 362/431] mm: page_owner: use new iteration API The page_ext_next() function assumes that page extension objects for a page order allocation always reside in the same memory section, which may not be true and could lead to crashes. Use the new page_ext iteration API instead. Link: https://lkml.kernel.org/r/93c80b040960fa2ebab4a9729073f77a30649862.1741301089.git.luizcap@redhat.com Fixes: cf54f310d0d3 ("mm/hugetlb: use __GFP_COMP for gigantic folios") Signed-off-by: Luiz Capitulino Acked-by: David Hildenbrand Cc: Johannes Weiner Cc: Muchun Song Cc: Pasha Tatashin Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/page_owner.c | 86 +++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 46 deletions(-) diff --git a/mm/page_owner.c b/mm/page_owner.c index a409e2561a8f..849d4a471b6c 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -229,17 +229,19 @@ static void dec_stack_record_count(depot_stack_handle_t handle, handle); } -static inline void __update_page_owner_handle(struct page_ext *page_ext, +static inline void __update_page_owner_handle(struct page *page, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm) { - int i; + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - for (i = 0; i < (1 << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { page_owner = get_page_owner(page_ext); page_owner->handle = handle; page_owner->order = order; @@ -252,20 +254,22 @@ static inline void __update_page_owner_handle(struct page_ext *page_ext, sizeof(page_owner->comm)); __set_bit(PAGE_EXT_OWNER, &page_ext->flags); __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); - page_ext = page_ext_next(page_ext); } + rcu_read_unlock(); } -static inline void __update_page_owner_free_handle(struct page_ext *page_ext, +static inline void __update_page_owner_free_handle(struct page *page, depot_stack_handle_t handle, unsigned short order, pid_t pid, pid_t tgid, u64 free_ts_nsec) { - int i; + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - for (i = 0; i < (1 << order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << order, page_ext, iter) { page_owner = get_page_owner(page_ext); /* Only __reset_page_owner() wants to clear the bit */ if (handle) { @@ -275,8 +279,8 @@ static inline void __update_page_owner_free_handle(struct page_ext *page_ext, page_owner->free_ts_nsec = free_ts_nsec; page_owner->free_pid = current->pid; page_owner->free_tgid = current->tgid; - page_ext = page_ext_next(page_ext); } + rcu_read_unlock(); } void __reset_page_owner(struct page *page, unsigned short order) @@ -293,11 +297,11 @@ void __reset_page_owner(struct page *page, unsigned short order) page_owner = get_page_owner(page_ext); alloc_handle = page_owner->handle; + page_ext_put(page_ext); handle = save_stack(GFP_NOWAIT | __GFP_NOWARN); - __update_page_owner_free_handle(page_ext, handle, order, current->pid, + __update_page_owner_free_handle(page, handle, order, current->pid, 
current->tgid, free_ts_nsec); - page_ext_put(page_ext); if (alloc_handle != early_handle) /* @@ -313,19 +317,13 @@ void __reset_page_owner(struct page *page, unsigned short order) noinline void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) { - struct page_ext *page_ext; u64 ts_nsec = local_clock(); depot_stack_handle_t handle; handle = save_stack(gfp_mask); - - page_ext = page_ext_get(page); - if (unlikely(!page_ext)) - return; - __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1, + __update_page_owner_handle(page, handle, order, gfp_mask, -1, ts_nsec, current->pid, current->tgid, current->comm); - page_ext_put(page_ext); inc_stack_record_count(handle, gfp_mask, 1 << order); } @@ -344,44 +342,42 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) void __split_page_owner(struct page *page, int old_order, int new_order) { - int i; - struct page_ext *page_ext = page_ext_get(page); + struct page_ext_iter iter; + struct page_ext *page_ext; struct page_owner *page_owner; - if (unlikely(!page_ext)) - return; - - for (i = 0; i < (1 << old_order); i++) { + rcu_read_lock(); + for_each_page_ext(page, 1 << old_order, page_ext, iter) { page_owner = get_page_owner(page_ext); page_owner->order = new_order; - page_ext = page_ext_next(page_ext); } - page_ext_put(page_ext); + rcu_read_unlock(); } void __folio_copy_owner(struct folio *newfolio, struct folio *old) { - int i; - struct page_ext *old_ext; - struct page_ext *new_ext; + struct page_ext *page_ext; + struct page_ext_iter iter; struct page_owner *old_page_owner; struct page_owner *new_page_owner; depot_stack_handle_t migrate_handle; - old_ext = page_ext_get(&old->page); - if (unlikely(!old_ext)) + page_ext = page_ext_get(&old->page); + if (unlikely(!page_ext)) return; - new_ext = page_ext_get(&newfolio->page); - if (unlikely(!new_ext)) { - page_ext_put(old_ext); - return; - } + old_page_owner = get_page_owner(page_ext); + page_ext_put(page_ext); + + page_ext = page_ext_get(&newfolio->page); + if (unlikely(!page_ext)) + return; + + new_page_owner = get_page_owner(page_ext); + page_ext_put(page_ext); - old_page_owner = get_page_owner(old_ext); - new_page_owner = get_page_owner(new_ext); migrate_handle = new_page_owner->handle; - __update_page_owner_handle(new_ext, old_page_owner->handle, + __update_page_owner_handle(&newfolio->page, old_page_owner->handle, old_page_owner->order, old_page_owner->gfp_mask, old_page_owner->last_migrate_reason, old_page_owner->ts_nsec, old_page_owner->pid, @@ -391,7 +387,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old) * will be freed after migration. Keep them until then as they may be * useful. */ - __update_page_owner_free_handle(new_ext, 0, old_page_owner->order, + __update_page_owner_free_handle(&newfolio->page, 0, old_page_owner->order, old_page_owner->free_pid, old_page_owner->free_tgid, old_page_owner->free_ts_nsec); @@ -400,14 +396,12 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old) * for the new one and the old folio otherwise there will be an imbalance * when subtracting those pages from the stack. 
*/ - for (i = 0; i < (1 << new_page_owner->order); i++) { + rcu_read_lock(); + for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) { + old_page_owner = get_page_owner(page_ext); old_page_owner->handle = migrate_handle; - old_ext = page_ext_next(old_ext); - old_page_owner = get_page_owner(old_ext); } - - page_ext_put(new_ext); - page_ext_put(old_ext); + rcu_read_unlock(); } void pagetypeinfo_showmixedcount_print(struct seq_file *m, @@ -813,7 +807,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) goto ext_put_continue; /* Found early allocated page */ - __update_page_owner_handle(page_ext, early_handle, 0, 0, + __update_page_owner_handle(page, early_handle, 0, 0, -1, local_clock(), current->pid, current->tgid, current->comm); count++; From f0e11a997ab438ce91a7dc9a6dd64c0c4a6af112 Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Thu, 6 Mar 2025 15:21:31 +0800 Subject: [PATCH 363/431] mm/vmalloc: refactor __vmalloc_node_range_noprof() According to the code logic, the first parameter of the sub-function __get_vm_area_node() should be size instead of real_size. Then in __get_vm_area_node(), the size will be aligned, so the redundant alignment operation is deleted. The use of the real_size variable causes code redundancy, so it is removed to simplify the code. The real prefix is generally used to indicate the adjusted value of a parameter, but according to the code logic, it should indicate the original value, so it is recommended to rename it to original_align. Link: https://lkml.kernel.org/r/20250306072131.800499-1-liuye@kylinos.cn Signed-off-by: Liu Ye Reviewed-by: "Uladzislau Rezki (Sony)" Cc: Christop Hellwig Signed-off-by: Andrew Morton --- mm/vmalloc.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 61981ee1c9d2..3ed720a787ec 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3771,8 +3771,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, struct vm_struct *area; void *ret; kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; - unsigned long real_size = size; - unsigned long real_align = align; + unsigned long original_align = align; unsigned int shift = PAGE_SHIFT; if (WARN_ON_ONCE(!size)) @@ -3781,7 +3780,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, if ((size >> PAGE_SHIFT) > totalram_pages()) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, exceeds total pages", - real_size); + size); return NULL; } @@ -3798,19 +3797,18 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, else shift = arch_vmap_pte_supported_shift(size); - align = max(real_align, 1UL << shift); - size = ALIGN(real_size, 1UL << shift); + align = max(original_align, 1UL << shift); } again: - area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | + area = __get_vm_area_node(size, align, shift, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) { bool nofail = gfp_mask & __GFP_NOFAIL; warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, vm_struct allocation failed%s", - real_size, (nofail) ? ". Retrying." : ""); + size, (nofail) ? ". Retrying." : ""); if (nofail) { schedule_timeout_uninterruptible(1); goto again; @@ -3860,7 +3858,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, (gfp_mask & __GFP_SKIP_ZERO)) kasan_flags |= KASAN_VMALLOC_INIT; /* KASAN_VMALLOC_PROT_NORMAL already set if required. 
*/ - area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); + area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED @@ -3869,17 +3867,15 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, */ clear_vm_uninitialized_flag(area); - size = PAGE_ALIGN(size); if (!(vm_flags & VM_DEFER_KMEMLEAK)) - kmemleak_vmalloc(area, size, gfp_mask); + kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); return area->addr; fail: if (shift > PAGE_SHIFT) { shift = PAGE_SHIFT; - align = real_align; - size = real_size; + align = original_align; goto again; } From d9a04a2615c0b8767a42dc26a8c26383e8513cdc Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 6 Mar 2025 09:31:42 -0500 Subject: [PATCH 364/431] mm: swap_cgroup: remove double initialization of locals Fixes: 6769183166b3 ("mm/swap_cgroup: decouple swap cgroup recording and clearing") Signed-off-by: Johannes Weiner Reviewed-by: Muchun Song Cc: Chris Li Cc: Kairui Song Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/swap_cgroup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 1007c30f12e2..de779fed8c21 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -92,8 +92,7 @@ void swap_cgroup_record(struct folio *folio, unsigned short id, */ unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents) { - pgoff_t offset = swp_offset(ent); - pgoff_t end = offset + nr_ents; + pgoff_t offset, end; struct swap_cgroup *map; unsigned short old, iter = 0; From fa17ad58f8328e5c089377fed55ca8ed62f7cd1d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 26 Feb 2025 16:31:29 +0000 Subject: [PATCH 365/431] hugetlb: convert hugetlb_vma_maps_page() to hugetlb_vma_maps_pfn() pte_page() is more expensive than pte_pfn() (often it's defined as pfn_to_page(pte_pfn())), so it makes sense to do the conversion to pfn once (by calling folio_pfn()) rather than convert the pfn to a page each time. While this is a very small advantage, the main motivation is removing a reference to folio->page. Link: https://lkml.kernel.org/r/20250226163131.3795869-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 0fc179a59830..a427d41fbca0 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -338,8 +338,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio) * mutex for the page in the mapping. So, we can not race with page being * faulted into the vma. 
*/ -static bool hugetlb_vma_maps_page(struct vm_area_struct *vma, - unsigned long addr, struct page *page) +static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) { pte_t *ptep, pte; @@ -351,7 +351,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma, if (huge_pte_none(pte) || !pte_present(pte)) return false; - if (pte_page(pte) == page) + if (pte_pfn(pte) == pfn) return true; return false; @@ -396,7 +396,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h, { struct rb_root_cached *root = &mapping->i_mmap; struct hugetlb_vma_lock *vma_lock; - struct page *page = &folio->page; + unsigned long pfn = folio_pfn(folio); struct vm_area_struct *vma; unsigned long v_start; unsigned long v_end; @@ -412,7 +412,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h, v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); - if (!hugetlb_vma_maps_page(vma, v_start, page)) + if (!hugetlb_vma_maps_pfn(vma, v_start, pfn)) continue; if (!hugetlb_vma_trylock_write(vma)) { @@ -462,7 +462,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h, */ v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); - if (hugetlb_vma_maps_page(vma, v_start, page)) + if (hugetlb_vma_maps_pfn(vma, v_start, pfn)) unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); From fcc09f5b56601e618c3dafc9fdd74882924d9143 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 26 Feb 2025 16:31:30 +0000 Subject: [PATCH 366/431] hugetlb: convert adjust_range_hwpoison() to take a folio Remove a use of folio->page by passing the folio into adjust_range_hwpoison(). We need to convert to a page eventually, but that can happen inside adjust_range_hwpoison(). Link: https://lkml.kernel.org/r/20250226163131.3795869-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a427d41fbca0..a6fcaf8317a0 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -193,19 +193,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } /* - * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset. + * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset. * Returns the maximum number of bytes one can read without touching the 1st raw - * HWPOISON subpage. + * HWPOISON page. * * The implementation borrows the iteration logic from copy_page_to_iter*. */ -static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes) +static size_t adjust_range_hwpoison(struct folio *folio, size_t offset, + size_t bytes) { + struct page *page; size_t n = 0; size_t res = 0; - /* First subpage to start the loop. */ - page = nth_page(page, offset / PAGE_SIZE); + /* First page to start the loop. */ + page = folio_page(folio, offset / PAGE_SIZE); offset %= PAGE_SIZE; while (1) { if (is_raw_hwpoison_page_in_hugepage(page)) @@ -278,10 +280,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) else { /* * Adjust how many bytes safe to read without - * touching the 1st raw HWPOISON subpage after + * touching the 1st raw HWPOISON page after * offset. 
*/ - want = adjust_range_hwpoison(&folio->page, offset, nr); + want = adjust_range_hwpoison(folio, offset, nr); if (want == 0) { folio_put(folio); retval = -EIO; From 3fec86f8aa8c7ae84567a0d1396e84ada96141d8 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:54 -0500 Subject: [PATCH 367/431] xarray: add xas_try_split() to split a multi-index entry Patch series "Buddy allocator like (or non-uniform) folio split", v10. This patchset adds a new buddy allocator like (or non-uniform) large folio split from a order-n folio to order-m with m < n. It reduces 1. the total number of after-split folios from 2^(n-m) to n-m+1; 2. the amount of memory needed for multi-index xarray split from 2^(n/6-m/6) to n/6-m/6, assuming XA_CHUNK_SHIFT=6; 3. keep more large folios after a split from all order-m folios to order-(n-1) to order-m folios. For example, to split an order-9 to order-0, folio split generates 10 (or 11 for anonymous memory) folios instead of 512, allocates 1 xa_node instead of 8, and leaves 1 order-8, 1 order-7, ..., 1 order-1 and 2 order-0 folios (or 4 order-0 for anonymous memory) instead of 512 order-0 folios. Instead of duplicating existing split_huge_page*() code, __folio_split() is introduced as the shared backend code for both split_huge_page_to_list_to_order() and folio_split(). __folio_split() can support both uniform split and buddy allocator like (or non-uniform) split. All existing split_huge_page*() users can be gradually converted to use folio_split() if possible. In this patchset, I converted truncate_inode_partial_folio() to use folio_split(). xfstests quick group passed for both tmpfs and xfs. I also semi-replicated Hugh's test[12] and ran it without any issue for almost 24 hours. This patch (of 8): A preparation patch for non-uniform folio split, which always split a folio into half iteratively, and minimal xarray entry split. Currently, xas_split_alloc() and xas_split() always split all slots from a multi-index entry. They cost the same number of xa_node as the to-be-split slots. For example, to split an order-9 entry, which takes 2^(9-6)=8 slots, assuming XA_CHUNK_SHIFT is 6 (!CONFIG_BASE_SMALL), 8 xa_node are needed. Instead xas_try_split() is intended to be used iteratively to split the order-9 entry into 2 order-8 entries, then split one order-8 entry, based on the given index, to 2 order-7 entries, ..., and split one order-1 entry to 2 order-0 entries. When splitting the order-6 entry and a new xa_node is needed, xas_try_split() will try to allocate one if possible. As a result, xas_try_split() would only need 1 xa_node instead of 8. When a new xa_node is needed during the split, xas_try_split() can try to allocate one but no more. -ENOMEM will be return if a node cannot be allocated. -EINVAL will be return if a sibling node is split or cascade split happens, where two or more new nodes are needed, and these are not supported by xas_try_split(). xas_split_alloc() and xas_split() split an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | | | ------- --- --- ------- | | ... | | V V V V ----------- ----------- ----------- ----------- | xa_node | | xa_node | ... 
| xa_node | | xa_node | ----------- ----------- ----------- ----------- xas_try_split() splits an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | V ----------- | xa_node | ----------- Link: https://lkml.kernel.org/r/20250307174001.242794-1-ziy@nvidia.com Link: https://lkml.kernel.org/r/20250307174001.242794-2-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shutemov Cc: Miaohe Lin Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Zi Yan Cc: Kairui Song Signed-off-by: Andrew Morton --- Documentation/core-api/xarray.rst | 14 +++- include/linux/xarray.h | 6 ++ lib/test_xarray.c | 52 ++++++++++++ lib/xarray.c | 132 +++++++++++++++++++++++++++--- tools/testing/radix-tree/Makefile | 1 + 5 files changed, 192 insertions(+), 13 deletions(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index f6a3eef4fe7f..c6c91cbd0c3c 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -489,7 +489,19 @@ Storing ``NULL`` into any index of a multi-index entry will set the entry at every index to ``NULL`` and dissolve the tie. A multi-index entry can be split into entries occupying smaller ranges by calling xas_split_alloc() without the xa_lock held, followed by taking the lock -and calling xas_split(). +and calling xas_split() or calling xas_try_split() with xa_lock. The +difference between xas_split_alloc()+xas_split() and xas_try_split() is +that xas_split_alloc() + xas_split() split the entry from the original +order to the new order in one shot uniformly, whereas xas_try_split() +iteratively splits the entry containing the index non-uniformly. +For example, to split an order-9 entry, which takes 2^(9-6)=8 slots, +assuming ``XA_CHUNK_SHIFT`` is 6, xas_split_alloc() + xas_split() need +8 xa_node. xas_try_split() splits the order-9 entry into +2 order-8 entries, then split one order-8 entry, based on the given index, +to 2 order-7 entries, ..., and split one order-1 entry to 2 order-0 entries. +When splitting the order-6 entry and a new xa_node is needed, xas_try_split() +will try to allocate one if possible. As a result, xas_try_split() would only +need 1 xa_node instead of 8. 
Functions and structures ======================== diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 0b618ec04115..4010195201c9 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1555,6 +1555,7 @@ int xa_get_order(struct xarray *, unsigned long index); int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); +void xas_try_split(struct xa_state *xas, void *entry, unsigned int order); #else static inline int xa_get_order(struct xarray *xa, unsigned long index) { @@ -1576,6 +1577,11 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, gfp_t gfp) { } + +static inline void xas_try_split(struct xa_state *xas, void *entry, + unsigned int order) +{ +} #endif /** diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 0e865bab4a10..080a39d22e73 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1858,6 +1858,54 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_destroy(xa); } +static void check_split_2(struct xarray *xa, unsigned long index, + unsigned int order, unsigned int new_order) +{ + XA_STATE_ORDER(xas, xa, index, new_order); + unsigned int i, found; + void *entry; + + xa_store_order(xa, index, order, xa, GFP_KERNEL); + xa_set_mark(xa, index, XA_MARK_1); + + /* allocate a node for xas_try_split() */ + xas_set_err(&xas, -ENOMEM); + XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL)); + + xas_lock(&xas); + xas_try_split(&xas, xa, order); + if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) && + new_order < order - 1) { + XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL); + xas_unlock(&xas); + goto out; + } + for (i = 0; i < (1 << order); i += (1 << new_order)) + __xa_store(xa, index + i, xa_mk_index(index + i), 0); + xas_unlock(&xas); + + for (i = 0; i < (1 << order); i++) { + unsigned int val = index + (i & ~((1 << new_order) - 1)); + XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); + } + + xa_set_mark(xa, index, XA_MARK_0); + XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); + + xas_set_order(&xas, index, 0); + found = 0; + rcu_read_lock(); + xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) { + found++; + XA_BUG_ON(xa, xa_is_internal(entry)); + } + rcu_read_unlock(); + XA_BUG_ON(xa, found != 1 << (order - new_order)); +out: + xas_destroy(&xas); + xa_destroy(xa); +} + static noinline void check_split(struct xarray *xa) { unsigned int order, new_order; @@ -1869,6 +1917,10 @@ static noinline void check_split(struct xarray *xa) check_split_1(xa, 0, order, new_order); check_split_1(xa, 1UL << order, order, new_order); check_split_1(xa, 3UL << order, order, new_order); + + check_split_2(xa, 0, order, new_order); + check_split_2(xa, 1UL << order, order, new_order); + check_split_2(xa, 3UL << order, order, new_order); } } } diff --git a/lib/xarray.c b/lib/xarray.c index 116e9286c64e..3bae48558e21 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas) xas->xa_alloc = node = next; } } +EXPORT_SYMBOL_GPL(xas_destroy); /** * xas_nomem() - Allocate memory if needed. 
@@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset, } } +static void __xas_init_node_for_split(struct xa_state *xas, + struct xa_node *node, void *entry) +{ + unsigned int i; + void *sibling = NULL; + unsigned int mask = xas->xa_sibs; + + if (!node) + return; + node->array = xas->xa; + for (i = 0; i < XA_CHUNK_SIZE; i++) { + if ((i & mask) == 0) { + RCU_INIT_POINTER(node->slots[i], entry); + sibling = xa_mk_sibling(i); + } else { + RCU_INIT_POINTER(node->slots[i], sibling); + } + } +} + /** * xas_split_alloc() - Allocate memory for splitting an entry. * @xas: XArray operation state. @@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, gfp_t gfp) { unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; - unsigned int mask = xas->xa_sibs; /* XXX: no support for splitting really large entries yet */ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order)) @@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, return; do { - unsigned int i; - void *sibling = NULL; struct xa_node *node; node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp); if (!node) goto nomem; - node->array = xas->xa; - for (i = 0; i < XA_CHUNK_SIZE; i++) { - if ((i & mask) == 0) { - RCU_INIT_POINTER(node->slots[i], entry); - sibling = xa_mk_sibling(i); - } else { - RCU_INIT_POINTER(node->slots[i], sibling); - } - } + + __xas_init_node_for_split(xas, node, entry); RCU_INIT_POINTER(node->parent, xas->xa_alloc); xas->xa_alloc = node; } while (sibs-- > 0); @@ -1122,6 +1133,103 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); + +/** + * xas_try_split() - Try to split a multi-index entry. + * @xas: XArray operation state. + * @entry: New entry to store in the array. + * @order: Current entry order. + * + * The size of the new entries is set in @xas. The value in @entry is + * copied to all the replacement entries. If and only if one new xa_node is + * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is + * NULL. If more new xa_node are needed, the function gives EINVAL error. + * + * Context: Any context. The caller should hold the xa_lock. + */ +void xas_try_split(struct xa_state *xas, void *entry, unsigned int order) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int offset, marks; + struct xa_node *node; + void *curr = xas_load(xas); + int values = 0; + gfp_t gfp = GFP_NOWAIT; + + node = xas->xa_node; + if (xas_top(node)) + return; + + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + + marks = node_get_marks(node, xas->xa_offset); + + offset = xas->xa_offset + sibs; + + if (xas->xa_shift < node->shift) { + struct xa_node *child = xas->xa_alloc; + unsigned int expected_sibs = + (1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1; + + /* + * No support for splitting sibling entries + * (horizontally) or cascade split (vertically), which + * requires two or more new xa_nodes. + * Since if one xa_node allocation fails, + * it is hard to free the prior allocations. 
+ */ + if (sibs || xas->xa_sibs != expected_sibs) { + xas_destroy(xas); + xas_set_err(xas, -EINVAL); + return; + } + + if (!child) { + child = kmem_cache_alloc_lru(radix_tree_node_cachep, + xas->xa_lru, gfp); + if (!child) { + xas_destroy(xas); + xas_set_err(xas, -ENOMEM); + return; + } + RCU_INIT_POINTER(child->parent, xas->xa_alloc); + } + __xas_init_node_for_split(xas, child, entry); + + xas->xa_alloc = rcu_dereference_raw(child->parent); + child->shift = node->shift - XA_CHUNK_SHIFT; + child->offset = offset; + child->count = XA_CHUNK_SIZE; + child->nr_values = xa_is_value(entry) ? + XA_CHUNK_SIZE : 0; + RCU_INIT_POINTER(child->parent, node); + node_set_marks(node, offset, child, xas->xa_sibs, + marks); + rcu_assign_pointer(node->slots[offset], + xa_mk_node(child)); + if (xa_is_value(curr)) + values--; + xas_update(xas, child); + + } else { + do { + unsigned int canon = offset - xas->xa_sibs; + + node_set_marks(node, canon, NULL, 0, marks); + rcu_assign_pointer(node->slots[canon], entry); + while (offset > canon) + rcu_assign_pointer(node->slots[offset--], + xa_mk_sibling(canon)); + values += (xa_is_value(entry) - xa_is_value(curr)) * + (xas->xa_sibs + 1); + } while (offset-- > xas->xa_offset); + } + + node->nr_values += values; + xas_update(xas, node); +} +EXPORT_SYMBOL_GPL(xas_try_split); #endif /** diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 8b3591a51e1f..b2a6660bbd92 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -14,6 +14,7 @@ include ../shared/shared.mk main: $(OFILES) +xarray.o: ../../../lib/test_xarray.c idr-test.o: ../../../lib/test_ida.c idr-test: idr-test.o $(CORE_OFILES) From 00527733d0dc806a72bb9a56cfbd6c44d5f74872 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:55 -0500 Subject: [PATCH 368/431] mm/huge_memory: add two new (not yet used) functions for folio_split() This is a preparation patch, both added functions are not used yet. The added __split_unmapped_folio() is able to split a folio with its mapping removed in two manners: 1) uniform split (the existing way), and 2) buddy allocator like (or non-uniform) split. The added __split_folio_to_order() can split a folio into any lower order. For uniform split, __split_unmapped_folio() calls it once to split the given folio to the new order. For buddy allocator like (non-uniform) split, __split_unmapped_folio() calls it (folio_order - new_order) times and each time splits the folio containing the given page to one lower order. [ziy@nvidia.com: unfreeze head folio after page cache entries are updated] Link: https://lkml.kernel.org/r/0F15DA7F-1977-412F-9A3E-F06B515D4BD2@nvidia.com [ziy@nvidia.com: use NULL instead of 0 for folio->private assignment] Link: https://lkml.kernel.org/r/1E11B9DD-3A87-4C9C-8FB4-E1324FB6A21A@nvidia.com Link: https://lkml.kernel.org/r/20250307174001.242794-3-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. 
Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/huge_memory.c | 354 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 353 insertions(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 80cf15116ce7..c5661104ecca 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3265,7 +3265,6 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags) static void lru_add_page_tail(struct folio *folio, struct page *tail, struct lruvec *lruvec, struct list_head *list) { - VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); VM_BUG_ON_FOLIO(PageLRU(tail), folio); lockdep_assert_held(&lruvec->lru_lock); @@ -3517,6 +3516,359 @@ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) caller_pins; } +/* + * It splits @folio into @new_order folios and copies the @folio metadata to + * all the resulting folios. + */ +static void __split_folio_to_order(struct folio *folio, int old_order, + int new_order) +{ + long new_nr_pages = 1 << new_order; + long nr_pages = 1 << old_order; + long i; + + /* + * Skip the first new_nr_pages, since the new folio from them have all + * the flags from the original folio. + */ + for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) { + struct page *new_head = &folio->page + i; + + /* + * Careful: new_folio is not a "real" folio before we cleared PageTail. + * Don't pass it around before clear_compound_head(). + */ + struct folio *new_folio = (struct folio *)new_head; + + VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head); + + /* + * Clone page flags before unfreezing refcount. + * + * After successful get_page_unless_zero() might follow flags change, + * for example lock_page() which set PG_waiters. + * + * Note that for mapped sub-pages of an anonymous THP, + * PG_anon_exclusive has been cleared in unmap_folio() and is stored in + * the migration entry instead from where remap_page() will restore it. + * We can still have PG_anon_exclusive set on effectively unmapped and + * unreferenced sub-pages of an anonymous THP: we can simply drop + * PG_anon_exclusive (-> PG_mappedtodisk) for these here. + */ + new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + new_folio->flags |= (folio->flags & + ((1L << PG_referenced) | + (1L << PG_swapbacked) | + (1L << PG_swapcache) | + (1L << PG_mlocked) | + (1L << PG_uptodate) | + (1L << PG_active) | + (1L << PG_workingset) | + (1L << PG_locked) | + (1L << PG_unevictable) | +#ifdef CONFIG_ARCH_USES_PG_ARCH_2 + (1L << PG_arch_2) | +#endif +#ifdef CONFIG_ARCH_USES_PG_ARCH_3 + (1L << PG_arch_3) | +#endif + (1L << PG_dirty) | + LRU_GEN_MASK | LRU_REFS_MASK)); + + new_folio->mapping = folio->mapping; + new_folio->index = folio->index + i; + + /* + * page->private should not be set in tail pages. Fix up and warn once + * if private is unexpectedly set. + */ + if (unlikely(new_folio->private)) { + VM_WARN_ON_ONCE_PAGE(true, new_head); + new_folio->private = NULL; + } + + if (folio_test_swapcache(folio)) + new_folio->swap.val = folio->swap.val + i; + + /* Page flags must be visible before we make the page non-compound. */ + smp_wmb(); + + /* + * Clear PageTail before unfreezing page refcount. + * + * After successful get_page_unless_zero() might follow put_page() + * which needs correct compound_head(). 
+ */ + clear_compound_head(new_head); + if (new_order) { + prep_compound_page(new_head, new_order); + folio_set_large_rmappable(new_folio); + } + + if (folio_test_young(folio)) + folio_set_young(new_folio); + if (folio_test_idle(folio)) + folio_set_idle(new_folio); + + folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); + } + + if (new_order) + folio_set_order(folio, new_order); + else + ClearPageCompound(&folio->page); +} + +/* + * It splits an unmapped @folio to lower order smaller folios in two ways. + * @folio: the to-be-split folio + * @new_order: the smallest order of the after split folios (since buddy + * allocator like split generates folios with orders from @folio's + * order - 1 to new_order). + * @split_at: in buddy allocator like split, the folio containing @split_at + * will be split until its order becomes @new_order. + * @lock_at: the folio containing @lock_at is left locked for caller. + * @list: the after split folios will be added to @list if it is not NULL, + * otherwise to LRU lists. + * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory. + * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller + * @mapping: @folio->mapping + * @uniform_split: if the split is uniform or not (buddy allocator like split) + * + * + * 1. uniform split: the given @folio into multiple @new_order small folios, + * where all small folios have the same order. This is done when + * uniform_split is true. + * 2. buddy allocator like (non-uniform) split: the given @folio is split into + * half and one of the half (containing the given page) is split into half + * until the given @page's order becomes @new_order. This is done when + * uniform_split is false. + * + * The high level flow for these two methods are: + * 1. uniform split: a single __split_folio_to_order() is called to split the + * @folio into @new_order, then we traverse all the resulting folios one by + * one in PFN ascending order and perform stats, unfreeze, adding to list, + * and file mapping index operations. + * 2. non-uniform split: in general, folio_order - @new_order calls to + * __split_folio_to_order() are made in a for loop to split the @folio + * to one lower order at a time. The resulting small folios are processed + * like what is done during the traversal in 1, except the one containing + * @page, which is split in next for loop. + * + * After splitting, the caller's folio reference will be transferred to the + * folio containing @page. The other folios may be freed if they are not mapped. + * + * In terms of locking, after splitting, + * 1. uniform split leaves @page (or the folio contains it) locked; + * 2. buddy allocator like (non-uniform) split leaves @folio locked. + * + * + * For !uniform_split, when -ENOMEM is returned, the original folio might be + * split. The caller needs to check the input folio. + */ +static int __split_unmapped_folio(struct folio *folio, int new_order, + struct page *split_at, struct page *lock_at, + struct list_head *list, pgoff_t end, + struct xa_state *xas, struct address_space *mapping, + bool uniform_split) +{ + struct lruvec *lruvec; + struct address_space *swap_cache = NULL; + struct folio *origin_folio = folio; + struct folio *next_folio = folio_next(folio); + struct folio *new_folio; + struct folio *next; + int order = folio_order(folio); + int split_order; + int start_order = uniform_split ? 
new_order : order - 1; + int nr_dropped = 0; + int ret = 0; + bool stop_split = false; + + if (folio_test_swapcache(folio)) { + VM_BUG_ON(mapping); + + /* a swapcache folio can only be uniformly split to order-0 */ + if (!uniform_split || new_order != 0) + return -EINVAL; + + swap_cache = swap_address_space(folio->swap); + xa_lock(&swap_cache->i_pages); + } + + if (folio_test_anon(folio)) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); + + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ + lruvec = folio_lruvec_lock(folio); + + folio_clear_has_hwpoisoned(folio); + + /* + * split to new_order one order at a time. For uniform split, + * folio is split to new_order directly. + */ + for (split_order = start_order; + split_order >= new_order && !stop_split; + split_order--) { + int old_order = folio_order(folio); + struct folio *release; + struct folio *end_folio = folio_next(folio); + + /* order-1 anonymous folio is not supported */ + if (folio_test_anon(folio) && split_order == 1) + continue; + if (uniform_split && split_order != new_order) + continue; + + if (mapping) { + /* + * uniform split has xas_split_alloc() called before + * irq is disabled to allocate enough memory, whereas + * non-uniform split can handle ENOMEM. + */ + if (uniform_split) + xas_split(xas, folio, old_order); + else { + xas_set_order(xas, folio->index, split_order); + xas_try_split(xas, folio, old_order); + if (xas_error(xas)) { + ret = xas_error(xas); + stop_split = true; + goto after_split; + } + } + } + + /* + * Reset any memcg data overlay in the tail pages. + * folio_nr_pages() is unreliable until prep_compound_page() + * was called again. + */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + + + /* complete memcg works before add pages to LRU */ + split_page_memcg(&folio->page, old_order, split_order); + split_page_owner(&folio->page, old_order, split_order); + pgalloc_tag_split(folio, old_order, split_order); + + __split_folio_to_order(folio, old_order, split_order); + +after_split: + /* + * Iterate through after-split folios and perform related + * operations. But in buddy allocator like split, the folio + * containing the specified page is skipped until its order + * is new_order, since the folio will be worked on in next + * iteration. + */ + for (release = folio; release != end_folio; release = next) { + next = folio_next(release); + /* + * for buddy allocator like split, the folio containing + * page will be split next and should not be released, + * until the folio's order is new_order or stop_split + * is set to true by the above xas_split() failure. + */ + if (release == page_folio(split_at)) { + folio = release; + if (split_order != new_order && !stop_split) + continue; + } + if (folio_test_anon(release)) { + mod_mthp_stat(folio_order(release), + MTHP_STAT_NR_ANON, 1); + } + + /* + * origin_folio should be kept frozon until page cache + * entries are updated with all the other after-split + * folios to prevent others seeing stale page cache + * entries. + */ + if (release == origin_folio) + continue; + + folio_ref_unfreeze(release, 1 + + ((mapping || swap_cache) ? 
+ folio_nr_pages(release) : 0)); + + lru_add_page_tail(origin_folio, &release->page, + lruvec, list); + + /* Some pages can be beyond EOF: drop them from cache */ + if (release->index >= end) { + if (shmem_mapping(mapping)) + nr_dropped += folio_nr_pages(release); + else if (folio_test_clear_dirty(release)) + folio_account_cleaned(release, + inode_to_wb(mapping->host)); + __filemap_remove_folio(release, NULL); + folio_put_refs(release, folio_nr_pages(release)); + } else if (mapping) { + __xa_store(&mapping->i_pages, + release->index, release, 0); + } else if (swap_cache) { + __xa_store(&swap_cache->i_pages, + swap_cache_index(release->swap), + release, 0); + } + } + } + + /* + * Unfreeze origin_folio only after all page cache entries, which used + * to point to it, have been updated with new folios. Otherwise, + * a parallel folio_try_get() can grab origin_folio and its caller can + * see stale page cache entries. + */ + folio_ref_unfreeze(origin_folio, 1 + + ((mapping || swap_cache) ? folio_nr_pages(origin_folio) : 0)); + + unlock_page_lruvec(lruvec); + + if (swap_cache) + xa_unlock(&swap_cache->i_pages); + if (mapping) + xa_unlock(&mapping->i_pages); + + /* Caller disabled irqs, so they are still disabled here */ + local_irq_enable(); + + if (nr_dropped) + shmem_uncharge(mapping->host, nr_dropped); + + remap_page(origin_folio, 1 << order, + folio_test_anon(origin_folio) ? + RMP_USE_SHARED_ZEROPAGE : 0); + + /* + * At this point, folio should contain the specified page. + * For uniform split, it is left for caller to unlock. + * For buddy allocator like split, the first after-split folio is left + * for caller to unlock. + */ + for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) { + next = folio_next(new_folio); + if (new_folio == page_folio(lock_at)) + continue; + + folio_unlock(new_folio); + /* + * Subpages may be freed if there wasn't any mapping + * like if add_to_swap() is running on a lru page that + * had its mapping zapped. And freeing these pages + * requires taking the lru_lock so we do the put_page + * of the tail pages after the split is complete. + */ + free_page_and_swap_cache(&new_folio->page); + } + return ret; +} + /* * This function splits a large folio into smaller folios of order @new_order. * @page can point to any page of the large folio to split. The split operation From 6384dd1d18de7b84bc346981419e63a5fa72ced4 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:56 -0500 Subject: [PATCH 369/431] mm/huge_memory: move folio split common code to __folio_split() This is a preparation patch for folio_split(). In the upcoming patch folio_split() will share folio unmapping and remapping code with split_huge_page_to_list_to_order(), so move the code to a common function __folio_split() first. Add a TODO for splitting large shmem folio in swap cache. Link: https://lkml.kernel.org/r/20250307174001.242794-4-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. 
Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/huge_memory.c | 112 ++++++++++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 50 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c5661104ecca..768c5a6662ae 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3869,57 +3869,9 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, return ret; } -/* - * This function splits a large folio into smaller folios of order @new_order. - * @page can point to any page of the large folio to split. The split operation - * does not change the position of @page. - * - * Prerequisites: - * - * 1) The caller must hold a reference on the @page's owning folio, also known - * as the large folio. - * - * 2) The large folio must be locked. - * - * 3) The folio must not be pinned. Any unexpected folio references, including - * GUP pins, will result in the folio not getting split; instead, the caller - * will receive an -EAGAIN. - * - * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not - * supported for non-file-backed folios, because folio->_deferred_list, which - * is used by partially mapped folios, is stored in subpage 2, but an order-1 - * folio only has subpages 0 and 1. File-backed order-1 folios are supported, - * since they do not use _deferred_list. - * - * After splitting, the caller's folio reference will be transferred to @page, - * resulting in a raised refcount of @page after this call. The other pages may - * be freed if they are not mapped. - * - * If @list is null, tail pages will be added to LRU list, otherwise, to @list. - * - * Pages in @new_order will inherit the mapping, flags, and so on from the - * huge page. - * - * Returns 0 if the huge page was split successfully. - * - * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if - * the folio was concurrently removed from the page cache. - * - * Returns -EBUSY when trying to split the huge zeropage, if the folio is - * under writeback, if fs-specific folio metadata cannot currently be - * released, or if some unexpected race happened (e.g., anon VMA disappeared, - * truncation). - * - * Callers should ensure that the order respects the address space mapping - * min-order if one is set for non-anonymous folios. - * - * Returns -EINVAL when trying to split to an order that is incompatible - * with the folio. Splitting to order 0 is compatible with all folios. - */ -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, - unsigned int new_order) +static int __folio_split(struct folio *folio, unsigned int new_order, + struct page *page, struct list_head *list) { - struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); /* reset xarray order to new order after split */ XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); @@ -3995,6 +3947,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, mapping = folio->mapping; /* Truncated ? */ + /* + * TODO: add support for large shmem folio in swap cache. + * When shmem is in swap cache, mapping is NULL and + * folio_test_swapcache() is true. + */ if (!mapping) { ret = -EBUSY; goto out; @@ -4129,6 +4086,61 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, return ret; } +/* + * This function splits a large folio into smaller folios of order @new_order. 
+ * @page can point to any page of the large folio to split. The split operation + * does not change the position of @page. + * + * Prerequisites: + * + * 1) The caller must hold a reference on the @page's owning folio, also known + * as the large folio. + * + * 2) The large folio must be locked. + * + * 3) The folio must not be pinned. Any unexpected folio references, including + * GUP pins, will result in the folio not getting split; instead, the caller + * will receive an -EAGAIN. + * + * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not + * supported for non-file-backed folios, because folio->_deferred_list, which + * is used by partially mapped folios, is stored in subpage 2, but an order-1 + * folio only has subpages 0 and 1. File-backed order-1 folios are supported, + * since they do not use _deferred_list. + * + * After splitting, the caller's folio reference will be transferred to @page, + * resulting in a raised refcount of @page after this call. The other pages may + * be freed if they are not mapped. + * + * If @list is null, tail pages will be added to LRU list, otherwise, to @list. + * + * Pages in @new_order will inherit the mapping, flags, and so on from the + * huge page. + * + * Returns 0 if the huge page was split successfully. + * + * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if + * the folio was concurrently removed from the page cache. + * + * Returns -EBUSY when trying to split the huge zeropage, if the folio is + * under writeback, if fs-specific folio metadata cannot currently be + * released, or if some unexpected race happened (e.g., anon VMA disappeared, + * truncation). + * + * Callers should ensure that the order respects the address space mapping + * min-order if one is set for non-anonymous folios. + * + * Returns -EINVAL when trying to split to an order that is incompatible + * with the folio. Splitting to order 0 is compatible with all folios. + */ +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order) +{ + struct folio *folio = page_folio(page); + + return __folio_split(folio, new_order, page, list); +} + int min_order_for_split(struct folio *folio) { if (folio_test_anon(folio)) From 58729c04cf1092b87aeef0bf0998c9e2e4771133 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:59 -0500 Subject: [PATCH 370/431] mm/huge_memory: add buddy allocator like (non-uniform) folio_split() folio_split() splits a large folio in the same way as buddy allocator splits a large free page for allocation. The purpose is to minimize the number of folios after the split. For example, if a user wants to free the 3rd subpage in an order-9 folio, folio_split() will split the order-9 folio as: O-0, O-0, O-0, O-0, O-2, O-3, O-4, O-5, O-6, O-7, O-8 if it is anon, since anon folio does not support order-1 yet. ----------------------------------------------------------------- | | | | | | | | | |O-0|O-0|O-0|O-0| O-2 |...| O-7 | O-8 | | | | | | | | | | ----------------------------------------------------------------- O-1, O-0, O-0, O-2, O-3, O-4, O-5, O-6, O-7, O-8 if it is pagecache --------------------------------------------------------------- | | | | | | | | | O-1 |O-0|O-0| O-2 |...| O-7 | O-8 | | | | | | | | | --------------------------------------------------------------- It generates fewer folios (i.e., 11 or 10) than the existing page split approach, which splits the order-9 to 512 order-0 folios. 
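To make those counts concrete, here is a minimal userspace C sketch (an illustration only, not part of this patch; simulate_split() and its simplified handling of the missing order-1 anonymous case are assumptions of the example, not kernel code). It walks the same halving sequence for an order-9 folio split around subpage 3 and prints the orders of the resulting folios:

#include <stdio.h>
#include <stdbool.h>

/*
 * Print the orders produced when an order-@order folio is split down to
 * order-0 around subpage @target using the buddy-allocator-like scheme.
 */
static void simulate_split(unsigned int order, unsigned long target, bool anon)
{
	unsigned long base = 0;	/* first subpage of the chunk still holding @target */
	unsigned int nr_folios = 0;
	unsigned long i;

	printf("%-9s order-%u split at subpage %lu:",
	       anon ? "anon" : "pagecache", order, target);

	while (order > 0) {
		unsigned int new_order = order - 1;

		if (anon && new_order == 1) {
			/*
			 * No order-1 anon folios: the remaining order-2 chunk
			 * is split straight into four order-0 folios.
			 */
			for (i = 0; i < (1UL << order); i++) {
				if (base + i == target)
					continue;
				printf(" O-0");
				nr_folios++;
			}
			order = 0;
			break;
		}

		/* the half not containing @target is released as one folio */
		printf(" O-%u", new_order);
		nr_folios++;
		if (target >= base + (1UL << new_order))
			base += 1UL << new_order;
		order = new_order;
	}

	/* plus the folio that still contains @target itself */
	printf(" O-%u -> %u folios\n", order, nr_folios + 1);
}

int main(void)
{
	simulate_split(9, 3, true);	/* expect 11 folios */
	simulate_split(9, 3, false);	/* expect 10 folios */
	return 0;
}

Compiled as an ordinary C program, it prints the released folios from order-8 downward plus the final order-0 folio holding the target subpage: 11 folios for the anonymous case and 10 for the pagecache case, matching the numbers above.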
It also reduces the number of new xa_node needed during a pagecache folio split from 8 to 1, potentially decreasing the folio split failure rate due to memory constraints. folio_split() and existing split_huge_page_to_list_to_order() share the folio unmapping and remapping code in __folio_split() and the common backend split code in __split_unmapped_folio() using uniform_split variable to distinguish their operations. uniform_split_supported() and non_uniform_split_supported() are added to factor out check code and will be used outside __folio_split() in the following commit. Link: https://lkml.kernel.org/r/20250307174001.242794-5-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/huge_memory.c | 170 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 128 insertions(+), 42 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 768c5a6662ae..6322fc138d92 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3869,12 +3869,85 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, return ret; } +static bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns) +{ + if (folio_test_anon(folio)) { + /* order-1 is not supported for anonymous THP. */ + VM_WARN_ONCE(warns && new_order == 1, + "Cannot split to order-1 folio"); + return new_order != 1; + } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + !mapping_large_folio_support(folio->mapping)) { + /* + * No split if the file system does not support large folio. + * Note that we might still have THPs in such mappings due to + * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping + * does not actually support large folios properly. + */ + VM_WARN_ONCE(warns, + "Cannot split file folio to non-0 order"); + return false; + } + + /* Only swapping a whole PMD-mapped folio is supported */ + if (folio_test_swapcache(folio)) { + VM_WARN_ONCE(warns, + "Cannot split swapcache folio to non-0 order"); + return false; + } + + return true; +} + +/* See comments in non_uniform_split_supported() */ +static bool uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns) +{ + if (folio_test_anon(folio)) { + VM_WARN_ONCE(warns && new_order == 1, + "Cannot split to order-1 folio"); + return new_order != 1; + } else if (new_order) { + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + !mapping_large_folio_support(folio->mapping)) { + VM_WARN_ONCE(warns, + "Cannot split file folio to non-0 order"); + return false; + } + } + + if (new_order && folio_test_swapcache(folio)) { + VM_WARN_ONCE(warns, + "Cannot split swapcache folio to non-0 order"); + return false; + } + + return true; +} + +/* + * __folio_split: split a folio at @split_at to a @new_order folio + * @folio: folio to split + * @new_order: the order of the new folio + * @split_at: a page within the new folio + * @lock_at: a page within @folio to be left locked to caller + * @list: after-split folios will be put on it if non NULL + * @uniform_split: perform uniform split or not (non-uniform split) + * + * It calls __split_unmapped_folio() to perform uniform and non-uniform split. + * It is in charge of checking whether the split is supported or not and + * preparing @folio for __split_unmapped_folio(). 
+ * + * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be + * split but not to @new_order, the caller needs to check) + */ static int __folio_split(struct folio *folio, unsigned int new_order, - struct page *page, struct list_head *list) + struct page *split_at, struct page *lock_at, + struct list_head *list, bool uniform_split) { struct deferred_split *ds_queue = get_deferred_split_queue(folio); - /* reset xarray order to new order after split */ - XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); + XA_STATE(xas, &folio->mapping->i_pages, folio->index); bool is_anon = folio_test_anon(folio); struct address_space *mapping = NULL; struct anon_vma *anon_vma = NULL; @@ -3886,32 +3959,17 @@ static int __folio_split(struct folio *folio, unsigned int new_order, VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + if (folio != page_folio(split_at) || folio != page_folio(lock_at)) + return -EINVAL; + if (new_order >= folio_order(folio)) return -EINVAL; - if (is_anon) { - /* order-1 is not supported for anonymous THP. */ - if (new_order == 1) { - VM_WARN_ONCE(1, "Cannot split to order-1 folio"); - return -EINVAL; - } - } else if (new_order) { - /* - * No split if the file system does not support large folio. - * Note that we might still have THPs in such mappings due to - * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping - * does not actually support large folios properly. - */ - if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && - !mapping_large_folio_support(folio->mapping)) { - VM_WARN_ONCE(1, - "Cannot split file folio to non-0 order"); - return -EINVAL; - } - } + if (uniform_split && !uniform_split_supported(folio, new_order, true)) + return -EINVAL; - /* Only swapping a whole PMD-mapped folio is supported */ - if (folio_test_swapcache(folio) && new_order) + if (!uniform_split && + !non_uniform_split_supported(folio, new_order, true)) return -EINVAL; is_hzp = is_huge_zero_folio(folio); @@ -3973,21 +4031,24 @@ static int __folio_split(struct folio *folio, unsigned int new_order, goto out; } - xas_split_alloc(&xas, folio, folio_order(folio), gfp); - if (xas_error(&xas)) { - ret = xas_error(&xas); - goto out; + if (uniform_split) { + xas_set_order(&xas, folio->index, new_order); + xas_split_alloc(&xas, folio, folio_order(folio), gfp); + if (xas_error(&xas)) { + ret = xas_error(&xas); + goto out; + } } anon_vma = NULL; i_mmap_lock_read(mapping); /* - *__split_huge_page() may need to trim off pages beyond EOF: - * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, - * which cannot be nested inside the page tree lock. So note - * end now: i_size itself may be changed at any moment, but - * folio lock is good enough to serialize the trimming. + *__split_unmapped_folio() may need to trim off pages beyond + * EOF: but on 32-bit, i_size_read() takes an irq-unsafe + * seqlock, which cannot be nested inside the page tree lock. + * So note end now: i_size itself may be changed at any moment, + * but folio lock is good enough to serialize the trimming. 
*/ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); if (shmem_mapping(mapping)) @@ -4041,7 +4102,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order, if (mapping) { int nr = folio_nr_pages(folio); - xas_split(&xas, folio, folio_order(folio)); if (folio_test_pmd_mappable(folio) && new_order < HPAGE_PMD_ORDER) { if (folio_test_swapbacked(folio)) { @@ -4055,12 +4115,9 @@ static int __folio_split(struct folio *folio, unsigned int new_order, } } - if (is_anon) { - mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); - mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order)); - } - __split_huge_page(page, list, end, new_order); - ret = 0; + ret = __split_unmapped_folio(folio, new_order, + split_at, lock_at, list, end, &xas, mapping, + uniform_split); } else { spin_unlock(&ds_queue->split_queue_lock); fail: @@ -4138,7 +4195,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, { struct folio *folio = page_folio(page); - return __folio_split(folio, new_order, page, list); + return __folio_split(folio, new_order, &folio->page, page, list, true); +} + +/* + * folio_split: split a folio at @split_at to a @new_order folio + * @folio: folio to split + * @new_order: the order of the new folio + * @split_at: a page within the new folio + * + * return: 0: successful, <0 failed (if -ENOMEM is returned, @folio might be + * split but not to @new_order, the caller needs to check) + * + * It has the same prerequisites and returns as + * split_huge_page_to_list_to_order(). + * + * Split a folio at @split_at to a new_order folio, leave the + * remaining subpages of the original folio as large as possible. For example, + * in the case of splitting an order-9 folio at its third order-3 subpages to + * an order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio. + * After the split, there will be a group of folios with different orders and + * the new folio containing @split_at is marked in bracket: + * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8]. + * + * After split, folio is left locked for caller. + */ +static int folio_split(struct folio *folio, unsigned int new_order, + struct page *split_at, struct list_head *list) +{ + return __folio_split(folio, new_order, split_at, &folio->page, list, + false); } int min_order_for_split(struct folio *folio) From 1f43d5aa24b2741d9bf68b0dc2c5b10e87dc60a9 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:58 -0500 Subject: [PATCH 371/431] mm/huge_memory: remove the old, unused __split_huge_page() Now split_huge_page_to_list_to_order() uses the new backend split code in __split_unmapped_folio(), the old __split_huge_page() and __split_huge_page_tail() can be removed. Link: https://lkml.kernel.org/r/20250307174001.242794-6-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. 
Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/huge_memory.c | 215 ----------------------------------------------- 1 file changed, 215 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6322fc138d92..995ed685c241 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3284,221 +3284,6 @@ static void lru_add_page_tail(struct folio *folio, struct page *tail, } } -static void __split_huge_page_tail(struct folio *folio, int tail, - struct lruvec *lruvec, struct list_head *list, - unsigned int new_order) -{ - struct page *head = &folio->page; - struct page *page_tail = head + tail; - /* - * Careful: new_folio is not a "real" folio before we cleared PageTail. - * Don't pass it around before clear_compound_head(). - */ - struct folio *new_folio = (struct folio *)page_tail; - - VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); - - /* - * Clone page flags before unfreezing refcount. - * - * After successful get_page_unless_zero() might follow flags change, - * for example lock_page() which set PG_waiters. - * - * Note that for mapped sub-pages of an anonymous THP, - * PG_anon_exclusive has been cleared in unmap_folio() and is stored in - * the migration entry instead from where remap_page() will restore it. - * We can still have PG_anon_exclusive set on effectively unmapped and - * unreferenced sub-pages of an anonymous THP: we can simply drop - * PG_anon_exclusive (-> PG_mappedtodisk) for these here. - */ - page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; - page_tail->flags |= (head->flags & - ((1L << PG_referenced) | - (1L << PG_swapbacked) | - (1L << PG_swapcache) | - (1L << PG_mlocked) | - (1L << PG_uptodate) | - (1L << PG_active) | - (1L << PG_workingset) | - (1L << PG_locked) | - (1L << PG_unevictable) | -#ifdef CONFIG_ARCH_USES_PG_ARCH_2 - (1L << PG_arch_2) | -#endif -#ifdef CONFIG_ARCH_USES_PG_ARCH_3 - (1L << PG_arch_3) | -#endif - (1L << PG_dirty) | - LRU_GEN_MASK | LRU_REFS_MASK)); - - /* ->mapping in first and second tail page is replaced by other uses */ - VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, - page_tail); - new_folio->mapping = folio->mapping; - new_folio->index = folio->index + tail; - - /* - * page->private should not be set in tail pages. Fix up and warn once - * if private is unexpectedly set. - */ - if (unlikely(page_tail->private)) { - VM_WARN_ON_ONCE_PAGE(true, page_tail); - page_tail->private = 0; - } - if (folio_test_swapcache(folio)) - new_folio->swap.val = folio->swap.val + tail; - - /* Page flags must be visible before we make the page non-compound. */ - smp_wmb(); - - /* - * Clear PageTail before unfreezing page refcount. - * - * After successful get_page_unless_zero() might follow put_page() - * which needs correct compound_head(). - */ - clear_compound_head(page_tail); - if (new_order) { - prep_compound_page(page_tail, new_order); - folio_set_large_rmappable(new_folio); - } - - /* Finally unfreeze refcount. Additional reference from page cache. */ - page_ref_unfreeze(page_tail, - 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ? - folio_nr_pages(new_folio) : 0)); - - if (folio_test_young(folio)) - folio_set_young(new_folio); - if (folio_test_idle(folio)) - folio_set_idle(new_folio); - - folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); - - /* - * always add to the tail because some iterators expect new - * pages to show after the currently processed elements - e.g. 
- * migrate_pages - */ - lru_add_page_tail(folio, page_tail, lruvec, list); -} - -static void __split_huge_page(struct page *page, struct list_head *list, - pgoff_t end, unsigned int new_order) -{ - struct folio *folio = page_folio(page); - struct page *head = &folio->page; - struct lruvec *lruvec; - struct address_space *swap_cache = NULL; - unsigned long offset = 0; - int i, nr_dropped = 0; - unsigned int new_nr = 1 << new_order; - int order = folio_order(folio); - unsigned int nr = 1 << order; - - /* - * Reset any memcg data overlay in the tail pages. folio_nr_pages() - * is unreliable after this point. - */ -#ifdef NR_PAGES_IN_LARGE_FOLIO - folio->_nr_pages = 0; -#endif - - /* complete memcg works before add pages to LRU */ - split_page_memcg(head, order, new_order); - - if (folio_test_anon(folio) && folio_test_swapcache(folio)) { - offset = swap_cache_index(folio->swap); - swap_cache = swap_address_space(folio->swap); - xa_lock(&swap_cache->i_pages); - } - - /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ - lruvec = folio_lruvec_lock(folio); - - folio_clear_has_hwpoisoned(folio); - - for (i = nr - new_nr; i >= new_nr; i -= new_nr) { - struct folio *tail; - __split_huge_page_tail(folio, i, lruvec, list, new_order); - tail = page_folio(head + i); - /* Some pages can be beyond EOF: drop them from page cache */ - if (tail->index >= end) { - if (shmem_mapping(folio->mapping)) - nr_dropped += new_nr; - else if (folio_test_clear_dirty(tail)) - folio_account_cleaned(tail, - inode_to_wb(folio->mapping->host)); - __filemap_remove_folio(tail, NULL); - folio_put_refs(tail, folio_nr_pages(tail)); - } else if (!folio_test_anon(folio)) { - __xa_store(&folio->mapping->i_pages, tail->index, - tail, 0); - } else if (swap_cache) { - __xa_store(&swap_cache->i_pages, offset + i, - tail, 0); - } - } - - if (!new_order) - ClearPageCompound(head); - else { - struct folio *new_folio = (struct folio *)head; - - folio_set_order(new_folio, new_order); - } - unlock_page_lruvec(lruvec); - /* Caller disabled irqs, so they are still disabled here */ - - split_page_owner(head, order, new_order); - pgalloc_tag_split(folio, order, new_order); - - /* See comment in __split_huge_page_tail() */ - if (folio_test_anon(folio)) { - /* Additional pin to swap cache */ - if (folio_test_swapcache(folio)) { - folio_ref_add(folio, 1 + new_nr); - xa_unlock(&swap_cache->i_pages); - } else { - folio_ref_inc(folio); - } - } else { - /* Additional pin to page cache */ - folio_ref_add(folio, 1 + new_nr); - xa_unlock(&folio->mapping->i_pages); - } - local_irq_enable(); - - if (nr_dropped) - shmem_uncharge(folio->mapping->host, nr_dropped); - remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0); - - /* - * set page to its compound_head when split to non order-0 pages, so - * we can skip unlocking it below, since PG_locked is transferred to - * the compound_head of the page and the caller will unlock it. - */ - if (new_order) - page = compound_head(page); - - for (i = 0; i < nr; i += new_nr) { - struct page *subpage = head + i; - struct folio *new_folio = page_folio(subpage); - if (subpage == page) - continue; - folio_unlock(new_folio); - - /* - * Subpages may be freed if there wasn't any mapping - * like if add_to_swap() is running on a lru page that - * had its mapping zapped. And freeing these pages - * requires taking the lru_lock so we do the put_page - * of the tail pages after the split is complete. 
- */ - free_page_and_swap_cache(subpage); - } -} - /* Racy check whether the huge page can be split */ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) { From 4b94c18d15199658f1a86231663e97d3cc12d8de Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:39:59 -0500 Subject: [PATCH 372/431] mm/huge_memory: add folio_split() to debugfs testing interface This allows to test folio_split() by specifying an additional in folio page offset parameter to split_huge_page debugfs interface. Link: https://lkml.kernel.org/r/20250307174001.242794-7-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/huge_memory.c | 47 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 995ed685c241..c6ad2e4053d8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -4331,7 +4331,8 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) } static int split_huge_pages_pid(int pid, unsigned long vaddr_start, - unsigned long vaddr_end, unsigned int new_order) + unsigned long vaddr_end, unsigned int new_order, + long in_folio_offset) { int ret = 0; struct task_struct *task; @@ -4415,8 +4416,16 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, if (!folio_test_anon(folio) && folio->mapping != mapping) goto unlock; - if (!split_folio_to_order(folio, target_order)) - split++; + if (in_folio_offset < 0 || + in_folio_offset >= folio_nr_pages(folio)) { + if (!split_folio_to_order(folio, target_order)) + split++; + } else { + struct page *split_at = folio_page(folio, + in_folio_offset); + if (!folio_split(folio, target_order, split_at, NULL)) + split++; + } unlock: @@ -4439,7 +4448,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, } static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, - pgoff_t off_end, unsigned int new_order) + pgoff_t off_end, unsigned int new_order, + long in_folio_offset) { struct filename *file; struct file *candidate; @@ -4488,8 +4498,15 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, if (folio->mapping != mapping) goto unlock; - if (!split_folio_to_order(folio, target_order)) - split++; + if (in_folio_offset < 0 || in_folio_offset >= nr_pages) { + if (!split_folio_to_order(folio, target_order)) + split++; + } else { + struct page *split_at = folio_page(folio, + in_folio_offset); + if (!folio_split(folio, target_order, split_at, NULL)) + split++; + } unlock: folio_unlock(folio); @@ -4522,6 +4539,7 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, int pid; unsigned long vaddr_start, vaddr_end; unsigned int new_order = 0; + long in_folio_offset = -1; ret = mutex_lock_interruptible(&split_debug_mutex); if (ret) @@ -4550,30 +4568,33 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, goto out; } - ret = sscanf(tok_buf, "0x%lx,0x%lx,%d", &off_start, - &off_end, &new_order); - if (ret != 2 && ret != 3) { + ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end, + &new_order, &in_folio_offset); + if (ret != 2 && ret != 3 && ret != 4) { ret = -EINVAL; goto out; } - ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order); + ret = 
split_huge_pages_in_file(file_path, off_start, off_end, + new_order, in_folio_offset); if (!ret) ret = input_len; goto out; } - ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order); + ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start, + &vaddr_end, &new_order, &in_folio_offset); if (ret == 1 && pid == 1) { split_huge_pages_all(); ret = strlen(input_buf); goto out; - } else if (ret != 3 && ret != 4) { + } else if (ret != 3 && ret != 4 && ret != 5) { ret = -EINVAL; goto out; } - ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order); + ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order, + in_folio_offset); if (!ret) ret = strlen(input_buf); out: From 7460b470a131f985a70302a322617121efdd7caa Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:40:00 -0500 Subject: [PATCH 373/431] mm/truncate: use folio_split() in truncate operation Instead of splitting the large folio uniformly during truncation, try to use buddy allocator like folio_split() at the start and the end of a truncation range to minimize the number of resulting folios if it is supported. try_folio_split() is introduced to use folio_split() if supported and it falls back to uniform split otherwise. For example, to truncate a order-4 folio [0, 1, 2, 3, 4, 5, ..., 15] between [3, 10] (inclusive), folio_split() splits the folio at 3 to [0,1], [2], [3], [4..7], [8..15] and [3], [4..7] can be dropped and [8..15] is kept with zeros in [8..10], then another folio_split() is done at 10, so [8..10] can be dropped. One possible optimization is to make folio_split() to split a folio based on a given range, like [3..10] above. But that complicates folio_split(), so it will be investigated when necessary. Link: https://lkml.kernel.org/r/20250226210032.2044041-8-ziy@nvidia.com Link: https://lkml.kernel.org/r/20250307174001.242794-8-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 36 ++++++++++++++++++++++++++++++++++++ mm/huge_memory.c | 6 +++--- mm/truncate.c | 37 ++++++++++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 4 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e57e811cfd3c..e893d546a49f 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -345,6 +345,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); int min_order_for_split(struct folio *folio); int split_folio_to_list(struct folio *folio, struct list_head *list); +bool uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns); +bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, + bool warns); +int folio_split(struct folio *folio, unsigned int new_order, struct page *page, + struct list_head *list); +/* + * try_folio_split - try to split a @folio at @page using non uniform split. + * @folio: folio to be split + * @page: split to order-0 at the given page + * @list: store the after-split folios + * + * Try to split a @folio at @page using non uniform split to order-0, if + * non uniform split is not supported, fall back to uniform split. + * + * Return: 0: split is successful, otherwise split failed. 
+ */ +static inline int try_folio_split(struct folio *folio, struct page *page, + struct list_head *list) +{ + int ret = min_order_for_split(folio); + + if (ret < 0) + return ret; + + if (!non_uniform_split_supported(folio, 0, false)) + return split_huge_page_to_list_to_order(&folio->page, list, + ret); + return folio_split(folio, ret, page, list); +} static inline int split_huge_page(struct page *page) { struct folio *folio = page_folio(page); @@ -537,6 +567,12 @@ static inline int split_folio_to_list(struct folio *folio, struct list_head *lis return 0; } +static inline int try_folio_split(struct folio *folio, struct page *page, + struct list_head *list) +{ + return 0; +} + static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c6ad2e4053d8..e3ed8e9523f5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3654,7 +3654,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, return ret; } -static bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, +bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, bool warns) { if (folio_test_anon(folio)) { @@ -3686,7 +3686,7 @@ static bool non_uniform_split_supported(struct folio *folio, unsigned int new_or } /* See comments in non_uniform_split_supported() */ -static bool uniform_split_supported(struct folio *folio, unsigned int new_order, +bool uniform_split_supported(struct folio *folio, unsigned int new_order, bool warns) { if (folio_test_anon(folio)) { @@ -4005,7 +4005,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, * * After split, folio is left locked for caller. */ -static int folio_split(struct folio *folio, unsigned int new_order, +int folio_split(struct folio *folio, unsigned int new_order, struct page *split_at, struct list_head *list) { return __folio_split(folio, new_order, split_at, &folio->page, list, diff --git a/mm/truncate.c b/mm/truncate.c index 79570045071c..5d98054094d1 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -192,6 +192,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) { loff_t pos = folio_pos(folio); unsigned int offset, length; + struct page *split_at, *split_at2; if (pos < start) offset = start - pos; @@ -221,8 +222,42 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) folio_invalidate(folio, offset, length); if (!folio_test_large(folio)) return true; - if (split_folio(folio) == 0) + + split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); + split_at2 = folio_page(folio, + PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); + + if (!try_folio_split(folio, split_at, NULL)) { + /* + * try to split at offset + length to make sure folios within + * the range can be dropped, especially to avoid memory waste + * for shmem truncate + */ + struct folio *folio2 = page_folio(split_at2); + + if (!folio_try_get(folio2)) + goto no_split; + + if (!folio_test_large(folio2)) + goto out; + + if (!folio_trylock(folio2)) + goto out; + + /* + * make sure folio2 is large and does not change its mapping. + * Its split result does not matter here. 
+ */ + if (folio_test_large(folio2) && + folio2->mapping == folio->mapping) + try_folio_split(folio2, split_at2, NULL); + + folio_unlock(folio2); +out: + folio_put(folio2); +no_split: return true; + } if (folio_test_dirty(folio)) return false; truncate_inode_folio(folio->mapping, folio); From 80a5c494c89f73907ed659a9233a70253774cdae Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 7 Mar 2025 12:40:01 -0500 Subject: [PATCH 374/431] selftests/mm: add tests for folio_split(), buddy allocator like split It splits page cache folios to orders from 0 to 8 at different in-folio offset. Link: https://lkml.kernel.org/r/20250307174001.242794-9-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Kairui Song Signed-off-by: Andrew Morton --- .../selftests/mm/split_huge_page_test.c | 34 +++++++++++++++---- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index e0304046b1a0..719c5e2a6624 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -456,7 +457,8 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, return -1; } -void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc) +void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc, + int order, int offset) { int fd; char *addr; @@ -474,7 +476,12 @@ void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_l return; err = 0; - write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order); + if (offset == -1) + write_debugfs(PID_FMT, getpid(), (uint64_t)addr, + (uint64_t)addr + fd_size, order); + else + write_debugfs(PID_FMT, getpid(), (uint64_t)addr, + (uint64_t)addr + fd_size, order, offset); for (i = 0; i < fd_size; i++) if (*(addr + i) != (char)i) { @@ -493,9 +500,15 @@ void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_l munmap(addr, fd_size); close(fd); unlink(testfile); - if (err) - ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order); - ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order); + if (offset == -1) { + if (err) + ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order); + ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order); + } else { + if (err) + ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d at in-folio offset %d failed\n", order, offset); + ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d at in-folio offset %d passed\n", order, offset); + } } int main(int argc, char **argv) @@ -506,6 +519,7 @@ int main(int argc, char **argv) char fs_loc_template[] = "/tmp/thp_fs_XXXXXX"; const char *fs_loc; bool created_tmp; + int offset; ksft_print_header(); @@ -517,7 +531,7 @@ int main(int argc, char **argv) if (argc > 1) optional_xfs_path = argv[1]; - ksft_set_plan(1+8+1+9+9); + ksft_set_plan(1+8+1+9+9+8*4+2); pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; @@ -540,7 +554,13 @@ int main(int argc, char **argv) created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template, &fs_loc); for (i = 8; i >= 0; i--) - 
split_thp_in_pagecache_to_order(fd_size, i, fs_loc); + split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1); + + for (i = 0; i < 9; i++) + for (offset = 0; + offset < pmd_pagesize / pagesize; + offset += MAX(pmd_pagesize / pagesize / 4, 1 << i)) + split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset); cleanup_thp_fs(fs_loc, created_tmp); ksft_finished(); From 200a89c159a7a416115e6e309183c82183bf98aa Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 14 Mar 2025 18:21:12 -0400 Subject: [PATCH 375/431] mm/filemap: use xas_try_split() in __filemap_add_folio() Patch series "Minimize xa_node allocation during xarry split", v3. When splitting a multi-index entry in XArray from order-n to order-m, existing xas_split_alloc()+xas_split() approach requires 2^(n % XA_CHUNK_SHIFT) xa_node allocations. But its callers, __filemap_add_folio() and shmem_split_large_entry(), use at most 1 xa_node. To minimize xa_node allocation and remove the limitation of no split from order-12 (or above) to order-0 (or anything between 0 and 5)[1], xas_try_split() was added[2], which allocates (n / XA_CHUNK_SHIFT - m / XA_CHUNK_SHIFT) xa_node. It is used for non-uniform folio split, but can be used by __filemap_add_folio() and shmem_split_large_entry(). xas_split_alloc() and xas_split() split an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | | | ------- --- --- ------- | | ... | | V V V V ----------- ----------- ----------- ----------- | xa_node | | xa_node | ... | xa_node | | xa_node | ----------- ----------- ----------- ----------- xas_try_split() splits an order-9 to order-0: --------------------------------- | | | | | | | | | | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | | | | | | | | | --------------------------------- | | V ----------- | xa_node | ----------- xas_try_split() is designed to be called iteratively with n = m + 1. xas_try_split_mini_order() is added to minmize the number of calls to xas_try_split() by telling the caller the next minimal order to split to instead of n - 1. Splitting order-n to order-m when m= l * XA_CHUNK_SHIFT does not require xa_node allocation and requires 1 xa_node when n=l * XA_CHUNK_SHIFT and m = n - 1, so it is OK to use xas_try_split() with n > m + 1 when no new xa_node is needed. xfstests quick group test passed on xfs and tmpfs. [1] https://lore.kernel.org/linux-mm/Z6YX3RznGLUD07Ao@casper.infradead.org/ [2] https://lore.kernel.org/linux-mm/20250226210032.2044041-1-ziy@nvidia.com/ This patch (of 2): During __filemap_add_folio(), a shadow entry is covering n slots and a folio covers m slots with m < n is to be added. Instead of splitting all n slots, only the m slots covered by the folio need to be split and the remaining n-m shadow entries can be retained with orders ranging from m to n-1. This method only requires (n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT) new xa_nodes instead of (n % XA_CHUNK_SHIFT) * ((n/XA_CHUNK_SHIFT) - (m/XA_CHUNK_SHIFT)) new xa_nodes, compared to the original xas_split_alloc() + xas_split() one. For example, to insert an order-0 folio when an order-9 shadow entry is present (assuming XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of 8. xas_try_split_min_order() is introduced to reduce the number of calls to xas_try_split() during split. 
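For illustration only (this sketch is not part of the patch; split_entry_down_to() and its arguments are hypothetical names for the existing shadow entry @old at @index, its current @order and the target folio order @forder), the iterative split pattern described above follows the loop added to __filemap_add_folio() below:

	static void split_entry_down_to(struct xa_state *xas, void *old,
					unsigned long index,
					unsigned int order, unsigned int forder)
	{
		/* smallest order one xas_try_split() call can reach, clamped to forder */
		unsigned int split_order = max(forder, xas_try_split_min_order(order));

		while (order > forder) {
			xas_set_order(xas, index, split_order);
			xas_try_split(xas, old, order);
			if (xas_error(xas))
				return;	/* caller checks xas_error()/xas_nomem() and retries */
			order = split_order;
			split_order = max(xas_try_split_min_order(split_order), forder);
		}
	}
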
Link: https://lkml.kernel.org/r/20250314222113.711703-1-ziy@nvidia.com Link: https://lkml.kernel.org/r/20250314222113.711703-2-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Baolin Wang Cc: Hugh Dickins Cc: Kairui Song Cc: Miaohe Lin Cc: Mattew Wilcox Cc: David Hildenbrand Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/xarray.h | 7 +++++++ lib/xarray.c | 25 +++++++++++++++++++++++ mm/filemap.c | 45 +++++++++++++++++------------------------- 3 files changed, 50 insertions(+), 27 deletions(-) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 4010195201c9..78eede109b1a 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1556,6 +1556,7 @@ int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); void xas_try_split(struct xa_state *xas, void *entry, unsigned int order); +unsigned int xas_try_split_min_order(unsigned int order); #else static inline int xa_get_order(struct xarray *xa, unsigned long index) { @@ -1582,6 +1583,12 @@ static inline void xas_try_split(struct xa_state *xas, void *entry, unsigned int order) { } + +static inline unsigned int xas_try_split_min_order(unsigned int order) +{ + return 0; +} + #endif /** diff --git a/lib/xarray.c b/lib/xarray.c index 3bae48558e21..9644b18af18d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1134,6 +1134,28 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } EXPORT_SYMBOL_GPL(xas_split); +/** + * xas_try_split_min_order() - Minimal split order xas_try_split() can accept + * @order: Current entry order. + * + * xas_try_split() can split a multi-index entry to smaller than @order - 1 if + * no new xa_node is needed. This function provides the minimal order + * xas_try_split() supports. + * + * Return: the minimal order xas_try_split() supports + * + * Context: Any context. + * + */ +unsigned int xas_try_split_min_order(unsigned int order) +{ + if (order % XA_CHUNK_SHIFT == 0) + return order == 0 ? 0 : order - 1; + + return order - (order % XA_CHUNK_SHIFT); +} +EXPORT_SYMBOL_GPL(xas_try_split_min_order); + /** * xas_try_split() - Try to split a multi-index entry. * @xas: XArray operation state. @@ -1145,6 +1167,9 @@ EXPORT_SYMBOL_GPL(xas_split); * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is * NULL. If more new xa_node are needed, the function gives EINVAL error. * + * NOTE: use xas_try_split_min_order() to get next split order instead of + * @order - 1 if you want to minmize xas_try_split() calls. + * * Context: Any context. The caller should hold the xa_lock. 
*/ void xas_try_split(struct xa_state *xas, void *entry, unsigned int order) diff --git a/mm/filemap.c b/mm/filemap.c index 152993a86de3..cc69f174f76b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -857,11 +857,10 @@ EXPORT_SYMBOL_GPL(replace_page_cache_folio); noinline int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) { - XA_STATE(xas, &mapping->i_pages, index); - void *alloced_shadow = NULL; - int alloced_order = 0; + XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); bool huge; long nr; + unsigned int forder = folio_order(folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); @@ -870,7 +869,6 @@ noinline int __filemap_add_folio(struct address_space *mapping, mapping_set_update(&xas, mapping); VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); - xas_set_order(&xas, index, folio_order(folio)); huge = folio_test_hugetlb(folio); nr = folio_nr_pages(folio); @@ -880,7 +878,7 @@ noinline int __filemap_add_folio(struct address_space *mapping, folio->index = xas.xa_index; for (;;) { - int order = -1, split_order = 0; + int order = -1; void *entry, *old = NULL; xas_lock_irq(&xas); @@ -898,21 +896,25 @@ noinline int __filemap_add_folio(struct address_space *mapping, order = xas_get_order(&xas); } - /* entry may have changed before we re-acquire the lock */ - if (alloced_order && (old != alloced_shadow || order != alloced_order)) { - xas_destroy(&xas); - alloced_order = 0; - } - if (old) { - if (order > 0 && order > folio_order(folio)) { + if (order > 0 && order > forder) { + unsigned int split_order = max(forder, + xas_try_split_min_order(order)); + /* How to handle large swap entries? */ BUG_ON(shmem_mapping(mapping)); - if (!alloced_order) { - split_order = order; - goto unlock; + + while (order > forder) { + xas_set_order(&xas, index, split_order); + xas_try_split(&xas, old, order); + if (xas_error(&xas)) + goto unlock; + order = split_order; + split_order = + max(xas_try_split_min_order( + split_order), + forder); } - xas_split(&xas, old, order); xas_reset(&xas); } if (shadowp) @@ -936,17 +938,6 @@ noinline int __filemap_add_folio(struct address_space *mapping, unlock: xas_unlock_irq(&xas); - /* split needed, alloc here and retry. */ - if (split_order) { - xas_split_alloc(&xas, old, split_order, gfp); - if (xas_error(&xas)) - goto error; - alloced_shadow = old; - alloced_order = split_order; - xas_reset(&xas); - continue; - } - if (!xas_nomem(&xas, gfp)) break; } From d53c78fffe7ad364397c693522ceb4d152c2aacd Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 14 Mar 2025 18:21:13 -0400 Subject: [PATCH 376/431] mm/shmem: use xas_try_split() in shmem_split_large_entry() During shmem_split_large_entry(), large swap entries are covering n slots and an order-0 folio needs to be inserted. Instead of splitting all n slots, only the 1 slot covered by the folio need to be split and the remaining n-1 shadow entries can be retained with orders ranging from 0 to n-1. This method only requires (n/XA_CHUNK_SHIFT) new xa_nodes instead of (n % XA_CHUNK_SHIFT) * (n/XA_CHUNK_SHIFT) new xa_nodes, compared to the original xas_split_alloc() + xas_split() one. For example, to split an order-9 large swap entry (assuming XA_CHUNK_SHIFT is 6), 1 xa_node is needed instead of 8. xas_try_split_min_order() is used to reduce the number of calls to xas_try_split() during split. 
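As a worked example (not taken from the changelog; it assumes XA_CHUNK_SHIFT == 6, as in the description above), splitting an order-9 swap entry down to order-0 walks through the orders returned by xas_try_split_min_order():

	9 -> 6   (9 % 6 != 0, next order 9 - 3 = 6;  9/6 - 6/6 = 0 new xa_nodes)
	6 -> 5   (6 % 6 == 0, next order 6 - 1 = 5;  6/6 - 5/6 = 1 new xa_node)
	5 -> 0   (5 % 6 != 0, next order 5 - 5 = 0;  5/6 - 0/6 = 0 new xa_nodes)

so a single xa_node is allocated in total, compared to 8 with the old xas_split_alloc() + xas_split() approach.
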
Link: https://lkml.kernel.org/r/20250314222113.711703-3-ziy@nvidia.com Signed-off-by: Zi Yan Reviewed-by: Baolin Wang Tested-by: Baolin Wang Cc: Baolin Wang Cc: Hugh Dickins Cc: Kairui Song Cc: Mattew Wilcox Cc: Miaohe Lin Cc: David Hildenbrand Cc: John Hubbard Cc: Kefeng Wang Cc: Kirill A. Shuemov Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/shmem.c | 59 ++++++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 8de9b4a07a8a..405c898266d4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2153,15 +2153,16 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index, { struct address_space *mapping = inode->i_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); - void *alloced_shadow = NULL; - int alloced_order = 0, i; + int split_order = 0, entry_order; + int i; /* Convert user data gfp flags to xarray node gfp flags */ gfp &= GFP_RECLAIM_MASK; for (;;) { - int order = -1, split_order = 0; void *old = NULL; + int cur_order; + pgoff_t swap_index; xas_lock_irq(&xas); old = xas_load(&xas); @@ -2170,60 +2171,56 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index, goto unlock; } - order = xas_get_order(&xas); + entry_order = xas_get_order(&xas); - /* Swap entry may have changed before we re-acquire the lock */ - if (alloced_order && - (old != alloced_shadow || order != alloced_order)) { - xas_destroy(&xas); - alloced_order = 0; - } + if (!entry_order) + goto unlock; /* Try to split large swap entry in pagecache */ - if (order > 0) { - if (!alloced_order) { - split_order = order; + cur_order = entry_order; + swap_index = round_down(index, 1 << entry_order); + + split_order = xas_try_split_min_order(cur_order); + + while (cur_order > 0) { + pgoff_t aligned_index = + round_down(index, 1 << cur_order); + pgoff_t swap_offset = aligned_index - swap_index; + + xas_set_order(&xas, index, split_order); + xas_try_split(&xas, old, cur_order); + if (xas_error(&xas)) goto unlock; - } - xas_split(&xas, old, order); /* * Re-set the swap entry after splitting, and the swap * offset of the original large entry must be continuous. */ - for (i = 0; i < 1 << order; i++) { - pgoff_t aligned_index = round_down(index, 1 << order); + for (i = 0; i < 1 << cur_order; + i += (1 << split_order)) { swp_entry_t tmp; - tmp = swp_entry(swp_type(swap), swp_offset(swap) + i); + tmp = swp_entry(swp_type(swap), + swp_offset(swap) + swap_offset + + i); __xa_store(&mapping->i_pages, aligned_index + i, swp_to_radix_entry(tmp), 0); } + cur_order = split_order; + split_order = xas_try_split_min_order(split_order); } unlock: xas_unlock_irq(&xas); - /* split needed, alloc here and retry. 
*/ - if (split_order) { - xas_split_alloc(&xas, old, split_order, gfp); - if (xas_error(&xas)) - goto error; - alloced_shadow = old; - alloced_order = split_order; - xas_reset(&xas); - continue; - } - if (!xas_nomem(&xas, gfp)) break; } -error: if (xas_error(&xas)) return xas_error(&xas); - return alloced_order; + return entry_order; } /* From c637c61c9ed0203d9a1f2ba21fb7a49ddca3ef8f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 10 Mar 2025 09:50:09 -0700 Subject: [PATCH 377/431] mm/damon/sysfs-schemes: avoid Wformat-security warning on damon_sysfs_access_pattern_add_range_dir() When -Wformat-security is given, compiler warns as a potential security issue on damon_sysfs_access_pattern_add_range_dir() as below: mm/damon/sysfs-schemes.c: In function `damon_sysfs_access_pattern_add_range_dir': mm/damon/sysfs-schemes.c:1503:25: warning: format not a string literal and no format arguments [-Wformat-security] 1503 | &access_pattern->kobj, name); | ^ Fix it by using "%s" as the format and the name as the argument. Link: https://lkml.kernel.org/r/20250310165009.652491-1-sj@kernel.org Fixes: 7e84b1f8212a ("mm/damon/sysfs: support DAMON-based Operation Schemes") Signed-off-by: SeongJae Park Cc: Arnd Bergmann Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 985cfc750a90..5023f2b690d6 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -1471,7 +1471,7 @@ static int damon_sysfs_access_pattern_add_range_dir( if (!range) return -ENOMEM; err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype, - &access_pattern->kobj, name); + &access_pattern->kobj, "%s", name); if (err) kobject_put(&range->kobj); else From 61659efdb35ce6c6ac7639342098f3c4548b794b Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Wed, 12 Mar 2025 09:30:43 +1000 Subject: [PATCH 378/431] drivers/base/memory: improve add_boot_memory_block() Patch series "drivers/base/memory: Two cleanups", v3. Two cleanups to drivers/base/memory. This patch (of 2)L It's unnecessary to count the present sections for the specified block since the block will be added if any section in the block is present. Besides, for_each_present_section_nr() can be reused as Andrew Morton suggested. Improve by using for_each_present_section_nr() and dropping the unnecessary @section_count. No functional changes intended. Link: https://lkml.kernel.org/r/20250311233045.148943-1-gshan@redhat.com Link: https://lkml.kernel.org/r/20250311233045.148943-2-gshan@redhat.com Signed-off-by: Gavin Shan Acked-by: David Hildenbrand Acked-by: Oscar Salvador Cc: Danilo Krummrich Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton --- drivers/base/memory.c | 17 ++++++++--------- include/linux/mmzone.h | 5 +++++ mm/sparse.c | 5 ----- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 4765f2928725..8f3a41d9bfaa 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -818,18 +818,17 @@ static int add_memory_block(unsigned long block_id, unsigned long state, static int __init add_boot_memory_block(unsigned long base_section_nr) { - int section_count = 0; unsigned long nr; - for (nr = base_section_nr; nr < base_section_nr + sections_per_block; - nr++) - if (present_section_nr(nr)) - section_count++; + for_each_present_section_nr(base_section_nr, nr) { + if (nr >= (base_section_nr + sections_per_block)) + break; - if (section_count == 0) - return 0; - return add_memory_block(memory_block_id(base_section_nr), - MEM_ONLINE, NULL, NULL); + return add_memory_block(memory_block_id(base_section_nr), + MEM_ONLINE, NULL, NULL); + } + + return 0; } static int add_hotplug_memory_block(unsigned long block_id, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 550dbba92521..dbb0ad69e17f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -2140,6 +2140,11 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr) return -1; } +#define for_each_present_section_nr(start, section_nr) \ + for (section_nr = next_present_section_nr(start - 1); \ + section_nr != -1; \ + section_nr = next_present_section_nr(section_nr)) + /* * These are _only_ used during initialisation, therefore they * can use __initdata ... They could have names to indicate diff --git a/mm/sparse.c b/mm/sparse.c index ee0234a77c7f..3c012cf83cc2 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -170,11 +170,6 @@ static void __section_mark_present(struct mem_section *ms, ms->section_mem_map |= SECTION_MARKED_PRESENT; } -#define for_each_present_section_nr(start, section_nr) \ - for (section_nr = next_present_section_nr(start-1); \ - section_nr != -1; \ - section_nr = next_present_section_nr(section_nr)) - static inline unsigned long first_present_section_nr(void) { return next_present_section_nr(-1); From 1a24776fca39ed79d7f5e0b0592b5addd784981e Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Wed, 12 Mar 2025 09:30:44 +1000 Subject: [PATCH 379/431] drivers/base/memory: correct the field name in the header Replace @blocks with @memory_blocks to match with the definition of struct memory_group. Link: https://lkml.kernel.org/r/20250311233045.148943-3-gshan@redhat.com Signed-off-by: Gavin Shan Acked-by: David Hildenbrand Acked-by: Oscar Salvador Cc: Danilo Krummrich Cc: Greg Kroah-Hartman Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton --- include/linux/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/memory.h b/include/linux/memory.h index c0afee5d126e..12daa6ec7d09 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -25,7 +25,7 @@ /** * struct memory_group - a logical group of memory blocks * @nid: The node id for all memory blocks inside the memory group. - * @blocks: List of all memory blocks belonging to this memory group. + * @memory_blocks: List of all memory blocks belonging to this memory group. * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this * memory group. 
* @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this From 5d89666bd99831cee14abcf201b3867d9f15abae Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 10 Mar 2025 14:04:17 +0000 Subject: [PATCH 380/431] mm: use ptep_get() instead of directly dereferencing pte_t* It is best practice for all pte accesses to go via the arch helpers, to ensure non-torn values and to allow the arch to intervene where needed (contpte for arm64 for example). While in this case it was probably safe to directly dereference, let's tidy it up for consistency. Link: https://lkml.kernel.org/r/20250310140418.1737409-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Lorenzo Stoakes Reviewed-by: Qi Zheng Reviewed-by: Anshuman Khandual Reviewed-by: Dev Jain Signed-off-by: Andrew Morton --- mm/migrate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/migrate.c b/mm/migrate.c index c0adea67cd62..f3ee6d8d5e2e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -202,7 +202,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw, return false; VM_BUG_ON_PAGE(!PageAnon(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page); + VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page); if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) || mm_forbids_zeropage(pvmw->vma->vm_mm)) From 116eb468956b8532eae4d008803d4957c2220447 Mon Sep 17 00:00:00 2001 From: Enrico Bravi Date: Mon, 10 Mar 2025 12:25:37 +0100 Subject: [PATCH 381/431] mm/shmem: fix functions documentation Add missing parenthesis in @name parameter description. Link: https://lkml.kernel.org/r/20250310112535.84754-1-enrico.bravi@polito.it Signed-off-by: Enrico Bravi Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 405c898266d4..7b738d8d6581 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5841,7 +5841,7 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, * underlying inode. So users of this interface must do LSM checks at a * higher layer. The users are the big_key and shm implementations. LSM * checks are provided at the key or shm level rather than the inode. 
- * @name: name for dentry (to be seen in /proc//maps + * @name: name for dentry (to be seen in /proc//maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ @@ -5853,7 +5853,7 @@ EXPORT_SYMBOL_GPL(shmem_kernel_file_setup); /** * shmem_file_setup - get an unlinked file living in tmpfs - * @name: name for dentry (to be seen in /proc//maps + * @name: name for dentry (to be seen in /proc//maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ @@ -5866,7 +5866,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup); /** * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs * @mnt: the tmpfs mount where the file will be created - * @name: name for dentry (to be seen in /proc//maps + * @name: name for dentry (to be seen in /proc//maps) * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ From 8c02048d1c6126527f15752a5e0849dc49cefeeb Mon Sep 17 00:00:00 2001 From: Martin Liu Date: Sat, 8 Mar 2025 03:46:00 +0000 Subject: [PATCH 382/431] mm/page_alloc: add trace event for per-zone watermark setup Patch series "Add tracepoints for lowmem reserves, watermarks and totalreserve_pages", v2. This patchset introduces tracepoints to track changes in the lowmem reserves, watermarks and totalreserve_pages. This helps to track the exact timing of such changes and understand their relation to reclaim activities. The tracepoints added are: mm_setup_per_zone_lowmem_reserve mm_setup_per_zone_wmarks mm_calculate_totalreserve_pagesi This patch (of 3): This commit introduces the `mm_setup_per_zone_wmarks` trace event, which provides detailed insights into the kernel's per-zone watermark configuration, offering precise timing and the ability to correlate watermark changes with specific kernel events. While `/proc/zoneinfo` provides some information about zone watermarks, this trace event offers: 1. The ability to link watermark changes to specific kernel events and logic. 2. The ability to capture rapid or short-lived changes in watermarks that may be missed by user-space polling 3. Diagnosing unexpected kswapd activity or excessive direct reclaim triggered by rapidly changing watermarks. 
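As a usage sketch (not part of the patch; it assumes tracefs is mounted at /sys/kernel/tracing), the events added by this series live under the kmem group and can be enabled like any other tracepoint:

	echo 1 > /sys/kernel/tracing/events/kmem/mm_setup_per_zone_wmarks/enable
	echo 1 > /sys/kernel/tracing/events/kmem/mm_setup_per_zone_lowmem_reserve/enable
	echo 1 > /sys/kernel/tracing/events/kmem/mm_calculate_totalreserve_pages/enable
	cat /sys/kernel/tracing/trace_pipe
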
Link: https://lkml.kernel.org/r/20250308034606.2036033-1-liumartin@google.com Link: https://lkml.kernel.org/r/20250308034606.2036033-2-liumartin@google.com Signed-off-by: Martin Liu Acked-by: David Rientjes Cc: Steven Rostedt Cc: Martin Liu Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Signed-off-by: Andrew Morton --- include/trace/events/kmem.h | 33 +++++++++++++++++++++++++++++++++ mm/page_alloc.c | 1 + 2 files changed, 34 insertions(+) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index b37eb0a7060f..5fd392dae503 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -342,6 +342,39 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info, __entry->nr_mapped) ); +TRACE_EVENT(mm_setup_per_zone_wmarks, + + TP_PROTO(struct zone *zone), + + TP_ARGS(zone), + + TP_STRUCT__entry( + __field(int, node_id) + __string(name, zone->name) + __field(unsigned long, watermark_min) + __field(unsigned long, watermark_low) + __field(unsigned long, watermark_high) + __field(unsigned long, watermark_promo) + ), + + TP_fast_assign( + __entry->node_id = zone->zone_pgdat->node_id; + __assign_str(name); + __entry->watermark_min = zone->_watermark[WMARK_MIN]; + __entry->watermark_low = zone->_watermark[WMARK_LOW]; + __entry->watermark_high = zone->_watermark[WMARK_HIGH]; + __entry->watermark_promo = zone->_watermark[WMARK_PROMO]; + ), + + TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu", + __entry->node_id, + __get_str(name), + __entry->watermark_min, + __entry->watermark_low, + __entry->watermark_high, + __entry->watermark_promo) +); + /* * Required for uniquely and securely identifying mm in rss_stat tracepoint. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2c6ae7e5aaad..b739367434ce 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6006,6 +6006,7 @@ static void __setup_per_zone_wmarks(void) zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; + trace_mm_setup_per_zone_wmarks(zone); spin_unlock_irqrestore(&zone->lock, flags); } From a293aba4a584709889f77a0ad0c45746aecf1b9f Mon Sep 17 00:00:00 2001 From: Martin Liu Date: Sat, 8 Mar 2025 03:46:01 +0000 Subject: [PATCH 383/431] mm/page_alloc: add trace event for per-zone lowmem reserve setup This commit introduces the `mm_setup_per_zone_lowmem_reserve` trace event,which provides detailed insights into the kernel's per-zone lowmem reserve configuration. The trace event provides precise timestamps, allowing developers to 1. Correlate lowmem reserve changes with specific kernel events and able to diagnose unexpected kswapd or direct reclaim behavior triggered by dynamic changes in lowmem reserve. 2. Know memory allocation failures that occur due to insufficient lowmem reserve, by precisely correlating allocation attempts with reserve adjustments. 
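A hedged way to trigger the new event on a running system (not from the changelog): writing the current ratios back to /proc/sys/vm/lowmem_reserve_ratio re-runs setup_per_zone_lowmem_reserve(), so one event should be emitted for every zone/upper-zone pair on every node, for example:

	cat /proc/sys/vm/lowmem_reserve_ratio > /tmp/ratios
	cat /tmp/ratios > /proc/sys/vm/lowmem_reserve_ratio
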
Link: https://lkml.kernel.org/r/20250308034606.2036033-3-liumartin@google.com Signed-off-by: Martin Liu Acked-by: David Rientjes Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Steven Rostedt Signed-off-by: Andrew Morton --- include/trace/events/kmem.h | 27 +++++++++++++++++++++++++++ mm/page_alloc.c | 2 ++ 2 files changed, 29 insertions(+) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 5fd392dae503..9623e68d4d26 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -375,6 +375,33 @@ TRACE_EVENT(mm_setup_per_zone_wmarks, __entry->watermark_promo) ); +TRACE_EVENT(mm_setup_per_zone_lowmem_reserve, + + TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve), + + TP_ARGS(zone, upper_zone, lowmem_reserve), + + TP_STRUCT__entry( + __field(int, node_id) + __string(name, zone->name) + __string(upper_name, upper_zone->name) + __field(long, lowmem_reserve) + ), + + TP_fast_assign( + __entry->node_id = zone->zone_pgdat->node_id; + __assign_str(name); + __assign_str(upper_name); + __entry->lowmem_reserve = lowmem_reserve; + ), + + TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld", + __entry->node_id, + __get_str(name), + __get_str(upper_name), + __entry->lowmem_reserve) +); + /* * Required for uniquely and securely identifying mm in rss_stat tracepoint. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b739367434ce..a82d96cb3044 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5943,6 +5943,8 @@ static void setup_per_zone_lowmem_reserve(void) zone->lowmem_reserve[j] = 0; else zone->lowmem_reserve[j] = managed_pages / ratio; + trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, + zone->lowmem_reserve[j]); } } } From 15766485e4a51bec2dcce304c089a95550720033 Mon Sep 17 00:00:00 2001 From: Martin Liu Date: Sat, 8 Mar 2025 03:46:02 +0000 Subject: [PATCH 384/431] mm/page_alloc: add trace event for totalreserve_pages calculation This commit introduces a new trace event, `mm_calculate_totalreserve_pages`, which reports the new reserve value at the exact time when it takes effect. The `totalreserve_pages` value represents the total amount of memory reserved across all zones and nodes in the system. This reserved memory is crucial for ensuring that critical kernel operations have access to sufficient memory, even under memory pressure. By tracing the `totalreserve_pages` value, developers can gain insights that how the total reserved memory changes over time. 
Link: https://lkml.kernel.org/r/20250308034606.2036033-4-liumartin@google.com Signed-off-by: Martin Liu Acked-by: David Rientjes Cc: "Masami Hiramatsu (Google)" Cc: Mathieu Desnoyers Cc: Steven Rostedt Signed-off-by: Andrew Morton --- include/trace/events/kmem.h | 18 ++++++++++++++++++ mm/page_alloc.c | 1 + 2 files changed, 19 insertions(+) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 9623e68d4d26..f74925a6cf69 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -402,6 +402,24 @@ TRACE_EVENT(mm_setup_per_zone_lowmem_reserve, __entry->lowmem_reserve) ); +TRACE_EVENT(mm_calculate_totalreserve_pages, + + TP_PROTO(unsigned long totalreserve_pages), + + TP_ARGS(totalreserve_pages), + + TP_STRUCT__entry( + __field(unsigned long, totalreserve_pages) + ), + + TP_fast_assign( + __entry->totalreserve_pages = totalreserve_pages; + ), + + TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages) +); + + /* * Required for uniquely and securely identifying mm in rss_stat tracepoint. */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a82d96cb3044..0be1fedd1201 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5914,6 +5914,7 @@ static void calculate_totalreserve_pages(void) } } totalreserve_pages = reserve_pages; + trace_mm_calculate_totalreserve_pages(totalreserve_pages); } /* From 9ecd2f839b2596aaa510f20e18d496c2e3e0fa56 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 12 Mar 2025 09:47:47 -0700 Subject: [PATCH 385/431] mm/madvise: use is_memory_failure() from madvise_do_behavior() Patch series "mm/madvise: cleanup requests validations and classifications". Cleanup madvise entry level code for cleaner request validations and classifications. This patch (of 4): To reduce redundant open-coded checks of CONFIG_MEMORY_FAILURE and MADV_{HWPOISON,SOFT_OFFLINE} in madvise_[un]lock(), is_memory_failure() is introduced. madvise_do_behavior() is still doing the same open-coded check, though. Use is_memory_failure() instead. To avoid build failure on !CONFIG_MEMORY_FAILURE case, implement an empty madvise_inject_error() under the config. Also move the definition of is_memory_failure() inside #ifdef CONFIG_MEMORY_FAILURE clause for madvise_inject_error() definition, to reduce duplicated ifdef clauses. Link: https://lkml.kernel.org/r/20250312164750.59215-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250312164750.59215-2-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Cc: David Hildenbrand Cc: Liam R. 
Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 49 +++++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 388dc289b5d1..c3ab1f283b18 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1392,7 +1392,32 @@ static int madvise_inject_error(int behavior, return 0; } -#endif + +static bool is_memory_failure(int behavior) +{ + switch (behavior) { + case MADV_HWPOISON: + case MADV_SOFT_OFFLINE: + return true; + default: + return false; + } +} + +#else + +static int madvise_inject_error(int behavior, + unsigned long start, unsigned long end) +{ + return 0; +} + +static bool is_memory_failure(int behavior) +{ + return false; +} + +#endif /* CONFIG_MEMORY_FAILURE */ static bool madvise_behavior_valid(int behavior) @@ -1569,24 +1594,6 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, } #endif /* CONFIG_ANON_VMA_NAME */ -#ifdef CONFIG_MEMORY_FAILURE -static bool is_memory_failure(int behavior) -{ - switch (behavior) { - case MADV_HWPOISON: - case MADV_SOFT_OFFLINE: - return true; - default: - return false; - } -} -#else -static bool is_memory_failure(int behavior) -{ - return false; -} -#endif - static int madvise_lock(struct mm_struct *mm, int behavior) { if (is_memory_failure(behavior)) @@ -1640,10 +1647,8 @@ static int madvise_do_behavior(struct mm_struct *mm, unsigned long end; int error; -#ifdef CONFIG_MEMORY_FAILURE - if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) + if (is_memory_failure(behavior)) return madvise_inject_error(behavior, start, start + len_in); -#endif start = untagged_addr_remote(mm, start); end = start + len; From f4a578d34590db42b4870ac694193413421610c1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 12 Mar 2025 09:47:48 -0700 Subject: [PATCH 386/431] mm/madvise: split out populate behavior check logic madvise_do_behavior() has a long open-coded 'behavior' check for MADV_POPULATE_{READ,WRITE}. It adds multiple layers[1] and make the code arguably take longer time to read. Like is_memory_failure(), split out the check to a separate function. This is not technically removing the additional layer but discourage further extending the switch-case. Also it makes madvise_do_behavior() code shorter and therefore easier to read. [1] https://lore.kernel.org/bd6d0bf1-c79e-46bd-a810-9791efb9ad73@lucifer.local Link: https://lkml.kernel.org/r/20250312164750.59215-3-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Cc: David Hildenbrand Cc: Liam R. 
Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index c3ab1f283b18..611db868ae38 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1640,6 +1640,17 @@ static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) return true; } +static bool is_madvise_populate(int behavior) +{ + switch (behavior) { + case MADV_POPULATE_READ: + case MADV_POPULATE_WRITE: + return true; + default: + return false; + } +} + static int madvise_do_behavior(struct mm_struct *mm, unsigned long start, size_t len_in, size_t len, int behavior) { @@ -1653,16 +1664,11 @@ static int madvise_do_behavior(struct mm_struct *mm, end = start + len; blk_start_plug(&plug); - switch (behavior) { - case MADV_POPULATE_READ: - case MADV_POPULATE_WRITE: + if (is_madvise_populate(behavior)) error = madvise_populate(mm, start, end, behavior); - break; - default: + else error = madvise_walk_vmas(mm, start, end, behavior, madvise_vma_behavior); - break; - } blk_finish_plug(&plug); return error; } From 0a6ffacb3b42233fe44e0faedfcc8e83f9ad99c6 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 12 Mar 2025 09:47:49 -0700 Subject: [PATCH 387/431] mm/madvise: deduplicate madvise_do_behavior() skip case handlings The logic for checking if a given madvise() request for a single memory range can skip real work, namely madvise_do_behavior(), is duplicated in do_madvise() and vector_madvise(). Split out the logic to a function and reuse it. Link: https://lkml.kernel.org/r/20250312164750.59215-4-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Cc: David Hildenbrand Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 57 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index 611db868ae38..ba006d05c7ea 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1640,6 +1640,31 @@ static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior) return true; } +/* + * madvise_should_skip() - Return if the request is invalid or nothing. + * @start: Start address of madvise-requested address range. + * @len_in: Length of madvise-requested address range. + * @behavior: Requested madvise behavor. + * @err: Pointer to store an error code from the check. + * + * If the specified behaviour is invalid or nothing would occur, we skip the + * operation. This function returns true in the cases, otherwise false. In + * the former case we store an error on @err. 
+ */ +static bool madvise_should_skip(unsigned long start, size_t len_in, + int behavior, int *err) +{ + if (!is_valid_madvise(start, len_in, behavior)) { + *err = -EINVAL; + return true; + } + if (start + PAGE_ALIGN(len_in) == start) { + *err = 0; + return true; + } + return false; +} + static bool is_madvise_populate(int behavior) { switch (behavior) { @@ -1747,23 +1772,15 @@ static int madvise_do_behavior(struct mm_struct *mm, */ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) { - unsigned long end; int error; - size_t len; - - if (!is_valid_madvise(start, len_in, behavior)) - return -EINVAL; - - len = PAGE_ALIGN(len_in); - end = start + len; - - if (end == start) - return 0; + if (madvise_should_skip(start, len_in, behavior, &error)) + return error; error = madvise_lock(mm, behavior); if (error) return error; - error = madvise_do_behavior(mm, start, len_in, len, behavior); + error = madvise_do_behavior(mm, start, len_in, PAGE_ALIGN(len_in), + behavior); madvise_unlock(mm, behavior); return error; @@ -1790,19 +1807,13 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, while (iov_iter_count(iter)) { unsigned long start = (unsigned long)iter_iov_addr(iter); size_t len_in = iter_iov_len(iter); - size_t len; + int error; - if (!is_valid_madvise(start, len_in, behavior)) { - ret = -EINVAL; - break; - } - - len = PAGE_ALIGN(len_in); - if (start + len == start) - ret = 0; + if (madvise_should_skip(start, len_in, behavior, &error)) + ret = error; else - ret = madvise_do_behavior(mm, start, len_in, len, - behavior); + ret = madvise_do_behavior(mm, start, len_in, + PAGE_ALIGN(len_in), behavior); /* * An madvise operation is attempting to restart the syscall, * but we cannot proceed as it would not be correct to repeat From be9258a6bf26d2272856214406721762a21aab1b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Wed, 12 Mar 2025 09:47:50 -0700 Subject: [PATCH 388/431] mm/madvise: remove len parameter of madvise_do_behavior() Because madise_should_skip() logic is factored out, making madvise_do_behavior() calculates 'len' on its own rather then receiving it as a parameter makes code simpler. Remove the parameter. Link: https://lkml.kernel.org/r/20250312164750.59215-5-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Cc: David Hildenbrand Cc: Liam R. 
Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/madvise.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index ba006d05c7ea..b17f684322ad 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1677,7 +1677,7 @@ static bool is_madvise_populate(int behavior) } static int madvise_do_behavior(struct mm_struct *mm, - unsigned long start, size_t len_in, size_t len, int behavior) + unsigned long start, size_t len_in, int behavior) { struct blk_plug plug; unsigned long end; @@ -1686,7 +1686,7 @@ static int madvise_do_behavior(struct mm_struct *mm, if (is_memory_failure(behavior)) return madvise_inject_error(behavior, start, start + len_in); start = untagged_addr_remote(mm, start); - end = start + len; + end = start + PAGE_ALIGN(len_in); blk_start_plug(&plug); if (is_madvise_populate(behavior)) @@ -1779,8 +1779,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh error = madvise_lock(mm, behavior); if (error) return error; - error = madvise_do_behavior(mm, start, len_in, PAGE_ALIGN(len_in), - behavior); + error = madvise_do_behavior(mm, start, len_in, behavior); madvise_unlock(mm, behavior); return error; @@ -1812,8 +1811,7 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, if (madvise_should_skip(start, len_in, behavior, &error)) ret = error; else - ret = madvise_do_behavior(mm, start, len_in, - PAGE_ALIGN(len_in), behavior); + ret = madvise_do_behavior(mm, start, len_in, behavior); /* * An madvise operation is attempting to restart the syscall, * but we cannot proceed as it would not be correct to repeat From c0ebbb3841e07c4493e6fe351698806b09a87a37 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 12 Mar 2025 10:10:13 -0400 Subject: [PATCH 389/431] mm: add missing release barrier on PGDAT_RECLAIM_LOCKED unlock The PGDAT_RECLAIM_LOCKED bit is used to provide mutual exclusion of node reclaim for struct pglist_data using a single bit. It is "locked" with a test_and_set_bit (similarly to a try lock) which provides full ordering with respect to loads and stores done within __node_reclaim(). It is "unlocked" with clear_bit(), which does not provide any ordering with respect to loads and stores done before clearing the bit. The lack of clear_bit() memory ordering with respect to stores within __node_reclaim() can cause a subsequent CPU to fail to observe stores from a prior node reclaim. This is not an issue in practice on TSO (e.g. x86), but it is an issue on weakly-ordered architectures (e.g. arm64). Fix this by using clear_bit_unlock rather than clear_bit to clear PGDAT_RECLAIM_LOCKED with a release memory ordering semantic. This provides stronger memory ordering (release rather than relaxed). Link: https://lkml.kernel.org/r/20250312141014.129725-1-mathieu.desnoyers@efficios.com Fixes: d773ed6b856a ("mm: test and set zone reclaim lock before starting reclaim") Signed-off-by: Mathieu Desnoyers Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Alan Stern Cc: Andrea Parri Cc: Will Deacon Cc: Peter Zijlstra Cc: Boqun Feng Cc: Nicholas Piggin Cc: David Howells Cc: Jade Alglave Cc: Luc Maranget Cc: "Paul E. 
McKenney" Cc: Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index be00af3763b5..bbd3913e3887 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -7581,7 +7581,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); - clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); + clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); if (ret) count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS); From ca868cd77063ee670ade6d5d1554e3f5f223afd7 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 12 Mar 2025 10:10:14 -0400 Subject: [PATCH 390/431] mm: lock PGDAT_RECLAIM_LOCKED with acquire memory ordering The PGDAT_RECLAIM_LOCKED bit is used to provide mutual exclusion of node reclaim for struct pglist_data using a single bit. Use test_and_set_bit_lock rather than test_and_set_bit to test-and-set PGDAT_RECLAIM_LOCKED with an acquire memory ordering semantic. This changes the "lock" acquisition from a full barrier to an acquire memory ordering, which is weaker. The acquire semi-permeable barrier paired with the release on unlock is sufficient for this mutual exclusion use-case. No behavior change intended other than to reduce overhead by using the appropriate barrier. Link: https://lkml.kernel.org/r/20250312141014.129725-2-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Alan Stern Cc: Andrea Parri Cc: Will Deacon Cc: Peter Zijlstra Cc: Boqun Feng Cc: Nicholas Piggin Cc: David Howells Cc: Jade Alglave Cc: Luc Maranget Cc: "Paul E. McKenney" Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index bbd3913e3887..2bc740637a6c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -7577,7 +7577,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) return NODE_RECLAIM_NOSCAN; - if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) + if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); From 1a15bb8303b6b104e78028b6c68f76a0d4562134 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 12 Mar 2025 19:28:50 +0800 Subject: [PATCH 391/431] x86/mce: use is_copy_from_user() to determine copy-from-user context Patch series "mm/hwpoison: Fix regressions in memory failure handling", v4. ## 1. What am I trying to do: This patchset resolves two critical regressions related to memory failure handling that have appeared in the upstream kernel since version 5.17, as compared to 5.10 LTS. - copyin case: poison found in user page while kernel copying from user space - instr case: poison found while instruction fetching in user space ## 2. What is the expected outcome and why - For copyin case: Kernel can recover from poison found where kernel is doing get_user() or copy_from_user() if those places get an error return and the kernel return -EFAULT to the process instead of crashing. More specifily, MCE handler checks the fixup handler type to decide whether an in kernel #MC can be recovered. When EX_TYPE_UACCESS is found, the PC jumps to recovery code specified in _ASM_EXTABLE_FAULT() and return a -EFAULT to user space. - For instr case: If a poison found while instruction fetching in user space, full recovery is possible. 
User process takes #PF, Linux allocates a new page and fills by reading from storage. ## 3. What actually happens and why - For copyin case: kernel panic since v5.17 Commit 4c132d1d844a ("x86/futex: Remove .fixup usage") introduced a new extable fixup type, EX_TYPE_EFAULT_REG, and later patches updated the extable fixup type for copy-from-user operations, changing it from EX_TYPE_UACCESS to EX_TYPE_EFAULT_REG. It breaks previous EX_TYPE_UACCESS handling when posion found in get_user() or copy_from_user(). - For instr case: user process is killed by a SIGBUS signal due to #CMCI and #MCE race When an uncorrected memory error is consumed there is a race between the CMCI from the memory controller reporting an uncorrected error with a UCNA signature, and the core reporting and SRAR signature machine check when the data is about to be consumed. ### Background: why *UN*corrected errors tied to *C*MCI in Intel platform [1] Prior to Icelake memory controllers reported patrol scrub events that detected a previously unseen uncorrected error in memory by signaling a broadcast machine check with an SRAO (Software Recoverable Action Optional) signature in the machine check bank. This was overkill because it's not an urgent problem that no core is on the verge of consuming that bad data. It's also found that multi SRAO UCE may cause nested MCE interrupts and finally become an IERR. Hence, Intel downgrades the machine check bank signature of patrol scrub from SRAO to UCNA (Uncorrected, No Action required), and signal changed to #CMCI. Just to add to the confusion, Linux does take an action (in uc_decode_notifier()) to try to offline the page despite the UC*NA* signature name. ### Background: why #CMCI and #MCE race when poison is consuming in Intel platform [1] Having decided that CMCI/UCNA is the best action for patrol scrub errors, the memory controller uses it for reads too. But the memory controller is executing asynchronously from the core, and can't tell the difference between a "real" read and a speculative read. So it will do CMCI/UCNA if an error is found in any read. Thus: 1) Core is clever and thinks address A is needed soon, issues a speculative read. 2) Core finds it is going to use address A soon after sending the read request 3) The CMCI from the memory controller is in a race with MCE from the core that will soon try to retire the load from address A. Quite often (because speculation has got better) the CMCI from the memory controller is delivered before the core is committed to the instruction reading address A, so the interrupt is taken, and Linux offlines the page (marking it as poison). ## Why user process is killed for instr case Commit 046545a661af ("mm/hwpoison: fix error page recovered but reported "not recovered"") tries to fix noise message "Memory error not recovered" and skips duplicate SIGBUSs due to the race. But it also introduced a bug that kill_accessing_process() return -EHWPOISON for instr case, as result, kill_me_maybe() send a SIGBUS to user process. # 4. The fix, in my opinion, should be: - For copyin case: The key point is whether the error context is in a read from user memory. We do not care about the ex-type if we know its a MOV reading from userspace. is_copy_from_user() return true when both of the following two checks are true: - the current instruction is copy - source address is user memory If copy_user is true, we set m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; Then do_machine_check() will try fixup_exception() first. 
- For instr case: let kill_accessing_process() return 0 to prevent a SIGBUS. - For patch 3: The return value of memory_failure() is quite important while discussed instr case regression with Tony and Miaohe for patch 2, so add comment about the return value. This patch (of 3): Commit 4c132d1d844a ("x86/futex: Remove .fixup usage") introduced a new extable fixup type, EX_TYPE_EFAULT_REG, and commit 4c132d1d844a ("x86/futex: Remove .fixup usage") updated the extable fixup type for copy-from-user operations, changing it from EX_TYPE_UACCESS to EX_TYPE_EFAULT_REG. The error context for copy-from-user operations no longer functions as an in-kernel recovery context. Consequently, the error context for copy-from-user operations no longer functions as an in-kernel recovery context, resulting in kernel panics with the message: "Machine check: Data load in unrecoverable area of kernel." To address this, it is crucial to identify if an error context involves a read operation from user memory. The function is_copy_from_user() can be utilized to determine: - the current operation is copy - when reading user memory When these conditions are met, is_copy_from_user() will return true, confirming that it is indeed a direct copy from user memory. This check is essential for correctly handling the context of errors in these operations without relying on the extable fixup types that previously allowed for in-kernel recovery. So, use is_copy_from_user() to determine if a context is copy user directly. Link: https://lkml.kernel.org/r/20250312112852.82415-1-xueshuai@linux.alibaba.com Link: https://lkml.kernel.org/r/20250312112852.82415-2-xueshuai@linux.alibaba.com Fixes: 4c132d1d844a ("x86/futex: Remove .fixup usage") Signed-off-by: Shuai Xue Suggested-by: Peter Zijlstra Acked-by: Borislav Petkov (AMD) Tested-by: Tony Luck Cc: Baolin Wang Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Ruidong Tian Cc: Thomas Gleinxer Cc: Yazen Ghannam Cc: Jane Chu Cc: Jarkko Sakkinen Cc: Jonathan Cameron Cc: Signed-off-by: Andrew Morton --- arch/x86/kernel/cpu/mce/severity.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index dac4d64dfb2a..2235a7477436 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs) copy_user = is_copy_from_user(regs); instrumentation_end(); - switch (fixup_type) { - case EX_TYPE_UACCESS: - if (!copy_user) - return IN_KERNEL; - m->kflags |= MCE_IN_KERNEL_COPYIN; - fallthrough; + if (copy_user) { + m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV; + return IN_KERNEL_RECOV; + } + switch (fixup_type) { case EX_TYPE_FAULT_MCE_SAFE: case EX_TYPE_DEFAULT_MCE_SAFE: m->kflags |= MCE_IN_KERNEL_RECOV; From aaf99ac2ceb7c974f758a635723eeaf48596388e Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 12 Mar 2025 19:28:51 +0800 Subject: [PATCH 392/431] mm/hwpoison: do not send SIGBUS to processes with recovered clean pages When an uncorrected memory error is consumed there is a race between the CMCI from the memory controller reporting an uncorrected error with a UCNA signature, and the core reporting and SRAR signature machine check when the data is about to be consumed. 
- Background: why *UN*corrected errors are tied to *C*MCI on Intel platforms [1] Prior to Icelake, memory controllers reported patrol scrub events that detected a previously unseen uncorrected error in memory by signaling a broadcast machine check with an SRAO (Software Recoverable Action Optional) signature in the machine check bank. This was overkill because it is not an urgent problem: no core is on the verge of consuming that bad data. It was also found that multiple SRAO UCEs may cause nested MCE interrupts and finally become an IERR. Hence, Intel downgraded the machine check bank signature of patrol scrub from SRAO to UCNA (Uncorrected, No Action required), and the signal changed to #CMCI. Just to add to the confusion, Linux does take an action (in uc_decode_notifier()) to try to offline the page despite the UC*NA* signature name. - Background: why #CMCI and #MCE race when poison is being consumed on Intel platforms [1] Having decided that CMCI/UCNA is the best action for patrol scrub errors, the memory controller uses it for reads too. But the memory controller is executing asynchronously from the core, and can't tell the difference between a "real" read and a speculative read. So it will do CMCI/UCNA if an error is found in any read. Thus: 1) The core is clever and thinks address A is needed soon, so it issues a speculative read. 2) The core finds it is going to use address A soon after sending the read request. 3) The CMCI from the memory controller is in a race with the MCE from the core that will soon try to retire the load from address A. Quite often (because speculation has got better) the CMCI from the memory controller is delivered before the core is committed to the instruction reading address A, so the interrupt is taken, and Linux offlines the page (marking it as poison). - Why the user process is killed for the instr case Commit 046545a661af ("mm/hwpoison: fix error page recovered but reported "not recovered"") tries to fix the noisy message "Memory error not recovered" and skips duplicate SIGBUSes due to the race. But it also introduced a bug: kill_accessing_process() returns -EHWPOISON for the instr case, and as a result kill_me_maybe() sends a SIGBUS to the user process. If the CMCI wins that race, the page is marked poisoned when uc_decode_notifier() calls memory_failure(). For dirty pages, memory_failure() invokes try_to_unmap() with the TTU_HWPOISON flag, converting the PTE to a hwpoison entry. As a result, kill_accessing_process(): - calls walk_page_range(), which returns 1 regardless of whether try_to_unmap() succeeds or fails, - calls kill_proc() to make sure a SIGBUS is sent, - returns -EHWPOISON to indicate that a SIGBUS has already been sent to the process and kill_me_maybe() doesn't have to send it again. However, for clean pages, the TTU_HWPOISON flag is cleared, leaving the PTE unchanged and not converted to a hwpoison entry. Consequently, because the PTE entries of clean pages are not marked as hwpoison, kill_accessing_process() returns -EFAULT, causing kill_me_maybe() to send a SIGBUS. The console log looks like this: Memory failure: 0x827ca68: corrupted page was clean: dropped without side effects Memory failure: 0x827ca68: recovery action for clean LRU page: Recovered Memory failure: 0x827ca68: already hardware poisoned mce: Memory error not recovered To fix it, return 0 for "corrupted page was clean", preventing an unnecessary SIGBUS to the user process. 
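The clean-page path above can be exercised without faulty hardware via the hwpoison software injector; the following is only a hedged sketch, not part of the patch, assuming a kernel with CONFIG_MEMORY_FAILURE=y and CONFIG_HWPOISON_INJECT=y, debugfs mounted, root privileges, and a placeholder PFN that backs a clean page-cache page (e.g. located with tools/mm/page-types):

  pfn=0x827ca68                                       # placeholder; substitute a PFN of a clean page-cache page
  echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn  # invokes memory_failure() on that PFN
  dmesg | tail                                        # expect "corrupted page was clean: dropped without side effects"
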
[1] https://lore.kernel.org/lkml/20250217063335.22257-1-xueshuai@linux.alibaba.com/T/#mba94f1305b3009dd340ce4114d3221fe810d1871 Link: https://lkml.kernel.org/r/20250312112852.82415-3-xueshuai@linux.alibaba.com Fixes: 046545a661af ("mm/hwpoison: fix error page recovered but reported "not recovered"") Signed-off-by: Shuai Xue Tested-by: Tony Luck Acked-by: Miaohe Lin Cc: Baolin Wang Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jane Chu Cc: Jarkko Sakkinen Cc: Jonathan Cameron Cc: Josh Poimboeuf Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Ruidong Tian Cc: Thomas Gleinxer Cc: Yazen Ghannam Cc: Signed-off-by: Andrew Morton --- mm/memory-failure.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 6257c7f5e941..00d6da57ab06 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -881,12 +881,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, mmap_read_lock(p->mm); ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, (void *)&priv); + /* + * ret = 1 when CMCI wins, regardless of whether try_to_unmap() + * succeeds or fails, then kill the process with SIGBUS. + * ret = 0 when poison page is a clean page and it's dropped, no + * SIGBUS is needed. + */ if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); - else - ret = 0; mmap_read_unlock(p->mm); - return ret > 0 ? -EHWPOISON : -EFAULT; + + return ret > 0 ? -EHWPOISON : 0; } /* From d2734f044f84833b2c9ec1b71b542d299d35202b Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 12 Mar 2025 19:28:52 +0800 Subject: [PATCH 393/431] mm: memory-failure: enhance comments for return value of memory_failure() The comments for the return value of memory_failure are not complete, supplement the comments. Link: https://lkml.kernel.org/r/20250312112852.82415-4-xueshuai@linux.alibaba.com Signed-off-by: Shuai Xue Reviewed-by: Jarkko Sakkinen Reviewed-by: Jonathan Cameron Reviewed-by: Yazen Ghannam Reviewed-by: Jane Chu Acked-by: Miaohe Lin Tested-by: Tony Luck Cc: Baolin Wang Cc: Borislav Betkov Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Ruidong Tian Cc: Thomas Gleinxer Signed-off-by: Andrew Morton --- mm/memory-failure.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 00d6da57ab06..b91a33fb6c69 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2215,9 +2215,13 @@ static void kill_procs_now(struct page *p, unsigned long pfn, int flags, * Must run in process context (e.g. a work queue) with interrupts * enabled and no spinlocks held. * - * Return: 0 for successfully handled the memory error, - * -EOPNOTSUPP for hwpoison_filter() filtered the error event, - * < 0(except -EOPNOTSUPP) on failure. + * Return: + * 0 - success, + * -ENXIO - memory not managed by the kernel + * -EOPNOTSUPP - hwpoison_filter() filtered the error event, + * -EHWPOISON - the page was already poisoned, potentially + * kill process, + * other negative values - failure. 
*/ int memory_failure(unsigned long pfn, int flags) { From ce50f4bc42af4d3811d3863ed14b0c33ab5cef81 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Wed, 12 Mar 2025 11:52:45 +0100 Subject: [PATCH 394/431] MAINTAINERS: adjust file entry in MAPLE TREE Commit 0f3b602e1bad ("tools: separate out shared radix-tree components") moved files from radix-tree/linux to shared/linux in the ./tools/testing/ directory, but missed adjusting a file entry in MAPLE TREE. Hence, ./scripts/get_maintainer.pl --self-test=patterns complains about a broken reference. Adjust the file entry in MAPLE TREE. Link: https://lkml.kernel.org/r/20250312105245.216302-1-lukas.bulwahn@redhat.com Signed-off-by: Lukas Bulwahn Acked-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index fb408698086e..e714ea3a7c9f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13922,8 +13922,8 @@ F: include/linux/maple_tree.h F: include/trace/events/maple_tree.h F: lib/maple_tree.c F: lib/test_maple_tree.c -F: tools/testing/radix-tree/linux/maple_tree.h F: tools/testing/radix-tree/maple.c +F: tools/testing/shared/linux/maple_tree.h MARDUK (CREATOR CI40) DEVICE TREE SUPPORT M: Rahul Bedarkar From 456620c5cb238744f3e08a04af935fce25ca2df1 Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Wed, 12 Mar 2025 17:37:17 +0800 Subject: [PATCH 395/431] mm/debug: add line breaks The format string is missing a newline character at the end. Link: https://lkml.kernel.org/r/20250312093717.364031-1-liuye@kylinos.cn Signed-off-by: Liu Ye Signed-off-by: Andrew Morton --- mm/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/debug.c b/mm/debug.c index 83ef3bd0ccd3..db83e381a8ae 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -173,7 +173,7 @@ static void __dump_page(const struct page *page) void dump_page(const struct page *page, const char *reason) { if (PagePoisoned(page)) - pr_warn("page:%p is uninitialized and poisoned", page); + pr_warn("page:%p is uninitialized and poisoned\n", page); else __dump_page(page); if (reason) From f841ad9ca5007167c02de143980c9dc703f90b3d Mon Sep 17 00:00:00 2001 From: Cyan Yang Date: Wed, 12 Mar 2025 12:38:40 +0800 Subject: [PATCH 396/431] selftests/mm/cow: fix the incorrect error handling The error handling checks the wrong variable: after the mmap() call it tests mem instead of mremap_mem for MAP_FAILED. Fix it to check the correct return value. 
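For reference, one way to build and run the affected test through the kselftest harness; this is a hedged sketch of the usual kselftest flow, and target names or paths may differ between trees:

  make headers
  make -C tools/testing/selftests TARGETS=mm
  sudo ./tools/testing/selftests/mm/cow
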
Link: https://lkml.kernel.org/r/20250312043840.71799-1-cyan.yang@sifive.com Fixes: f4b5fd6946e2 ("selftests/vm: anon_cow: THP tests") Signed-off-by: Cyan Yang Reviewed-by: Dev Jain Reviewed-by: Muhammad Usama Anjum Cc: David Hildenbrand Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/cow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 9446673645eb..f0cb14ea8608 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) mremap_size = thpsize / 2; mremap_mem = mmap(NULL, mremap_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mem == MAP_FAILED) { + if (mremap_mem == MAP_FAILED) { ksft_test_result_fail("mmap() failed\n"); goto munmap; } From 8defffa4c7b5d19a9a480aec675003f9c9e7daf6 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 13 Mar 2025 15:14:56 +0000 Subject: [PATCH 397/431] mm: convert lru_add_page_tail() to lru_add_split_folio() Remove three hidden calls to compound_head() and accesses to page->lru. Link: https://lkml.kernel.org/r/20250313151458.4145978-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Signed-off-by: Andrew Morton --- mm/huge_memory.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e3ed8e9523f5..10a86b681cf1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3262,25 +3262,25 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags) } } -static void lru_add_page_tail(struct folio *folio, struct page *tail, +static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, struct lruvec *lruvec, struct list_head *list) { - VM_BUG_ON_FOLIO(PageLRU(tail), folio); + VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio); lockdep_assert_held(&lruvec->lru_lock); if (list) { /* page reclaim is reclaiming a huge page */ VM_WARN_ON(folio_test_lru(folio)); - get_page(tail); - list_add_tail(&tail->lru, list); + folio_get(new_folio); + list_add_tail(&new_folio->lru, list); } else { /* head is still on lru (and we have it frozen) */ VM_WARN_ON(!folio_test_lru(folio)); if (folio_test_unevictable(folio)) - tail->mlock_count = 0; + new_folio->mlock_count = 0; else - list_add_tail(&tail->lru, &folio->lru); - SetPageLRU(tail); + list_add_tail(&new_folio->lru, &folio->lru); + folio_set_lru(new_folio); } } @@ -3581,8 +3581,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, ((mapping || swap_cache) ? folio_nr_pages(release) : 0)); - lru_add_page_tail(origin_folio, &release->page, - lruvec, list); + lru_add_split_folio(origin_folio, release, lruvec, + list); /* Some pages can be beyond EOF: drop them from cache */ if (release->index >= end) { From 67914ac08604345f620566ccf5bac87b40d5881d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 13 Mar 2025 17:05:32 -0400 Subject: [PATCH 398/431] mm: compaction: push watermark into compaction_suitable() callers Patch series "mm: reliable huge page allocator". This series makes changes to the allocator and reclaim/compaction code to try harder to avoid fragmentation. As a result, this makes huge page allocations cheaper, more reliable and more sustainable. 
It's a subset of the huge page allocator RFC initially proposed here: https://lore.kernel.org/lkml/20230418191313.268131-1-hannes@cmpxchg.org/ The following results are from a kernel build test, with additional concurrent bursts of THP allocations on a memory-constrained system. Comparing before and after the changes over 15 runs: before after Hugealloc Time mean 52739.45 ( +0.00%) 28904.00 ( -45.19%) Hugealloc Time stddev 56541.26 ( +0.00%) 33464.37 ( -40.81%) Kbuild Real time 197.47 ( +0.00%) 196.59 ( -0.44%) Kbuild User time 1240.49 ( +0.00%) 1231.67 ( -0.71%) Kbuild System time 70.08 ( +0.00%) 59.10 ( -15.45%) THP fault alloc 46727.07 ( +0.00%) 63223.67 ( +35.30%) THP fault fallback 21910.60 ( +0.00%) 5412.47 ( -75.29%) Direct compact fail 195.80 ( +0.00%) 59.07 ( -69.48%) Direct compact success 7.93 ( +0.00%) 2.80 ( -57.46%) Direct compact success rate % 3.51 ( +0.00%) 3.99 ( +10.49%) Compact daemon scanned migrate 3369601.27 ( +0.00%) 2267500.33 ( -32.71%) Compact daemon scanned free 5075474.47 ( +0.00%) 2339773.00 ( -53.90%) Compact direct scanned migrate 161787.27 ( +0.00%) 47659.93 ( -70.54%) Compact direct scanned free 163467.53 ( +0.00%) 40729.67 ( -75.08%) Compact total migrate scanned 3531388.53 ( +0.00%) 2315160.27 ( -34.44%) Compact total free scanned 5238942.00 ( +0.00%) 2380502.67 ( -54.56%) Alloc stall 2371.07 ( +0.00%) 638.87 ( -73.02%) Pages kswapd scanned 2160926.73 ( +0.00%) 4002186.33 ( +85.21%) Pages kswapd reclaimed 533191.07 ( +0.00%) 718577.80 ( +34.77%) Pages direct scanned 400450.33 ( +0.00%) 355172.73 ( -11.31%) Pages direct reclaimed 94441.73 ( +0.00%) 31162.80 ( -67.00%) Pages total scanned 2561377.07 ( +0.00%) 4357359.07 ( +70.12%) Pages total reclaimed 627632.80 ( +0.00%) 749740.60 ( +19.46%) Swap out 47959.53 ( +0.00%) 110084.33 ( +129.53%) Swap in 7276.00 ( +0.00%) 24457.00 ( +236.10%) File refaults 138043.00 ( +0.00%) 188226.93 ( +36.35%) THP latencies are cut in half, and failure rates are cut by 75%. These metrics also hold up over time, while the vanilla kernel sees a steady downward trend in success rates with each subsequent run, owed to the cumulative effects of fragmentation. A more detailed discussion of results is in the patch changelogs. The patches first introduce a vm.defrag_mode sysctl, which enforces the existing ALLOC_NOFRAGMENT alloc flag until after reclaim and compaction have run. They then change kswapd and kcompactd to target pageblocks, which boosts success in the ALLOC_NOFRAGMENT hotpaths. Patches #1 and #2 are somewhat unrelated cleanups, but touch the same code and so are included here to avoid conflicts from re-ordering. This patch (of 5): compaction_suitable() hardcodes the min watermark, with a boost to the low watermark for costly orders. However, compaction_ready() requires order-0 at the high watermark. It currently checks the marks twice. Make the watermark a parameter to compaction_suitable() and have the callers pass in what they require: - compaction_zonelist_suitable() is used by the direct reclaim path, so use the min watermark. - compact_suit_allocation_order() has a watermark in context derived from cc->alloc_flags. The only quirk is that kcompactd doesn't initialize cc->alloc_flags explicitly. There is a direct check in kcompactd_do_work() that passes ALLOC_WMARK_MIN, but there is another check downstack in compact_zone() that ends up passing the unset alloc_flags. Since they default to 0, and that coincides with ALLOC_WMARK_MIN, it is correct. But it's subtle. Set cc->alloc_flags explicitly. 
- should_continue_reclaim() is direct reclaim, use the min watermark. - Finally, consolidate the two checks in compaction_ready() to a single compaction_suitable() call passing the high watermark. There is a tiny change in behavior: before, compaction_suitable() would check order-0 against min or low, depending on costly order. Then there'd be another high watermark check. Now, the high watermark is passed to compaction_suitable(), and the costly order-boost (low - min) is added on top. This means compaction_ready() sets a marginally higher target for free pages. In a kernelbuild + THP pressure test, though, this didn't show any measurable negative effects on memory pressure or reclaim rates. As the comment above the check says, reclaim is usually stopped short on should_continue_reclaim(), and this just defines the worst-case reclaim cutoff in case compaction is not making any headway. [hughd@google.com: stop oops on out-of-range highest_zoneidx] Link: https://lkml.kernel.org/r/005ace8b-07fa-01d4-b54b-394a3e029c07@google.com Link: https://lkml.kernel.org/r/20250313210647.1314586-1-hannes@cmpxchg.org Link: https://lkml.kernel.org/r/20250313210647.1314586-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Signed-off-by: Hugh Dickins Acked-by: Zi Yan Cc: Mel Gorman Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/compaction.h | 5 ++-- mm/compaction.c | 52 ++++++++++++++++++++------------------ mm/vmscan.c | 26 ++++++++++--------- 3 files changed, 45 insertions(+), 38 deletions(-) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 7bf0c521db63..173d9c07a895 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -95,7 +95,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern bool compaction_suitable(struct zone *zone, int order, - int highest_zoneidx); + unsigned long watermark, int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); @@ -113,7 +113,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline bool compaction_suitable(struct zone *zone, int order, - int highest_zoneidx) + unsigned long watermark, + int highest_zoneidx) { return false; } diff --git a/mm/compaction.c b/mm/compaction.c index 2e2d4db33e68..cf32e8053edb 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2381,40 +2381,42 @@ static enum compact_result compact_finished(struct compact_control *cc) } static bool __compaction_suitable(struct zone *zone, int order, - int highest_zoneidx, - unsigned long wmark_target) + unsigned long watermark, int highest_zoneidx, + unsigned long free_pages) { - unsigned long watermark; /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the - * watermark and alloc_flags have to match, or be more pessimistic than - * the check in __isolate_free_page(). We don't use the direct - * compactor's alloc_flags, as they are not relevant for freepage - * isolation. We however do use the direct compactor's highest_zoneidx - * to skip over zones where lowmem reserves would prevent allocation - * even if compaction succeeds. - * For costly orders, we require low watermark instead of min for - * compaction to proceed to increase its chances. + * watermark have to match, or be more pessimistic than the check in + * __isolate_free_page(). 
+ * + * For costly orders, we require a higher watermark for compaction to + * proceed to increase its chances. + * + * We use the direct compactor's highest_zoneidx to skip over zones + * where lowmem reserves would prevent allocation even if compaction + * succeeds. + * * ALLOC_CMA is used, as pages in CMA pageblocks are considered - * suitable migration targets + * suitable migration targets. */ - watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? - low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); + if (order > PAGE_ALLOC_COSTLY_ORDER) + watermark += low_wmark_pages(zone) - min_wmark_pages(zone); return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - ALLOC_CMA, wmark_target); + ALLOC_CMA, free_pages); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? */ -bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) +bool compaction_suitable(struct zone *zone, int order, unsigned long watermark, + int highest_zoneidx) { enum compact_result compact_result; bool suitable; - suitable = __compaction_suitable(zone, order, highest_zoneidx, + suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to @@ -2452,6 +2454,7 @@ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) return suitable; } +/* Used by direct reclaimers */ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags) { @@ -2474,8 +2477,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); - if (__compaction_suitable(zone, order, ac->highest_zoneidx, - available)) + if (__compaction_suitable(zone, order, min_wmark_pages(zone), + ac->highest_zoneidx, available)) return true; } @@ -2512,13 +2515,13 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order, */ if (order > PAGE_ALLOC_COSTLY_ORDER && async && !(alloc_flags & ALLOC_CMA)) { - watermark = low_wmark_pages(zone) + compact_gap(order); - if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - 0, zone_page_state(zone, NR_FREE_PAGES))) + if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order), + highest_zoneidx, 0, + zone_page_state(zone, NR_FREE_PAGES))) return COMPACT_SKIPPED; } - if (!compaction_suitable(zone, order, highest_zoneidx)) + if (!compaction_suitable(zone, order, watermark, highest_zoneidx)) return COMPACT_SKIPPED; return COMPACT_CONTINUE; @@ -3081,6 +3084,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, + .alloc_flags = ALLOC_WMARK_MIN, }; enum compact_result ret; @@ -3099,7 +3103,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) continue; ret = compaction_suit_allocation_order(zone, - cc.order, zoneid, ALLOC_WMARK_MIN, + cc.order, zoneid, cc.alloc_flags, false); if (ret != COMPACT_CONTINUE) continue; diff --git a/mm/vmscan.c b/mm/vmscan.c index 2bc740637a6c..3370bdca6868 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -5890,12 +5890,15 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, /* If compaction would go ahead or the allocation would succeed, stop */ for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { + unsigned long watermark = min_wmark_pages(zone); + /* Allocation can already succeed, nothing to do */ - if 
(zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + if (zone_watermark_ok(zone, sc->order, watermark, sc->reclaim_idx, 0)) return false; - if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) + if (compaction_suitable(zone, sc->order, watermark, + sc->reclaim_idx)) return false; } @@ -6122,22 +6125,21 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) sc->reclaim_idx, 0)) return true; - /* Compaction cannot yet proceed. Do reclaim. */ - if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) - return false; - /* - * Compaction is already possible, but it takes time to run and there - * are potentially other callers using the pages just freed. So proceed - * with reclaim to make a buffer of free pages available to give - * compaction a reasonable chance of completing and allocating the page. + * Direct reclaim usually targets the min watermark, but compaction + * takes time to run and there are potentially other callers using the + * pages just freed. So target a higher buffer to give compaction a + * reasonable chance of completing and allocating the pages. + * * Note that we won't actually reclaim the whole buffer in one attempt * as the target watermark in should_continue_reclaim() is lower. But if * we are already above the high+gap watermark, don't reclaim at all. */ - watermark = high_wmark_pages(zone) + compact_gap(sc->order); + watermark = high_wmark_pages(zone); + if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx)) + return true; - return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); + return false; } static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) From f46012c0ec9f544998b81b2e3c6c702b9277f596 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 13 Mar 2025 17:05:33 -0400 Subject: [PATCH 399/431] mm: page_alloc: trace type pollution from compaction capturing When the page allocator places pages of a certain migratetype into blocks of another type, it has lasting effects on the ability to compact and defragment down the line. For improving placement and compaction, visibility into such events is crucial. The most common case, allocator fallbacks, is already annotated, but compaction capturing is also allowed to grab pages of a different type. Extend the tracepoint to cover this case. Link: https://lkml.kernel.org/r/20250313210647.1314586-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Zi Yan Cc: Mel Gorman Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0be1fedd1201..5b92b1acda0e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -614,6 +614,10 @@ compaction_capture(struct capture_control *capc, struct page *page, capc->cc->migratetype != MIGRATE_MOVABLE) return false; + if (migratetype != capc->cc->migratetype) + trace_mm_page_alloc_extfrag(page, capc->cc->order, order, + capc->cc->migratetype, migratetype); + capc->page = page; return true; } From e3aa7df331bca08742a212764348246e8e8a874e Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 13 Mar 2025 17:05:34 -0400 Subject: [PATCH 400/431] mm: page_alloc: defrag_mode The page allocator groups requests by migratetype to stave off fragmentation. However, in practice this is routinely defeated by the fact that it gives up *before* invoking reclaim and compaction - which may well produce suitable pages. 
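The type-pollution events this series is concerned with (including the mm_page_alloc_extfrag tracepoint extended in the previous patch) can be observed at runtime. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing and the commands are run as root:

  cd /sys/kernel/tracing
  echo 1 > events/kmem/mm_page_alloc_extfrag/enable
  cat trace_pipe        # each event reports the requested and fallback migratetypes
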
As a result, fragmentation of physical memory is a common ongoing process in many load scenarios. Fragmentation deteriorates compaction's ability to produce huge pages. Depending on the lifetime of the fragmenting allocations, those effects can be long-lasting or even permanent, requiring drastic measures like forcible idle states or even reboots as the only reliable ways to recover the address space for THP production. In a kernel build test with supplemental THP pressure, the THP allocation rate steadily declines over 15 runs: thp_fault_alloc 61988 56474 57258 50187 52388 55409 52925 47648 43669 40621 36077 41721 36685 34641 33215 This is a hurdle in adopting THP in any environment where hosts are shared between multiple overlapping workloads (cloud environments), and rarely experience true idle periods. To make THP a reliable and predictable optimization, there needs to be a stronger guarantee to avoid such fragmentation. Introduce defrag_mode. When enabled, reclaim/compaction is invoked to its full extent *before* falling back. Specifically, ALLOC_NOFRAGMENT is enforced on the allocator fastpath and the reclaiming slowpath. For now, fallbacks are permitted to avert OOMs. There is a plan to add defrag_mode=2 to prefer OOMs over fragmentation, but this requires additional prep work in compaction and the reserve management to make it ready for all possible allocation contexts. The following test results are from a kernel build with periodic bursts of THP allocations, over 15 runs: vanilla defrag_mode=1 @claimer[unmovable]: 189 103 @claimer[movable]: 92 103 @claimer[reclaimable]: 207 61 @pollute[unmovable from movable]: 25 0 @pollute[unmovable from reclaimable]: 28 0 @pollute[movable from unmovable]: 38835 0 @pollute[movable from reclaimable]: 147136 0 @pollute[reclaimable from unmovable]: 178 0 @pollute[reclaimable from movable]: 33 0 @steal[unmovable from movable]: 11 0 @steal[unmovable from reclaimable]: 5 0 @steal[reclaimable from unmovable]: 107 0 @steal[reclaimable from movable]: 90 0 @steal[movable from reclaimable]: 354 0 @steal[movable from unmovable]: 130 0 Both types of polluting fallbacks are eliminated in this workload. Interestingly, whole block conversions are reduced as well. This is because once a block is claimed for a type, its empty space remains available for future allocations, instead of being padded with fallbacks; this allows the native type to group up instead of spreading out to new blocks. The assumption in the allocator has been that pollution from movable allocations is less harmful than from other types, since they can be reclaimed or migrated out should the space be needed. However, since fallbacks occur *before* reclaim/compaction is invoked, movable pollution will still cause non-movable allocations to spread out and claim more blocks. Without fragmentation, THP rates hold steady with defrag_mode=1: thp_fault_alloc 32478 20725 45045 32130 14018 21711 40791 29134 34458 45381 28305 17265 22584 28454 30850 While the downward trend is eliminated, the keen reader will of course notice that the baseline rate is much smaller than the vanilla kernel's to begin with. This is due to deficiencies in how reclaim and compaction are currently driven: ALLOC_NOFRAGMENT increases the extent to which smaller allocations are competing with THPs for pageblocks, while making no effort themselves to reclaim or compact beyond their own request size. 
This effect already exists with the current usage of ALLOC_NOFRAGMENT, but is amplified by defrag_mode insisting on whole block stealing much more strongly. Subsequent patches will address defrag_mode reclaim strategy to raise the THP success baseline above the vanilla kernel. Link: https://lkml.kernel.org/r/20250313210647.1314586-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Mel Gorman Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- Documentation/admin-guide/sysctl/vm.rst | 9 +++++++++ mm/page_alloc.c | 27 +++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index f48eaa98d22d..8290177b4f75 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm: - compact_memory - compaction_proactiveness - compact_unevictable_allowed +- defrag_mode - dirty_background_bytes - dirty_background_ratio - dirty_bytes @@ -145,6 +146,14 @@ On CONFIG_PREEMPT_RT the default value is 0 in order to avoid a page fault, due to compaction, which would block the task from becoming active until the fault is resolved. +defrag_mode +=========== + +When set to 1, the page allocator tries harder to avoid fragmentation +and maintain the ability to produce huge pages / higher-order pages. + +It is recommended to enable this right after boot, as fragmentation, +once it occurred, can be long-lasting or even permanent. dirty_background_bytes ====================== diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b92b1acda0e..f849eb7146b9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -273,6 +273,7 @@ int min_free_kbytes = 1024; int user_min_free_kbytes = -1; static int watermark_boost_factor __read_mostly = 15000; static int watermark_scale_factor = 10; +static int defrag_mode; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -3389,6 +3390,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) */ alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); + if (defrag_mode) { + alloc_flags |= ALLOC_NOFRAGMENT; + return alloc_flags; + } + #ifdef CONFIG_ZONE_DMA32 if (!zone) return alloc_flags; @@ -3480,7 +3486,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, continue; } - if (no_fallback && nr_online_nodes > 1 && + if (no_fallback && !defrag_mode && nr_online_nodes > 1 && zone != zonelist_zone(ac->preferred_zoneref)) { int local_nid; @@ -3591,7 +3597,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, * It's possible on a UMA machine to get through all zones that are * fragmented. If avoiding fragmentation, reset and try again. 
*/ - if (no_fallback) { + if (no_fallback && !defrag_mode) { alloc_flags &= ~ALLOC_NOFRAGMENT; goto retry; } @@ -4128,6 +4134,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); + if (defrag_mode) + alloc_flags |= ALLOC_NOFRAGMENT; + return alloc_flags; } @@ -4510,6 +4519,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, &compaction_retries)) goto retry; + /* Reclaim/compaction failed to prevent the fallback */ + if (defrag_mode) { + alloc_flags &= ALLOC_NOFRAGMENT; + goto retry; + } /* * Deal with possible cpuset update races or zonelist updates to avoid @@ -6286,6 +6300,15 @@ static const struct ctl_table page_alloc_sysctl_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_THREE_THOUSAND, }, + { + .procname = "defrag_mode", + .data = &defrag_mode, + .maxlen = sizeof(defrag_mode), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { .procname = "percpu_pagelist_high_fraction", .data = &percpu_pagelist_high_fraction, From 101f9d666e4d730e80caabe02446e8592ac44592 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 13 Mar 2025 17:05:35 -0400 Subject: [PATCH 401/431] mm: page_alloc: defrag_mode kswapd/kcompactd assistance When defrag_mode is enabled, allocation fallbacks strongly prefer whole block conversions instead of polluting or stealing partially used blocks. This means there is a demand for pageblocks even from sub-block requests. Let kswapd/kcompactd help produce them. By the time kswapd gets woken up, normal rmqueue and block conversion fallbacks have been attempted and failed. So always wake kswapd with the block order; it will take care of producing a suitable compaction gap and then chain-wake kcompactd with the block order when its done. VANILLA DEFRAGMODE-ASYNC Hugealloc Time mean 52739.45 ( +0.00%) 34300.36 ( -34.96%) Hugealloc Time stddev 56541.26 ( +0.00%) 36390.42 ( -35.64%) Kbuild Real time 197.47 ( +0.00%) 196.13 ( -0.67%) Kbuild User time 1240.49 ( +0.00%) 1234.74 ( -0.46%) Kbuild System time 70.08 ( +0.00%) 62.62 ( -10.50%) THP fault alloc 46727.07 ( +0.00%) 57054.53 ( +22.10%) THP fault fallback 21910.60 ( +0.00%) 11581.40 ( -47.14%) Direct compact fail 195.80 ( +0.00%) 107.80 ( -44.72%) Direct compact success 7.93 ( +0.00%) 4.53 ( -38.06%) Direct compact success rate % 3.51 ( +0.00%) 3.20 ( -6.89%) Compact daemon scanned migrate 3369601.27 ( +0.00%) 5461033.93 ( +62.07%) Compact daemon scanned free 5075474.47 ( +0.00%) 5824897.93 ( +14.77%) Compact direct scanned migrate 161787.27 ( +0.00%) 58336.93 ( -63.94%) Compact direct scanned free 163467.53 ( +0.00%) 32791.87 ( -79.94%) Compact total migrate scanned 3531388.53 ( +0.00%) 5519370.87 ( +56.29%) Compact total free scanned 5238942.00 ( +0.00%) 5857689.80 ( +11.81%) Alloc stall 2371.07 ( +0.00%) 2424.60 ( +2.26%) Pages kswapd scanned 2160926.73 ( +0.00%) 2657018.33 ( +22.96%) Pages kswapd reclaimed 533191.07 ( +0.00%) 559583.07 ( +4.95%) Pages direct scanned 400450.33 ( +0.00%) 722094.07 ( +80.32%) Pages direct reclaimed 94441.73 ( +0.00%) 107257.80 ( +13.57%) Pages total scanned 2561377.07 ( +0.00%) 3379112.40 ( +31.93%) Pages total reclaimed 627632.80 ( +0.00%) 666840.87 ( +6.25%) Swap out 47959.53 ( +0.00%) 77238.20 ( +61.05%) Swap in 7276.00 ( +0.00%) 11712.80 ( +60.97%) File refaults 138043.00 ( +0.00%) 143438.80 ( +3.91%) With this patch, defrag_mode=1 beats the vanilla kernel in THP success rates and allocation latencies. 
The trend holds over time: thp_fault_alloc VANILLA DEFRAGMODE-ASYNC 61988 52066 56474 58844 57258 58233 50187 58476 52388 54516 55409 59938 52925 57204 47648 60238 43669 55733 40621 56211 36077 59861 41721 57771 36685 58579 34641 51868 33215 56280 DEFRAGMODE-ASYNC also wins on %sys as ~3/4 of the direct compaction work is shifted to kcompactd. Reclaim activity is higher. Part of that is simply due to the increased memory footprint from higher THP use. The other aspect is that *direct* reclaim/compaction are still going for requested orders rather than targeting the page blocks required for fallbacks, which is less efficient than it could be. However, this is already a useful tradeoff to make, as in many environments peak periods are short and retaining the ability to produce THP through them is more important. Link: https://lkml.kernel.org/r/20250313210647.1314586-5-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Mel Gorman Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/page_alloc.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f849eb7146b9..5a2ee82f723e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4076,15 +4076,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, struct zone *zone; pg_data_t *last_pgdat = NULL; enum zone_type highest_zoneidx = ac->highest_zoneidx; + unsigned int reclaim_order; + + if (defrag_mode) + reclaim_order = max(order, pageblock_order); + else + reclaim_order = order; for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, ac->nodemask) { if (!managed_zone(zone)) continue; - if (last_pgdat != zone->zone_pgdat) { - wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); - last_pgdat = zone->zone_pgdat; - } + if (last_pgdat == zone->zone_pgdat) + continue; + wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); + last_pgdat = zone->zone_pgdat; } } From a211c6550efcc87aa2459ca347bda10721c7a46a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 13 Mar 2025 17:05:36 -0400 Subject: [PATCH 402/431] mm: page_alloc: defrag_mode kswapd/kcompactd watermarks The previous patch added pageblock_order reclaim to kswapd/kcompactd, which helps, but produces only one block at a time. Allocation stalls and THP failure rates are still higher than they could be. To adequately reflect ALLOC_NOFRAGMENT demand for pageblocks, change the watermarking for kswapd & kcompactd: instead of targeting the high watermark in order-0 pages and checking for one suitable block, simply require that the high watermark is entirely met in pageblocks. To this end, track the number of free pages within contiguous pageblocks, then change pgdat_balanced() and compact_finished() to check watermarks against this new value. 
This further reduces THP latencies and allocation stalls, and improves THP success rates against the previous patch: DEFRAGMODE-ASYNC DEFRAGMODE-ASYNC-WMARKS Hugealloc Time mean 34300.36 ( +0.00%) 28904.00 ( -15.73%) Hugealloc Time stddev 36390.42 ( +0.00%) 33464.37 ( -8.04%) Kbuild Real time 196.13 ( +0.00%) 196.59 ( +0.23%) Kbuild User time 1234.74 ( +0.00%) 1231.67 ( -0.25%) Kbuild System time 62.62 ( +0.00%) 59.10 ( -5.54%) THP fault alloc 57054.53 ( +0.00%) 63223.67 ( +10.81%) THP fault fallback 11581.40 ( +0.00%) 5412.47 ( -53.26%) Direct compact fail 107.80 ( +0.00%) 59.07 ( -44.79%) Direct compact success 4.53 ( +0.00%) 2.80 ( -31.33%) Direct compact success rate % 3.20 ( +0.00%) 3.99 ( +18.66%) Compact daemon scanned migrate 5461033.93 ( +0.00%) 2267500.33 ( -58.48%) Compact daemon scanned free 5824897.93 ( +0.00%) 2339773.00 ( -59.83%) Compact direct scanned migrate 58336.93 ( +0.00%) 47659.93 ( -18.30%) Compact direct scanned free 32791.87 ( +0.00%) 40729.67 ( +24.21%) Compact total migrate scanned 5519370.87 ( +0.00%) 2315160.27 ( -58.05%) Compact total free scanned 5857689.80 ( +0.00%) 2380502.67 ( -59.36%) Alloc stall 2424.60 ( +0.00%) 638.87 ( -73.62%) Pages kswapd scanned 2657018.33 ( +0.00%) 4002186.33 ( +50.63%) Pages kswapd reclaimed 559583.07 ( +0.00%) 718577.80 ( +28.41%) Pages direct scanned 722094.07 ( +0.00%) 355172.73 ( -50.81%) Pages direct reclaimed 107257.80 ( +0.00%) 31162.80 ( -70.95%) Pages total scanned 3379112.40 ( +0.00%) 4357359.07 ( +28.95%) Pages total reclaimed 666840.87 ( +0.00%) 749740.60 ( +12.43%) Swap out 77238.20 ( +0.00%) 110084.33 ( +42.53%) Swap in 11712.80 ( +0.00%) 24457.00 ( +108.80%) File refaults 143438.80 ( +0.00%) 188226.93 ( +31.22%) Also of note is that compaction work overall is reduced. The reason for this is that when free pageblocks are more readily available, allocations are also much more likely to get physically placed in LRU order, instead of being forced to scavenge free space here and there. This means that reclaim by itself has better chances of freeing up whole blocks, and the system relies less on compaction. 
Comparing all changes to the vanilla kernel: VANILLA DEFRAGMODE-ASYNC-WMARKS Hugealloc Time mean 52739.45 ( +0.00%) 28904.00 ( -45.19%) Hugealloc Time stddev 56541.26 ( +0.00%) 33464.37 ( -40.81%) Kbuild Real time 197.47 ( +0.00%) 196.59 ( -0.44%) Kbuild User time 1240.49 ( +0.00%) 1231.67 ( -0.71%) Kbuild System time 70.08 ( +0.00%) 59.10 ( -15.45%) THP fault alloc 46727.07 ( +0.00%) 63223.67 ( +35.30%) THP fault fallback 21910.60 ( +0.00%) 5412.47 ( -75.29%) Direct compact fail 195.80 ( +0.00%) 59.07 ( -69.48%) Direct compact success 7.93 ( +0.00%) 2.80 ( -57.46%) Direct compact success rate % 3.51 ( +0.00%) 3.99 ( +10.49%) Compact daemon scanned migrate 3369601.27 ( +0.00%) 2267500.33 ( -32.71%) Compact daemon scanned free 5075474.47 ( +0.00%) 2339773.00 ( -53.90%) Compact direct scanned migrate 161787.27 ( +0.00%) 47659.93 ( -70.54%) Compact direct scanned free 163467.53 ( +0.00%) 40729.67 ( -75.08%) Compact total migrate scanned 3531388.53 ( +0.00%) 2315160.27 ( -34.44%) Compact total free scanned 5238942.00 ( +0.00%) 2380502.67 ( -54.56%) Alloc stall 2371.07 ( +0.00%) 638.87 ( -73.02%) Pages kswapd scanned 2160926.73 ( +0.00%) 4002186.33 ( +85.21%) Pages kswapd reclaimed 533191.07 ( +0.00%) 718577.80 ( +34.77%) Pages direct scanned 400450.33 ( +0.00%) 355172.73 ( -11.31%) Pages direct reclaimed 94441.73 ( +0.00%) 31162.80 ( -67.00%) Pages total scanned 2561377.07 ( +0.00%) 4357359.07 ( +70.12%) Pages total reclaimed 627632.80 ( +0.00%) 749740.60 ( +19.46%) Swap out 47959.53 ( +0.00%) 110084.33 ( +129.53%) Swap in 7276.00 ( +0.00%) 24457.00 ( +236.10%) File refaults 138043.00 ( +0.00%) 188226.93 ( +36.35%) THP allocation latencies and %sys time are down dramatically. THP allocation failures are down from nearly 50% to 8.5%. And to recall previous data points, the success rates are steady and reliable without the cumulative deterioration of fragmentation events. Compaction work is down overall. Direct compaction work especially is drastically reduced. As an aside, its success rate of 4% indicates there is room for improvement. For now it's good to rely on it less. Reclaim work is up overall, however direct reclaim work is down. Part of the increase can be attributed to a higher use of THPs, which due to internal fragmentation increase the memory footprint. This is not necessarily an unexpected side-effect for users of THP. However, taken both points together, there may well be some opportunities for fine tuning in the reclaim/compaction coordination. 
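A minimal usage sketch, assuming a kernel with this series applied: enable the knob right after boot, as the documentation added earlier recommends, and read the new counter from /proc/vmstat to see how much free memory sits in whole pageblocks:

  sysctl -w vm.defrag_mode=1          # or: echo 1 > /proc/sys/vm/defrag_mode
  grep nr_free_pages /proc/vmstat     # prints both nr_free_pages and nr_free_pages_blocks
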
[hannes@cmpxchg.org: fix squawks from rebasing] Link: https://lkml.kernel.org/r/20250314210558.GD1316033@cmpxchg.org Link: https://lkml.kernel.org/r/20250313210647.1314586-6-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Mel Gorman Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 1 + mm/compaction.c | 41 +++++++++++++++++++++++++++++++++-------- mm/internal.h | 1 + mm/page_alloc.c | 29 +++++++++++++++++++++++------ mm/vmscan.c | 15 ++++++++++++++- mm/vmstat.c | 1 + 6 files changed, 73 insertions(+), 15 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index dbb0ad69e17f..37c29f3fbca8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -138,6 +138,7 @@ enum numa_stat_item { enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, + NR_FREE_PAGES_BLOCKS, NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, NR_ZONE_ACTIVE_ANON, diff --git a/mm/compaction.c b/mm/compaction.c index cf32e8053edb..139f00c0308a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2328,6 +2328,22 @@ static enum compact_result __compact_finished(struct compact_control *cc) if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE; + /* + * When defrag_mode is enabled, make kcompactd target + * watermarks in whole pageblocks. Because they can be stolen + * without polluting, no further fallback checks are needed. + */ + if (defrag_mode && !cc->direct_compaction) { + if (__zone_watermark_ok(cc->zone, cc->order, + high_wmark_pages(cc->zone), + cc->highest_zoneidx, cc->alloc_flags, + zone_page_state(cc->zone, + NR_FREE_PAGES_BLOCKS))) + return COMPACT_SUCCESS; + + return COMPACT_CONTINUE; + } + /* Direct compactor: Is a suitable page free? */ ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order < NR_PAGE_ORDERS; order++) { @@ -2495,13 +2511,19 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, static enum compact_result compaction_suit_allocation_order(struct zone *zone, unsigned int order, int highest_zoneidx, unsigned int alloc_flags, - bool async) + bool async, bool kcompactd) { + unsigned long free_pages; unsigned long watermark; + if (kcompactd && defrag_mode) + free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); + else + free_pages = zone_page_state(zone, NR_FREE_PAGES); + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, - alloc_flags)) + if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx, + alloc_flags, free_pages)) return COMPACT_SUCCESS; /* @@ -2557,7 +2579,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) ret = compaction_suit_allocation_order(cc->zone, cc->order, cc->highest_zoneidx, cc->alloc_flags, - cc->mode == MIGRATE_ASYNC); + cc->mode == MIGRATE_ASYNC, + !cc->direct_compaction); if (ret != COMPACT_CONTINUE) return ret; } @@ -3051,6 +3074,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) struct zone *zone; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; enum compact_result ret; + unsigned int alloc_flags = defrag_mode ? 
+ ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN; for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { zone = &pgdat->node_zones[zoneid]; @@ -3060,8 +3085,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) ret = compaction_suit_allocation_order(zone, pgdat->kcompactd_max_order, - highest_zoneidx, ALLOC_WMARK_MIN, - false); + highest_zoneidx, alloc_flags, + false, true); if (ret == COMPACT_CONTINUE) return true; } @@ -3084,7 +3109,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, - .alloc_flags = ALLOC_WMARK_MIN, + .alloc_flags = defrag_mode ? ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN, }; enum compact_result ret; @@ -3104,7 +3129,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) ret = compaction_suit_allocation_order(zone, cc.order, zoneid, cc.alloc_flags, - false); + false, true); if (ret != COMPACT_CONTINUE) continue; diff --git a/mm/internal.h b/mm/internal.h index 2f52a65272c1..286520a424fe 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -536,6 +536,7 @@ extern char * const zone_names[MAX_NR_ZONES]; DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); extern int min_free_kbytes; +extern int defrag_mode; void setup_per_zone_wmarks(void); void calculate_min_free_kbytes(void); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5a2ee82f723e..4337467eaf5a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -273,7 +273,7 @@ int min_free_kbytes = 1024; int user_min_free_kbytes = -1; static int watermark_boost_factor __read_mostly = 15000; static int watermark_scale_factor = 10; -static int defrag_mode; +int defrag_mode; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -660,16 +660,20 @@ static inline void __add_to_free_list(struct page *page, struct zone *zone, bool tail) { struct free_area *area = &zone->free_area[order]; + int nr_pages = 1 << order; VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), migratetype, 1 << order); + get_pageblock_migratetype(page), migratetype, nr_pages); if (tail) list_add_tail(&page->buddy_list, &area->free_list[migratetype]); else list_add(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; + + if (order >= pageblock_order && !is_migrate_isolate(migratetype)) + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); } /* @@ -681,24 +685,34 @@ static inline void move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int old_mt, int new_mt) { struct free_area *area = &zone->free_area[order]; + int nr_pages = 1 << order; /* Free page moving can fail, so it happens before the type update */ VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), old_mt, 1 << order); + get_pageblock_migratetype(page), old_mt, nr_pages); list_move_tail(&page->buddy_list, &area->free_list[new_mt]); - account_freepages(zone, -(1 << order), old_mt); - account_freepages(zone, 1 << order, new_mt); + account_freepages(zone, -nr_pages, old_mt); + account_freepages(zone, nr_pages, new_mt); + + if (order >= pageblock_order && + is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) { + if (!is_migrate_isolate(old_mt)) + nr_pages = -nr_pages; + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); + } } static inline void __del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order, int 
migratetype) { + int nr_pages = 1 << order; + VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, "page type is %lu, passed migratetype is %d (nr=%d)\n", - get_pageblock_migratetype(page), migratetype, 1 << order); + get_pageblock_migratetype(page), migratetype, nr_pages); /* clear reported state and update reported page count */ if (page_reported(page)) @@ -708,6 +722,9 @@ static inline void __del_page_from_free_list(struct page *page, struct zone *zon __ClearPageBuddy(page); set_page_private(page, 0); zone->free_area[order].nr_free--; + + if (order >= pageblock_order && !is_migrate_isolate(migratetype)) + __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); } static inline void del_page_from_free_list(struct page *page, struct zone *zone, diff --git a/mm/vmscan.c b/mm/vmscan.c index 3370bdca6868..b5c7dfc2b189 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6724,11 +6724,24 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) * meet watermarks. */ for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) { + unsigned long free_pages; + if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) mark = promo_wmark_pages(zone); else mark = high_wmark_pages(zone); - if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) + + /* + * In defrag_mode, watermarks must be met in whole + * blocks to avoid polluting allocator fallbacks. + */ + if (defrag_mode) + free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); + else + free_pages = zone_page_state(zone, NR_FREE_PAGES); + + if (__zone_watermark_ok(zone, order, mark, highest_zoneidx, + 0, free_pages)) return true; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 16bfe1c694dd..ed49a86348f7 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1190,6 +1190,7 @@ int fragmentation_index(struct zone *zone, unsigned int order) const char * const vmstat_text[] = { /* enum zone_stat_item counters */ "nr_free_pages", + "nr_free_pages_blocks", "nr_zone_inactive_anon", "nr_zone_active_anon", "nr_zone_inactive_file", From 24ac6fb6e3647fff3646b3ea1811095441380560 Mon Sep 17 00:00:00 2001 From: Ge Yang Date: Mon, 10 Feb 2025 09:56:06 +0800 Subject: [PATCH 403/431] mm/cma: using per-CMA locks to improve concurrent allocation performance For different CMAs, concurrent allocation of CMA memory ideally should not require synchronization using locks. Currently, a global cma_mutex lock is employed to synchronize all CMA allocations, which can impact the performance of concurrent allocations across different CMAs. To test the performance impact, follow these steps: 1. Boot the kernel with the command line argument hugetlb_cma=30G to allocate a 30GB CMA area specifically for huge page allocations. (note: on my machine, which has 3 nodes, each node is initialized with 10G of CMA) 2. Use the dd command with parameters if=/dev/zero of=/dev/shm/file bs=1G count=30 to fully utilize the CMA area by writing zeroes to a file in /dev/shm. 3. Open three terminals and execute the following commands simultaneously: (Note: Each of these commands attempts to allocate 10GB [2621440 * 4KB pages] of CMA memory.) On Terminal 1: time echo 2621440 > /sys/kernel/debug/cma/hugetlb1/alloc On Terminal 2: time echo 2621440 > /sys/kernel/debug/cma/hugetlb2/alloc On Terminal 3: time echo 2621440 > /sys/kernel/debug/cma/hugetlb3/alloc We attempt to allocate pages through the CMA debug interface and use the time command to measure the duration of each allocation. 
Performance comparison: Without this patch With this patch Terminal1 ~7s ~7s Terminal2 ~14s ~8s Terminal3 ~21s ~7s To solve problem above, we could use per-CMA locks to improve concurrent allocation performance. This would allow each CMA to be managed independently, reducing the need for a global lock and thus improving scalability and performance. Link: https://lkml.kernel.org/r/1739152566-744-1-git-send-email-yangge1116@126.com Signed-off-by: Ge Yang Reviewed-by: Barry Song Acked-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Aisheng Dong Cc: Baolin Wang Signed-off-by: Andrew Morton --- mm/cma.c | 7 ++++--- mm/cma.h | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 09322b8284bd..b06d5fe73399 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -34,7 +34,6 @@ struct cma cma_areas[MAX_CMA_AREAS]; unsigned int cma_area_count; -static DEFINE_MUTEX(cma_mutex); static int __init __cma_declare_contiguous_nid(phys_addr_t base, phys_addr_t size, phys_addr_t limit, @@ -175,6 +174,8 @@ static void __init cma_activate_area(struct cma *cma) spin_lock_init(&cma->lock); + mutex_init(&cma->alloc_mutex); + #ifdef CONFIG_CMA_DEBUGFS INIT_HLIST_HEAD(&cma->mem_head); spin_lock_init(&cma->mem_head_lock); @@ -813,9 +814,9 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr, spin_unlock_irq(&cma->lock); pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit); - mutex_lock(&cma_mutex); + mutex_lock(&cma->alloc_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp); - mutex_unlock(&cma_mutex); + mutex_unlock(&cma->alloc_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; diff --git a/mm/cma.h b/mm/cma.h index df7fc623b7a6..41a3ab0ec3de 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -39,6 +39,7 @@ struct cma { unsigned long available_count; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + struct mutex alloc_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; From 09bdc4fe700d1c499d94452d7a20e69c26a8c007 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Tue, 25 Feb 2025 10:30:16 +0200 Subject: [PATCH 404/431] mm/mm_init: rename __init_reserved_page_zone to __init_page_from_nid __init_reserved_page_zone() function finds the zone for pfn and nid and performs initialization of a struct page with that zone and nid. There is nothing in that function about reserved pages and it is misnamed. Rename it to __init_page_from_nid() to better reflect what the function does. 
Link: https://lkml.kernel.org/r/20250225083017.567649-2-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Reviewed-by: Wei Yang Cc: Frank van der Linden Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- mm/internal.h | 2 +- mm/mm_init.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index af9b8c1fca67..6fccfe6d046c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3407,7 +3407,7 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page, while (npages--) { pfn = page_to_pfn(page); - __init_reserved_page_zone(pfn, nid); + __init_page_from_nid(pfn, nid); free_reserved_page(page); page++; } diff --git a/mm/internal.h b/mm/internal.h index 286520a424fe..21f2643f3d95 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1518,7 +1518,7 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid); -void __meminit __init_reserved_page_zone(unsigned long pfn, int nid); +void __meminit __init_page_from_nid(unsigned long pfn, int nid); /* shrinker related functions */ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, diff --git a/mm/mm_init.c b/mm/mm_init.c index c82b0162f1cb..16a96aaf65c4 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -668,7 +668,7 @@ static inline void fixup_hashdist(void) {} /* * Initialize a reserved page unconditionally, finding its zone first. */ -void __meminit __init_reserved_page_zone(unsigned long pfn, int nid) +void __meminit __init_page_from_nid(unsigned long pfn, int nid) { pg_data_t *pgdat; int zid; @@ -748,7 +748,7 @@ static void __meminit init_reserved_page(unsigned long pfn, int nid) if (early_page_initialised(pfn, nid)) return; - __init_reserved_page_zone(pfn, nid); + __init_page_from_nid(pfn, nid); } #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} From b4f65dbdf87812056c224fd8d2c66318b2140ab5 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Tue, 25 Feb 2025 10:30:17 +0200 Subject: [PATCH 405/431] mm/mm_init: rename init_reserved_page to init_deferred_page When CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, init_reserved_page() function performs initialization of a struct page that would have been deferred normally. Rename it to init_deferred_page() to better reflect what the function does. 
Link: https://lkml.kernel.org/r/20250225083017.567649-3-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Reviewed-by: Wei Yang Cc: Frank van der Linden Cc: Muchun Song Cc: Changyuan Lyu Signed-off-by: Andrew Morton --- mm/mm_init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/mm_init.c b/mm/mm_init.c index 16a96aaf65c4..a38a1909b407 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -743,7 +743,7 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static void __meminit init_reserved_page(unsigned long pfn, int nid) +static void __meminit init_deferred_page(unsigned long pfn, int nid) { if (early_page_initialised(pfn, nid)) return; @@ -763,7 +763,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static inline void init_reserved_page(unsigned long pfn, int nid) +static inline void init_deferred_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -784,7 +784,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn, nid); + init_deferred_page(start_pfn, nid); /* * no need for atomic set_bit because the struct From 20d6c17252282c3af261d1cde8e34def1b2458c8 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Wed, 12 Mar 2025 22:48:12 -0700 Subject: [PATCH 406/431] memcg: avoid refill_stock for root memcg We never charge the page counters of root memcg, so there is no need to put root memcg in the memcg stock. At the moment, refill_stock() can be called from try_charge_memcg(), obj_cgroup_uncharge_pages() and mem_cgroup_uncharge_skmem(). The try_charge_memcg() and mem_cgroup_uncharge_skmem() are never called with root memcg, so those are fine. However obj_cgroup_uncharge_pages() can potentially call refill_stock() with root memcg if the objcg object has been reparented over to the root memcg. Let's just avoid refill_stock() from obj_cgroup_uncharge_pages() for root memcg. Link: https://lkml.kernel.org/r/20250313054812.2185900-1-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/memcontrol.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b07eff78414b..ce57660bf5a2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2643,7 +2643,8 @@ static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); memcg1_account_kmem(memcg, -nr_pages); - refill_stock(memcg, nr_pages); + if (!mem_cgroup_is_root(memcg)) + refill_stock(memcg, nr_pages); css_put(&memcg->css); } From cb44821e1f524a5b8c05a738a40d572efd1ca430 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Wed, 12 Mar 2025 15:25:52 -0700 Subject: [PATCH 407/431] memcg: move do_memsw_account() to CONFIG_MEMCG_V1 The do_memsw_account() is used to enable or disable legacy memory+swap accounting in memory cgroup. However with disabled CONFIG_MEMCG_V1, we don't need to keep checking it. So, let's always return false for !CONFIG_MEMCG_V1 configs. 
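The text and data savings shown below come from dead-code elimination: once do_memsw_account() is a compile-time constant false, every memsw-only branch in mm/memcontrol.c disappears. A hypothetical caller illustrates the pattern that gets dropped; the function name is made up for illustration, while the memcg->memsw page counter and page_counter_uncharge() are the real interfaces:

/*
 * Illustrative only: with !CONFIG_MEMCG_V1 the condition is constant
 * false, so the compiler removes the branch and the uncharge call.
 */
static void uncharge_memsw_example(struct mem_cgroup *memcg,
				   unsigned int nr_pages)
{
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}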
Before the patch: $ size mm/memcontrol.o text data bss dec hex filename 49928 10736 4172 64836 fd44 mm/memcontrol.o After the patch: $ size mm/memcontrol.o text data bss dec hex filename 49430 10480 4172 64082 fa52 mm/memcontrol.o Link: https://lkml.kernel.org/r/20250312222552.3284173-1-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/memcontrol-v1.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index 653ff1bad244..6358464bb416 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -22,12 +22,6 @@ iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -/* Whether legacy memory+swap accounting is active */ -static inline bool do_memsw_account(void) -{ - return !cgroup_subsys_on_dfl(memory_cgrp_subsys); -} - unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); void drain_all_stock(struct mem_cgroup *root_memcg); @@ -42,6 +36,12 @@ struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg); /* Cgroup v1-specific declarations */ #ifdef CONFIG_MEMCG_V1 +/* Whether legacy memory+swap accounting is active */ +static inline bool do_memsw_account(void) +{ + return !cgroup_subsys_on_dfl(memory_cgrp_subsys); +} + unsigned long memcg_events_local(struct mem_cgroup *memcg, int event); unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx); unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item); @@ -94,6 +94,7 @@ extern struct cftype mem_cgroup_legacy_files[]; #else /* CONFIG_MEMCG_V1 */ +static inline bool do_memsw_account(void) { return false; } static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; } static inline void memcg1_free_events(struct mem_cgroup *memcg) {} From fa23a338de93aa03eb0b6146a0440f5762309f85 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Mar 2025 13:36:11 +0000 Subject: [PATCH 408/431] mm: separate folio_split_memcg_refs() from split_page_memcg() Patch series "Minor memcg cleanups & prep for memdescs", v2. Separate the handling of accounted folios and GFP_ACCOUNT pages for easier to understand code. For more detail, see https://lore.kernel.org/linux-mm/Z9LwTOudOlCGny3f@casper.infradead.org/ This patch (of 5): Folios always use memcg_data to refer to the mem_cgroup while pages allocated with GFP_ACCOUNT have a pointer to the obj_cgroup. Since the caller already knows what it has, split the function into two and then we don't need to check. Move the assignment of split folio memcg_data to the point where we set up the other parts of the new folio. That leaves folio_split_memcg_refs() just handling the memcg accounting. 
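As a quick worked example of the reference counting (the orders are picked only for illustration): splitting one order-4 folio into order-0 folios turns one charged folio into sixteen, so fifteen additional css references are needed, which is exactly what the new helper takes:

	new_refs = (1 << (old_order - new_order)) - 1;
	/* e.g. old_order = 4, new_order = 0: (1 << 4) - 1 = 15 */
	css_get_many(&__folio_memcg(folio)->css, new_refs);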
Link: https://lkml.kernel.org/r/20250314133617.138071-1-willy@infradead.org Link: https://lkml.kernel.org/r/20250314133617.138071-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Zi Yan Acked-by: Roman Gushchin Cc: David Hildenbrand Cc: Matthew Wilcow (Oracle) Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 7 +++++++ mm/huge_memory.c | 16 ++++------------ mm/memcontrol.c | 17 +++++++++++++---- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 57664e2a8fb7..d090089c5497 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1039,6 +1039,8 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, } void split_page_memcg(struct page *head, int old_order, int new_order); +void folio_split_memcg_refs(struct folio *folio, unsigned old_order, + unsigned new_order); static inline u64 cgroup_id_from_mm(struct mm_struct *mm) { @@ -1463,6 +1465,11 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_or { } +static inline void folio_split_memcg_refs(struct folio *folio, + unsigned old_order, unsigned new_order) +{ +} + static inline u64 cgroup_id_from_mm(struct mm_struct *mm) { return 0; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 10a86b681cf1..2a47682d1ab7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3394,6 +3394,9 @@ static void __split_folio_to_order(struct folio *folio, int old_order, folio_set_young(new_folio); if (folio_test_idle(folio)) folio_set_idle(new_folio); +#ifdef CONFIG_MEMCG + new_folio->memcg_data = folio->memcg_data; +#endif folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); } @@ -3525,18 +3528,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, } } - /* - * Reset any memcg data overlay in the tail pages. - * folio_nr_pages() is unreliable until prep_compound_page() - * was called again. 
- */ -#ifdef NR_PAGES_IN_LARGE_FOLIO - folio->_nr_pages = 0; -#endif - - - /* complete memcg works before add pages to LRU */ - split_page_memcg(&folio->page, old_order, split_order); + folio_split_memcg_refs(folio, old_order, split_order); split_page_owner(&folio->page, old_order, split_order); pgalloc_tag_split(folio, old_order, split_order); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ce57660bf5a2..f267b309b5b7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3081,10 +3081,19 @@ void split_page_memcg(struct page *head, int old_order, int new_order) for (i = new_nr; i < old_nr; i += new_nr) folio_page(folio, i)->memcg_data = folio->memcg_data; - if (folio_memcg_kmem(folio)) - obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); - else - css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1); + obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); +} + +void folio_split_memcg_refs(struct folio *folio, unsigned old_order, + unsigned new_order) +{ + unsigned new_refs; + + if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) + return; + + new_refs = (1 << (old_order - new_order)) - 1; + css_get_many(&__folio_memcg(folio)->css, new_refs); } unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) From 1506c25508acd740ced5e92c539ed3d12f622c5b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Mar 2025 13:36:12 +0000 Subject: [PATCH 409/431] mm: simplify split_page_memcg() The last argument to split_page_memcg() is now always 0, so remove it, effectively reverting commit b8791381d7ed. Link: https://lkml.kernel.org/r/20250314133617.138071-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Zi Yan Acked-by: Roman Gushchin Cc: David Hildenbrand Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 4 ++-- mm/memcontrol.c | 15 +++++++-------- mm/page_alloc.c | 4 ++-- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d090089c5497..ea28cacfb0d2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1038,7 +1038,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, rcu_read_unlock(); } -void split_page_memcg(struct page *head, int old_order, int new_order); +void split_page_memcg(struct page *first, unsigned order); void folio_split_memcg_refs(struct folio *folio, unsigned old_order, unsigned new_order); @@ -1461,7 +1461,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } -static inline void split_page_memcg(struct page *head, int old_order, int new_order) +static inline void split_page_memcg(struct page *first, unsigned order) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f267b309b5b7..fa7a3a1b710a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3066,22 +3066,21 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, } /* - * Because folio_memcg(head) is not set on tails, set it now. + * The objcg is only set on the first page, so transfer it to all the + * other pages. 
*/ -void split_page_memcg(struct page *head, int old_order, int new_order) +void split_page_memcg(struct page *first, unsigned order) { - struct folio *folio = page_folio(head); - int i; - unsigned int old_nr = 1 << old_order; - unsigned int new_nr = 1 << new_order; + struct folio *folio = page_folio(first); + unsigned int i, nr = 1 << order; if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) return; - for (i = new_nr; i < old_nr; i += new_nr) + for (i = 1; i < nr; i++) folio_page(folio, i)->memcg_data = folio->memcg_data; - obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); + obj_cgroup_get_many(__folio_objcg(folio), nr - 1); } void folio_split_memcg_refs(struct folio *folio, unsigned old_order, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4337467eaf5a..a6d060eea638 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2778,7 +2778,7 @@ void split_page(struct page *page, unsigned int order) set_page_refcounted(page + i); split_page_owner(page, order, 0); pgalloc_tag_split(page_folio(page), order, 0); - split_page_memcg(page, order, 0); + split_page_memcg(page, order); } EXPORT_SYMBOL_GPL(split_page); @@ -4992,7 +4992,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, split_page_owner(page, order, 0); pgalloc_tag_split(page_folio(page), order, 0); - split_page_memcg(page, order, 0); + split_page_memcg(page, order); while (page < --last) set_page_refcounted(last); From 7cc57ecae40a68ff7204bca5e17a0241fe505ec7 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Mar 2025 13:36:13 +0000 Subject: [PATCH 410/431] mm: remove references to folio in split_page_memcg() We know that the passed in page is not part of a folio (it's a plain page allocated with GFP_ACCOUNT), so we should get rid of the misleading references to folios. Introduce page_objcg() and page_set_objcg() helpers to make things more clear. 
Link: https://lkml.kernel.org/r/20250314133617.138071-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Roman Gushchin Cc: David Hildenbrand Cc: Michal Hocko Cc: Muchun Song Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/memcontrol.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fa7a3a1b710a..d95b1f4216ac 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2677,6 +2677,23 @@ static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, return ret; } +static struct obj_cgroup *page_objcg(const struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + if (mem_cgroup_disabled() || !memcg_data) + return NULL; + + VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM, + page); + return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); +} + +static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg) +{ + page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; +} + /** * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup * @page: page to charge @@ -2695,8 +2712,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); if (!ret) { obj_cgroup_get(objcg); - page->memcg_data = (unsigned long)objcg | - MEMCG_DATA_KMEM; + page_set_objcg(page, objcg); return 0; } } @@ -3069,18 +3085,18 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, * The objcg is only set on the first page, so transfer it to all the * other pages. */ -void split_page_memcg(struct page *first, unsigned order) +void split_page_memcg(struct page *page, unsigned order) { - struct folio *folio = page_folio(first); + struct obj_cgroup *objcg = page_objcg(page); unsigned int i, nr = 1 << order; - if (mem_cgroup_disabled() || !folio_memcg_charged(folio)) + if (!objcg) return; for (i = 1; i < nr; i++) - folio_page(folio, i)->memcg_data = folio->memcg_data; + page_set_objcg(&page[i], objcg); - obj_cgroup_get_many(__folio_objcg(folio), nr - 1); + obj_cgroup_get_many(objcg, nr - 1); } void folio_split_memcg_refs(struct folio *folio, unsigned old_order, From 8492936abb49eda26e00eab01e58d7e69f2355ba Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Mar 2025 13:36:14 +0000 Subject: [PATCH 411/431] mm: simplify folio_memcg_charged() There's no need to check which kind of pointer is in the memcg_data field, all we actually care about is whether it's zero or not. Saves 70 bytes in workingset_activation() with the Debian config. 
Link: https://lkml.kernel.org/r/20250314133617.138071-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Roman Gushchin Cc: David Hildenbrand Cc: Michal Hocko Cc: Muchun Song Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ea28cacfb0d2..53364526d877 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -438,9 +438,7 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) */ static inline bool folio_memcg_charged(struct folio *folio) { - if (folio_memcg_kmem(folio)) - return __folio_objcg(folio) != NULL; - return __folio_memcg(folio) != NULL; + return folio->memcg_data != 0; } /* From 0d2a2605237341f9dfde99cd0ed3c2d003322464 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Mar 2025 13:36:15 +0000 Subject: [PATCH 412/431] mm: remove references to folio in __memcg_kmem_uncharge_page() This use of folios is misleading because these pages are not part of a folio. Remove an unnecessary call to page_folio(), saving 58 bytes of text in a Debian kernel build. Link: https://lkml.kernel.org/r/20250314133617.138071-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Johannes Weiner Acked-by: Shakeel Butt Acked-by: Roman Gushchin Cc: David Hildenbrand Cc: Michal Hocko Cc: Muchun Song Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/memcontrol.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d95b1f4216ac..57cf5a6c279c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2726,16 +2726,14 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) */ void __memcg_kmem_uncharge_page(struct page *page, int order) { - struct folio *folio = page_folio(page); - struct obj_cgroup *objcg; + struct obj_cgroup *objcg = page_objcg(page); unsigned int nr_pages = 1 << order; - if (!folio_memcg_kmem(folio)) + if (!objcg) return; - objcg = __folio_objcg(folio); obj_cgroup_uncharge_pages(objcg, nr_pages); - folio->memcg_data = 0; + page->memcg_data = 0; obj_cgroup_put(objcg); } From 835de37603ef6412949df0a607128f5fffed4576 Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Fri, 14 Mar 2025 15:37:54 -0600 Subject: [PATCH 413/431] meminfo: add a per node counter for balloon drivers Patch series "track memory used by balloon drivers", v2. This series introduces a way to track memory used by balloon drivers. Add a NR_BALLOON_PAGES counter to track how many pages are reclaimed by the balloon drivers. First add the accounting, then updates the balloon drivers (virtio, Hyper-V, VMware, Pseries-cmm, and Xen) to maintain this counter. The virtio, Vmware, and pseries-cmm balloon drivers utilize the balloon_compaction interface to allocate and free balloon pages. Other balloon drivers will have to maintain this counter manually. This makes the information visible in memory reporting interfaces like /proc/meminfo, show_mem, and OOM reporting. This provides admins visibility into their VM balloon sizes without requiring different virtualization tooling. Furthermore, this information is helpful when debugging an OOM inside a VM. This patch (of 4): Add NR_BALLOON_PAGES counter to track memory used by balloon drivers and expose it through /proc/meminfo and other memory reporting interfaces. 
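Balloon drivers that do not use the balloon_compaction interface have to maintain the counter by hand, as noted above. A minimal sketch for a hypothetical driver that inflates and deflates one page at a time follows; the driver functions are invented, while inc_node_page_state()/dec_node_page_state() and NR_BALLOON_PAGES are the real interfaces this patch provides:

/* Hypothetical driver code, not taken from any in-tree balloon driver. */
static void example_balloon_inflate_page(struct page *page)
{
	/* The page is handed to the host: account it as ballooned. */
	inc_node_page_state(page, NR_BALLOON_PAGES);
}

static void example_balloon_deflate_page(struct page *page)
{
	/* The page is given back to the guest: drop it from the counter. */
	dec_node_page_state(page, NR_BALLOON_PAGES);
}

Drivers that adjust many pages at once can instead use mod_node_page_state(page_pgdat(page), NR_BALLOON_PAGES, nr), as the Hyper-V patch later in this series does.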
[npache@redhat.com: document Balloon Meminfo entry] Link: https://lkml.kernel.org/r/a0315ccf-f244-460e-8643-fd7388724fe5@redhat.com Link: https://lkml.kernel.org/r/20250314213757.244258-1-npache@redhat.com Link: https://lkml.kernel.org/r/20250314213757.244258-2-npache@redhat.com Signed-off-by: Nico Pache Cc: Alexander Atanasov Cc: Chengming Zhou Cc: David Hildenbrand Cc: Dexuan Cui Cc: Haiyang Zhang Cc: Johannes Weiner Cc: Juegren Gross Cc: Kanchana P Sridhar Cc: K. Y. Srinivasan Cc: "Michael S. Tsirkin" Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Oleksandr Tyshchenko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Stefano Stabellini Cc: Wei Liu Cc: Michael Kelley Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 3 +++ fs/proc/meminfo.c | 2 ++ include/linux/mmzone.h | 1 + mm/show_mem.c | 4 +++- mm/vmstat.c | 1 + 5 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 3c37b248fc4f..80f33bce5fe1 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1081,6 +1081,7 @@ Example output. You may not have all of these fields. FilePmdMapped: 0 kB CmaTotal: 0 kB CmaFree: 0 kB + Balloon: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 @@ -1255,6 +1256,8 @@ CmaTotal Memory reserved for the Contiguous Memory Allocator (CMA) CmaFree Free remaining memory in the CMA reserves +Balloon + Memory returned to Host by VM Balloon Drivers HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp, Hugepagesize, Hugetlb See Documentation/admin-guide/mm/hugetlbpage.rst. DirectMap4k, DirectMap2M, DirectMap1G diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 8ba9b1472390..83be312159c9 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -162,6 +162,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "Unaccepted: ", global_zone_page_state(NR_UNACCEPTED)); #endif + show_val_kb(m, "Balloon: ", + global_node_page_state(NR_BALLOON_PAGES)); hugetlb_report_meminfo(m); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 37c29f3fbca8..a9db0fbd2b94 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -224,6 +224,7 @@ enum node_stat_item { #ifdef CONFIG_HUGETLB_PAGE NR_HUGETLB, #endif + NR_BALLOON_PAGES, NR_VM_NODE_STAT_ITEMS }; diff --git a/mm/show_mem.c b/mm/show_mem.c index 43afb56abbd3..6af13bcd2ab3 100644 --- a/mm/show_mem.c +++ b/mm/show_mem.c @@ -260,6 +260,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z " pagetables:%lukB" " sec_pagetables:%lukB" " all_unreclaimable? 
%s" + " Balloon:%lukB" "\n", pgdat->node_id, K(node_page_state(pgdat, NR_ACTIVE_ANON)), @@ -285,7 +286,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z #endif K(node_page_state(pgdat, NR_PAGETABLE)), K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), - str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)); + str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES), + K(node_page_state(pgdat, NR_BALLOON_PAGES))); } for_each_populated_zone(zone) { diff --git a/mm/vmstat.c b/mm/vmstat.c index ed49a86348f7..ae0e4259ac23 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1277,6 +1277,7 @@ const char * const vmstat_text[] = { #ifdef CONFIG_HUGETLB_PAGE "nr_hugetlb", #endif + "nr_balloon_pages", /* system-wide enum vm_stat_item counters */ "nr_dirty_threshold", "nr_dirty_background_threshold", From 4d689474e1b263aa14e93f3995cf0b6cd1910f74 Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Fri, 14 Mar 2025 15:37:55 -0600 Subject: [PATCH 414/431] balloon_compaction: update the NR_BALLOON_PAGES state Update the NR_BALLOON_PAGES counter when pages are added or removed using the balloon compaction interface. The virtio, Vmware, and pseries-cmm balloon drivers utilize the balloon_compaction interface to allocate and free balloon pages. Other balloon drivers will have to maintain this counter manually. Link: https://lkml.kernel.org/r/20250314213757.244258-3-npache@redhat.com Signed-off-by: Nico Pache Cc: Alexander Atanasov Cc: Chengming Zhou Cc: David Hildenbrand Cc: Dexuan Cui Cc: Haiyang Zhang Cc: Johannes Weiner Cc: Juegren Gross Cc: Kanchana P Sridhar Cc: K. Y. Srinivasan Cc: "Michael S. Tsirkin" Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Oleksandr Tyshchenko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Stefano Stabellini Cc: Wei Liu Cc: Michael Kelley Signed-off-by: Andrew Morton --- mm/balloon_compaction.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 6597ebea8ae2..d3e00731e262 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -24,6 +24,7 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, balloon_page_insert(b_dev_info, page); unlock_page(page); __count_vm_event(BALLOON_INFLATE); + inc_node_page_state(page, NR_BALLOON_PAGES); } /** @@ -103,6 +104,7 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, __count_vm_event(BALLOON_DEFLATE); list_add(&page->lru, pages); unlock_page(page); + dec_node_page_state(page, NR_BALLOON_PAGES); n_pages++; } spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); From 02ec35963bc830f17c2aaa6f1008dcf7745c7c17 Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Fri, 14 Mar 2025 15:37:56 -0600 Subject: [PATCH 415/431] hv_balloon: update the NR_BALLOON_PAGES state Update the NR_BALLOON_PAGES counter when pages are added to or removed from the Hyper-V balloon. Link: https://lkml.kernel.org/r/20250314213757.244258-4-npache@redhat.com Signed-off-by: Nico Pache Reviewed-by: Michael Kelley Cc: Alexander Atanasov Cc: Chengming Zhou Cc: David Hildenbrand Cc: Dexuan Cui Cc: Haiyang Zhang Cc: Johannes Weiner Cc: Juegren Gross Cc: Kanchana P Sridhar Cc: K. Y. Srinivasan Cc: "Michael S. 
Tsirkin" Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Oleksandr Tyshchenko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Stefano Stabellini Cc: Wei Liu Signed-off-by: Andrew Morton --- drivers/hv/hv_balloon.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index fec2f18679e3..2b4080e51f97 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1192,6 +1192,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm, __ClearPageOffline(pg); __free_page(pg); dm->num_pages_ballooned--; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, -1); adjust_managed_page_count(pg, 1); } } @@ -1221,6 +1222,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, return i * alloc_unit; dm->num_pages_ballooned += alloc_unit; + mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, alloc_unit); /* * If we allocatted 2M pages; split them so we From f6a09e6800936c6c9ba5667ac3efc18feb8f3a2f Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Fri, 14 Mar 2025 15:37:57 -0600 Subject: [PATCH 416/431] xen: balloon: update the NR_BALLOON_PAGES state Update the NR_BALLOON_PAGES counter when pages are added to or removed from the Xen balloon. Link: https://lkml.kernel.org/r/20250314213757.244258-5-npache@redhat.com Signed-off-by: Nico Pache Reviewed-by: Juergen Gross Cc: Alexander Atanasov Cc: Chengming Zhou Cc: David Hildenbrand Cc: Dexuan Cui Cc: Haiyang Zhang Cc: Johannes Weiner Cc: Juegren Gross Cc: Kanchana P Sridhar Cc: K. Y. Srinivasan Cc: "Michael S. Tsirkin" Cc: Michal Hocko Cc: Muchun Song Cc: Nhat Pham Cc: Oleksandr Tyshchenko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Stefano Stabellini Cc: Wei Liu Cc: Michael Kelley Signed-off-by: Andrew Morton --- drivers/xen/balloon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 163f7f1d70f1..65d4e7fa1eb8 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -157,6 +157,8 @@ static void balloon_append(struct page *page) list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } + inc_node_page_state(page, NR_BALLOON_PAGES); + wake_up(&balloon_wq); } @@ -179,6 +181,8 @@ static struct page *balloon_retrieve(bool require_lowmem) balloon_stats.balloon_low--; __ClearPageOffline(page); + dec_node_page_state(page, NR_BALLOON_PAGES); + return page; } From 9f171d94be80d80067c6445e53b00d098150391e Mon Sep 17 00:00:00 2001 From: Jiwen Qi Date: Sat, 15 Mar 2025 21:13:17 +0000 Subject: [PATCH 417/431] docs/mm: Physical Memory: Populate the "Zones" section Briefly describe what zones are and the fields of struct zone. Link: https://lkml.kernel.org/r/20250315211317.27612-1-jiwen7.qi@gmail.com Signed-off-by: Jiwen Qi Acked-by: Mike Rapoport (Microsoft) Cc: Bagas Sanjaya Cc: Jonathan Corbet Cc: Randy Dunlap Signed-off-by: Andrew Morton --- Documentation/mm/physical_memory.rst | 266 ++++++++++++++++++++++++++- 1 file changed, 264 insertions(+), 2 deletions(-) diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst index 71fd4a6acf42..d3ac106e6b14 100644 --- a/Documentation/mm/physical_memory.rst +++ b/Documentation/mm/physical_memory.rst @@ -338,10 +338,272 @@ Statistics Zones ===== +As we have mentioned, each zone in memory is described by a ``struct zone`` +which is an element of the ``node_zones`` array of the node it belongs to. +``struct zone`` is the core data structure of the page allocator. A zone +represents a range of physical memory and may have holes. -.. 
admonition:: Stub +The page allocator uses the GFP flags, see :ref:`mm-api-gfp-flags`, specified by +a memory allocation to determine the highest zone in a node from which the +memory allocation can allocate memory. The page allocator first allocates memory +from that zone, if the page allocator can't allocate the requested amount of +memory from the zone, it will allocate memory from the next lower zone in the +node, the process continues up to and including the lowest zone. For example, if +a node contains ``ZONE_DMA32``, ``ZONE_NORMAL`` and ``ZONE_MOVABLE`` and the +highest zone of a memory allocation is ``ZONE_MOVABLE``, the order of the zones +from which the page allocator allocates memory is ``ZONE_MOVABLE`` > +``ZONE_NORMAL`` > ``ZONE_DMA32``. - This section is incomplete. Please list and describe the appropriate fields. +At runtime, free pages in a zone are in the Per-CPU Pagesets (PCP) or free areas +of the zone. The Per-CPU Pagesets are a vital mechanism in the kernel's memory +management system. By handling most frequent allocations and frees locally on +each CPU, the Per-CPU Pagesets improve performance and scalability, especially +on systems with many cores. The page allocator in the kernel employs a two-step +strategy for memory allocation, starting with the Per-CPU Pagesets before +falling back to the buddy allocator. Pages are transferred between the Per-CPU +Pagesets and the global free areas (managed by the buddy allocator) in batches. +This minimizes the overhead of frequent interactions with the global buddy +allocator. + +Architecture specific code calls free_area_init() to initializes zones. + +Zone structure +-------------- +The zones structure ``struct zone`` is defined in ``include/linux/mmzone.h``. +Here we briefly describe fields of this structure: + +General +~~~~~~~ + +``_watermark`` + The watermarks for this zone. When the amount of free pages in a zone is below + the min watermark, boosting is ignored, an allocation may trigger direct + reclaim and direct compaction, it is also used to throttle direct reclaim. + When the amount of free pages in a zone is below the low watermark, kswapd is + woken up. When the amount of free pages in a zone is above the high watermark, + kswapd stops reclaiming (a zone is balanced) when the + ``NUMA_BALANCING_MEMORY_TIERING`` bit of ``sysctl_numa_balancing_mode`` is not + set. The promo watermark is used for memory tiering and NUMA balancing. When + the amount of free pages in a zone is above the promo watermark, kswapd stops + reclaiming when the ``NUMA_BALANCING_MEMORY_TIERING`` bit of + ``sysctl_numa_balancing_mode`` is set. The watermarks are set by + ``__setup_per_zone_wmarks()``. The min watermark is calculated according to + ``vm.min_free_kbytes`` sysctl. The other three watermarks are set according + to the distance between two watermarks. The distance itself is calculated + taking ``vm.watermark_scale_factor`` sysctl into account. + +``watermark_boost`` + The number of pages which are used to boost watermarks to increase reclaim + pressure to reduce the likelihood of future fallbacks and wake kswapd now + as the node may be balanced overall and kswapd will not wake naturally. + +``nr_reserved_highatomic`` + The number of pages which are reserved for high-order atomic allocations. + +``nr_free_highatomic`` + The number of free pages in reserved highatomic pageblocks + +``lowmem_reserve`` + The array of the amounts of the memory reserved in this zone for memory + allocations. 
For example, if the highest zone a memory allocation can + allocate memory from is ``ZONE_MOVABLE``, the amount of memory reserved in + this zone for this allocation is ``lowmem_reserve[ZONE_MOVABLE]`` when + attempting to allocate memory from this zone. This is a mechanism the page + allocator uses to prevent allocations which could use ``highmem`` from using + too much ``lowmem``. For some specialised workloads on ``highmem`` machines, + it is dangerous for the kernel to allow process memory to be allocated from + the ``lowmem`` zone. This is because that memory could then be pinned via the + ``mlock()`` system call, or by unavailability of swapspace. + ``vm.lowmem_reserve_ratio`` sysctl determines how aggressive the kernel is in + defending these lower zones. This array is recalculated by + ``setup_per_zone_lowmem_reserve()`` at runtime if ``vm.lowmem_reserve_ratio`` + sysctl changes. + +``node`` + The index of the node this zone belongs to. Available only when + ``CONFIG_NUMA`` is enabled because there is only one zone in a UMA system. + +``zone_pgdat`` + Pointer to the ``struct pglist_data`` of the node this zone belongs to. + +``per_cpu_pageset`` + Pointer to the Per-CPU Pagesets (PCP) allocated and initialized by + ``setup_zone_pageset()``. By handling most frequent allocations and frees + locally on each CPU, PCP improves performance and scalability on systems with + many cores. + +``pageset_high_min`` + Copied to the ``high_min`` of the Per-CPU Pagesets for faster access. + +``pageset_high_max`` + Copied to the ``high_max`` of the Per-CPU Pagesets for faster access. + +``pageset_batch`` + Copied to the ``batch`` of the Per-CPU Pagesets for faster access. The + ``batch``, ``high_min`` and ``high_max`` of the Per-CPU Pagesets are used to + calculate the number of elements the Per-CPU Pagesets obtain from the buddy + allocator under a single hold of the lock for efficiency. They are also used + to decide if the Per-CPU Pagesets return pages to the buddy allocator in page + free process. + +``pageblock_flags`` + The pointer to the flags for the pageblocks in the zone (see + ``include/linux/pageblock-flags.h`` for flags list). The memory is allocated + in ``setup_usemap()``. Each pageblock occupies ``NR_PAGEBLOCK_BITS`` bits. + Defined only when ``CONFIG_FLATMEM`` is enabled. The flags is stored in + ``mem_section`` when ``CONFIG_SPARSEMEM`` is enabled. + +``zone_start_pfn`` + The start pfn of the zone. It is initialized by + ``calculate_node_totalpages()``. + +``managed_pages`` + The present pages managed by the buddy system, which is calculated as: + ``managed_pages`` = ``present_pages`` - ``reserved_pages``, ``reserved_pages`` + includes pages allocated by the memblock allocator. It should be used by page + allocator and vm scanner to calculate all kinds of watermarks and thresholds. + It is accessed using ``atomic_long_xxx()`` functions. It is initialized in + ``free_area_init_core()`` and then is reinitialized when memblock allocator + frees pages into buddy system. + +``spanned_pages`` + The total pages spanned by the zone, including holes, which is calculated as: + ``spanned_pages`` = ``zone_end_pfn`` - ``zone_start_pfn``. It is initialized + by ``calculate_node_totalpages()``. + +``present_pages`` + The physical pages existing within the zone, which is calculated as: + ``present_pages`` = ``spanned_pages`` - ``absent_pages`` (pages in holes). 
It + may be used by memory hotplug or memory power management logic to figure out + unmanaged pages by checking (``present_pages`` - ``managed_pages``). Write + access to ``present_pages`` at runtime should be protected by + ``mem_hotplug_begin/done()``. Any reader who can't tolerant drift of + ``present_pages`` should use ``get_online_mems()`` to get a stable value. It + is initialized by ``calculate_node_totalpages()``. + +``present_early_pages`` + The present pages existing within the zone located on memory available since + early boot, excluding hotplugged memory. Defined only when + ``CONFIG_MEMORY_HOTPLUG`` is enabled and initialized by + ``calculate_node_totalpages()``. + +``cma_pages`` + The pages reserved for CMA use. These pages behave like ``ZONE_MOVABLE`` when + they are not used for CMA. Defined only when ``CONFIG_CMA`` is enabled. + +``name`` + The name of the zone. It is a pointer to the corresponding element of + the ``zone_names`` array. + +``nr_isolate_pageblock`` + Number of isolated pageblocks. It is used to solve incorrect freepage counting + problem due to racy retrieving migratetype of pageblock. Protected by + ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled. + +``span_seqlock`` + The seqlock to protect ``zone_start_pfn`` and ``spanned_pages``. It is a + seqlock because it has to be read outside of ``zone->lock``, and it is done in + the main allocator path. However, the seqlock is written quite infrequently. + Defined only when ``CONFIG_MEMORY_HOTPLUG`` is enabled. + +``initialized`` + The flag indicating if the zone is initialized. Set by + ``init_currently_empty_zone()`` during boot. + +``free_area`` + The array of free areas, where each element corresponds to a specific order + which is a power of two. The buddy allocator uses this structure to manage + free memory efficiently. When allocating, it tries to find the smallest + sufficient block, if the smallest sufficient block is larger than the + requested size, it will be recursively split into the next smaller blocks + until the required size is reached. When a page is freed, it may be merged + with its buddy to form a larger block. It is initialized by + ``zone_init_free_lists()``. + +``unaccepted_pages`` + The list of pages to be accepted. All pages on the list are ``MAX_PAGE_ORDER``. + Defined only when ``CONFIG_UNACCEPTED_MEMORY`` is enabled. + +``flags`` + The zone flags. The least three bits are used and defined by + ``enum zone_flags``. ``ZONE_BOOSTED_WATERMARK`` (bit 0): zone recently boosted + watermarks. Cleared when kswapd is woken. ``ZONE_RECLAIM_ACTIVE`` (bit 1): + kswapd may be scanning the zone. ``ZONE_BELOW_HIGH`` (bit 2): zone is below + high watermark. + +``lock`` + The main lock that protects the internal data structures of the page allocator + specific to the zone, especially protects ``free_area``. + +``percpu_drift_mark`` + When free pages are below this point, additional steps are taken when reading + the number of free pages to avoid per-cpu counter drift allowing watermarks + to be breached. It is updated in ``refresh_zone_stat_thresholds()``. + +Compaction control +~~~~~~~~~~~~~~~~~~ + +``compact_cached_free_pfn`` + The PFN where compaction free scanner should start in the next scan. + +``compact_cached_migrate_pfn`` + The PFNs where compaction migration scanner should start in the next scan. + This array has two elements: the first one is used in ``MIGRATE_ASYNC`` mode, + and the other one is used in ``MIGRATE_SYNC`` mode. 
+ +``compact_init_migrate_pfn`` + The initial migration PFN which is initialized to 0 at boot time, and to the + first pageblock with migratable pages in the zone after a full compaction + finishes. It is used to check if a scan is a whole zone scan or not. + +``compact_init_free_pfn`` + The initial free PFN which is initialized to 0 at boot time and to the last + pageblock with free ``MIGRATE_MOVABLE`` pages in the zone. It is used to check + if it is the start of a scan. + +``compact_considered`` + The number of compactions attempted since last failure. It is reset in + ``defer_compaction()`` when a compaction fails to result in a page allocation + success. It is increased by 1 in ``compaction_deferred()`` when a compaction + should be skipped. ``compaction_deferred()`` is called before + ``compact_zone()`` is called, ``compaction_defer_reset()`` is called when + ``compact_zone()`` returns ``COMPACT_SUCCESS``, ``defer_compaction()`` is + called when ``compact_zone()`` returns ``COMPACT_PARTIAL_SKIPPED`` or + ``COMPACT_COMPLETE``. + +``compact_defer_shift`` + The number of compactions skipped before trying again is + ``1< Date: Mon, 17 Mar 2025 17:36:14 +0100 Subject: [PATCH 418/431] fork: use __vmalloc_node() for stack allocation Replace __vmalloc_node_range() by __vmalloc_node(). The last variant requires less parameters and it uses exactly the same arguments which are partly now hidden inside __vmalloc_node(). This change does not change any functionality. It makes the code a bit simpler. Link: https://lkml.kernel.org/r/20250317163614.166502-1-urezki@gmail.com Signed-off-by: Uladzislau Rezki (Sony) Acked-by: Michal Hocko Cc: Christian Brauner Cc: Oleg Nesterov Cc: Tejun Heo Signed-off-by: Andrew Morton --- kernel/fork.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index f9cf0f056eb6..83cb82643817 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -311,11 +311,9 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node) * so memcg accounting is performed manually on assigning/releasing * stacks to tasks. Drop __GFP_ACCOUNT. */ - stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, - VMALLOC_START, VMALLOC_END, + stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP & ~__GFP_ACCOUNT, - PAGE_KERNEL, - 0, node, __builtin_return_address(0)); + node, __builtin_return_address(0)); if (!stack) return -ENOMEM; From d8a866c766ebe34b4371d42f4a3edca200f5e645 Mon Sep 17 00:00:00 2001 From: Brendan Jackman Date: Mon, 17 Mar 2025 10:20:34 +0000 Subject: [PATCH 419/431] selftests/mm: add commentary about 9pfs bugs As discussed here: https://lore.kernel.org/lkml/Z9RRkL1hom48z3Tt@google.com/ This code could benefit from some more commentary. To avoid needing to comment the same thing in multiple places (I guess more of these SKIPs will need to be added over time, for now I am only like 20% of the way through Project Run run_vmtests.sh Successfully), add a dummy "skip tests for this specific reason" function that basically just serves as a hook to hang comments on. 
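A sketch of how another mm selftest would use the new hook, mirroring the gup_longterm.c and map_populate.c conversions below (the surrounding test code here is hypothetical):

	/* Hypothetical call site in some other mm selftest. */
	if (ftruncate(fd, size) && errno == ENOENT) {
		/* Likely a 9pfs-style bug, not a real test failure. */
		skip_test_dodgy_fs("ftruncate()");
		return;
	}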
Link: https://lkml.kernel.org/r/20250317-9pfs-comments-v1-1-9ac96043e146@google.com Signed-off-by: Brendan Jackman Cc: David Hildenbrand Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/gup_longterm.c | 6 +----- tools/testing/selftests/mm/map_populate.c | 8 +++----- tools/testing/selftests/mm/vm_util.h | 18 ++++++++++++++++++ 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 03271442aae5..21595b20bbc3 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -97,11 +97,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared) if (ftruncate(fd, size)) { if (errno == ENOENT) { - /* - * This can happen if the file has been unlinked and the - * filesystem doesn't support truncating unlinked files. - */ - ksft_test_result_skip("ftruncate() failed with ENOENT\n"); + skip_test_dodgy_fs("ftruncate()"); } else { ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno)); } diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c index 433e54fb634f..9df2636c829b 100644 --- a/tools/testing/selftests/mm/map_populate.c +++ b/tools/testing/selftests/mm/map_populate.c @@ -18,6 +18,8 @@ #include #include "../kselftest.h" +#include "vm_util.h" + #define MMAP_SZ 4096 #define BUG_ON(condition, description) \ @@ -88,11 +90,7 @@ int main(int argc, char **argv) ret = ftruncate(fileno(ftmp), MMAP_SZ); if (ret < 0 && errno == ENOENT) { - /* - * This probably means tmpfile() made a file on a filesystem - * that doesn't handle temporary files the way we want. - */ - ksft_exit_skip("ftruncate(fileno(tmpfile())) gave ENOENT, weird filesystem?\n"); + skip_test_dodgy_fs("ftruncate()"); } BUG_ON(ret, "ftruncate()"); diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index 0e629586556b..6effafdc4d8a 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -5,6 +5,7 @@ #include #include /* ffsl() */ #include /* _SC_PAGESIZE */ +#include "../kselftest.h" #define BIT_ULL(nr) (1ULL << (nr)) #define PM_SOFT_DIRTY BIT_ULL(55) @@ -32,6 +33,23 @@ static inline unsigned int pshift(void) return __page_shift; } +/* + * Plan 9 FS has bugs (at least on QEMU) where certain operations fail with + * ENOENT on unlinked files. See + * https://gitlab.com/qemu-project/qemu/-/issues/103 for some info about such + * bugs. There are rumours of NFS implementations with similar bugs. + * + * Ideally, tests should just detect filesystems known to have such issues and + * bail early. But 9pfs has the additional "feature" that it causes fstatfs to + * pass through the f_type field from the host filesystem. To avoid having to + * scrape /proc/mounts or some other hackery, tests can call this function when + * it seems such a bug might have been encountered. + */ +static inline void skip_test_dodgy_fs(const char *op_name) +{ + ksft_test_result_skip("%s failed with ENOENT. 
Filesystem might be buggy (9pfs?)\n", op_name); +} + uint64_t pagemap_get_entry(int fd, char *start); bool pagemap_is_softdirty(int fd, char *start); bool pagemap_is_swapped(int fd, char *start); From 0bfd4586855cf6919d025b5914be212fd4cd27b5 Mon Sep 17 00:00:00 2001 From: Nico Pache Date: Mon, 17 Mar 2025 17:04:03 -0600 Subject: [PATCH 420/431] MM documentation: add "Unaccepted" meminfo entry Commit dcdfdd40fa82 ("mm: Add support for unaccepted memory") added a entry to meminfo but did not document it in the proc.rst file. This counter tracks the amount of "Unaccepted" guest memory for some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP. Add the missing entry in the documentation. Link: https://lkml.kernel.org/r/20250317230403.79632-1-npache@redhat.com Signed-off-by: Nico Pache Acked-by: Kirill A. Shutemov Acked-by: David Hildenbrand Cc: Andrii Nakryiko Cc: Catalin Marinas Cc: Jeff Xu Cc: Jonathan Corbet Cc: Pasha Tatashin Cc: Suren Baghdasaryan Cc: xu xin Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 80f33bce5fe1..f97692b31a2d 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1081,6 +1081,7 @@ Example output. You may not have all of these fields. FilePmdMapped: 0 kB CmaTotal: 0 kB CmaFree: 0 kB + Unaccepted: 0 kB Balloon: 0 kB HugePages_Total: 0 HugePages_Free: 0 @@ -1256,6 +1257,8 @@ CmaTotal Memory reserved for the Contiguous Memory Allocator (CMA) CmaFree Free remaining memory in the CMA reserves +Unaccepted + Memory that has not been accepted by the guest Balloon Memory returned to Host by VM Balloon Drivers HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp, Hugepagesize, Hugetlb From 98c183a4fccf3b855fa64005a8b6d892570dfd66 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Tue, 18 Mar 2025 18:33:01 -0700 Subject: [PATCH 421/431] fs/dax: don't disassociate zero page entries Prior to commit 38607c62b34b ("fs/dax: properly refcount fs dax pages") dax_associate_entry() and dax_disassociate_entry() would implicitly skip zero and empty dax entries using the for_each_mapped_pfn() macro. The use of compound ZONE_DEVICE folios removed the need for this macro and so it was removed, leading dax_folio_put() to be called on zero pages. This lead to the below warning. To fix this explicitly skip zero and empty entries in dax_associate/disassociate_entry(). 
[ 27.536963] ------------[ cut here ]------------ [ 27.537674] WARNING: CPU: 11 PID: 874 at fs/dax.c:415 dax_folio_put.isra.0+0x10d/0x170 [ 27.538844] Modules linked in: nd_pmem nd_btt nd_e820 libnvdimm [ 27.539732] CPU: 11 UID: 0 PID: 874 Comm: ctl_prefault Tainted: G W 6.14.0-rc2+ #1104 [ 27.541093] Tainted: [W]=WARN [ 27.541549] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/204 [ 27.543197] RIP: 0010:dax_folio_put.isra.0+0x10d/0x170 [ 27.543970] Code: 20 48 85 c0 0f 84 29 ff ff ff 48 83 e8 01 48 89 47 20 0f 84 1b ff ff ff 48 83 c4 10 5b 5d 41 5c c3 cc cc4 [ 27.546723] RSP: 0000:ffff961e4102fae0 EFLAGS: 00010002 [ 27.547505] RAX: 0000000000000001 RBX: ffffc9cce4e18000 RCX: 0000000000000009 [ 27.548564] RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8a2a7badca40 [ 27.549630] RBP: ffffc9cce4e18000 R08: 0000000000009ffb R09: 00000000ffffdfff [ 27.550691] R10: 00000000ffffdfff R11: ffffffffa4e823a0 R12: 0000000000000000 [ 27.551748] R13: 0000000000000000 R14: 0000000010f10005 R15: 0000000000000004 [ 27.552819] FS: 00007f5f539d74c0(0000) GS:ffff8a2a7bac0000(0000) knlGS:0000000000000000 [ 27.554015] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 27.554873] CR2: 00007f5f52e00000 CR3: 0000000909340000 CR4: 00000000000006f0 [ 27.555938] Call Trace: [ 27.556318] [ 27.556650] ? __warn+0x91/0x190 [ 27.557146] ? dax_folio_put.isra.0+0x10d/0x170 [ 27.557824] ? report_bug+0x164/0x190 [ 27.558378] ? handle_bug+0x54/0x90 [ 27.558898] ? exc_invalid_op+0x17/0x70 [ 27.559489] ? asm_exc_invalid_op+0x1a/0x20 [ 27.560125] ? dax_folio_put.isra.0+0x10d/0x170 [ 27.560808] dax_insert_entry+0x1e1/0x420 [ 27.561419] dax_fault_iter+0x252/0x860 [ 27.561995] dax_iomap_pmd_fault+0x23c/0x4a0 [ 27.562651] ext4_dax_huge_fault+0x1e2/0x450 [ 27.563296] __handle_mm_fault+0x6c8/0x12b0 [ 27.563920] ? do_user_addr_fault+0x1ca/0x670 [ 27.564577] ? lock_vma_under_rcu+0x178/0x3b0 [ 27.565235] handle_mm_fault+0xe5/0x290 [ 27.565816] do_user_addr_fault+0x208/0x670 [ 27.566446] exc_page_fault+0x6d/0x230 [ 27.567008] asm_exc_page_fault+0x26/0x30 [ 27.567610] RIP: 0033:0x7f5f543bcb4f [ 27.568152] Code: 45 f0 48 8b 45 f0 48 8b 4d f8 48 03 41 18 48 89 45 e8 48 8b 45 f0 48 3b 45 e8 0f 83 97 00 00 00 48 8b 458 [ 27.570895] RSP: 002b:00007ffc2d774460 EFLAGS: 00010287 [ 27.571672] RAX: 00007f5f52e00000 RBX: 0000000000200000 RCX: 000055760153fc00 [ 27.572731] RDX: 0000000000000000 RSI: 0000557601542a20 RDI: 000055760153fc00 [ 27.573787] RBP: 00007ffc2d774460 R08: 0000000000000000 R09: 0000000000000073 [ 27.574840] R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffc2d77534b [ 27.575897] R13: 00007ffc2d774aa0 R14: 0000000000800000 R15: 0000000000800000 [ 27.576961] [ 27.577301] irq event stamp: 13394 [ 27.577810] hardirqs last enabled at (13393): [] flush_tlb_mm_range+0x1c0/0x220 [ 27.579138] hardirqs last disabled at (13394): [] _raw_spin_lock_irq+0x47/0x50 [ 27.580428] softirqs last enabled at (12530): [] xs_tcp_send_request+0x22a/0x2e0 [ 27.581762] softirqs last disabled at (12528): [] release_sock+0x1d/0xb0 [ 27.582986] ---[ end trace 0000000000000000 ]--- Link: https://lkml.kernel.org/r/20250319013301.369822-1-apopple@nvidia.com Signed-off-by: Alistair Popple Fixes: 38607c62b34b ("fs/dax: properly refcount fs dax pages") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202503102229.122fbd6c-lkp@intel.com Cc: Dan Williams Cc: Alison Schofield Cc: David Hildenbrand Cc: Balbir Singh Cc: "Darrick J. 
Wong" Cc: Dave Hansen Cc: Jason Gunthorpe Cc: John Hubbard Signed-off-by: Andrew Morton --- fs/dax.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index cf96f3dd4e5f..464c2badc135 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -445,6 +445,9 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, unsigned long size = dax_entry_size(entry), index; struct folio *folio = dax_to_folio(entry); + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return; + if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; @@ -473,6 +476,9 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping, if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return; + dax_folio_put(folio); } @@ -1074,8 +1080,7 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, void *old; dax_disassociate_entry(entry, mapping, false); - if (!(flags & DAX_ZERO_PAGE)) - dax_associate_entry(new_entry, mapping, vmf->vma, + dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, shared); /* From 3b23a44f1f196741596616082e759f7f3a400e78 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 18 Mar 2025 11:30:28 -0700 Subject: [PATCH 422/431] mm/damon: implement a new DAMOS filter type for active pages Patch series "mm/damon: introduce DAMOS filter type for active pages". The memory reclaim algorithm categorizes pages into active and inactive lists, separately for file and anon pages. The system's performance relies heavily on the (relative and absolute) accuracy of this categorization. This patch series add a new DAMOS filter for pages' activeness, giving us visibility into the access frequency of the pages on each list. This insight can help us diagnose issues with the active-inactive balancing dynamics, and make decisions to optimize reclaim efficiency and memory utilization. For instance, we might decide to enable DAMON_LRU_SORT, if we find that there are pages on the active list that are infrequently accessed, or less frequently accessed than pages on the inactive list. This patch (of 2): Implement a DAMOS filter type for active pages on DAMON kernel API, and add support of it from the physical address space DAMON operations set (paddr). Link: https://lkml.kernel.org/r/20250318183029.2062917-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20250318183029.2062917-2-nphamcs@gmail.com Signed-off-by: Nhat Pham Suggested-by: SeongJae Park Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/damon.h | 2 ++ mm/damon/paddr.c | 3 +++ mm/damon/sysfs-schemes.c | 1 + 3 files changed, 6 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 3db4f77261f5..47e36e6ea203 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -334,6 +334,7 @@ struct damos_stat { /** * enum damos_filter_type - Type of memory for &struct damos_filter * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. + * @DAMOS_FILTER_TYPE_ACTIVE: Active pages. * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages. * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage. 
@@ -355,6 +356,7 @@ struct damos_stat { */ enum damos_filter_type { DAMOS_FILTER_TYPE_ANON, + DAMOS_FILTER_TYPE_ACTIVE, DAMOS_FILTER_TYPE_MEMCG, DAMOS_FILTER_TYPE_YOUNG, DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index b08847ef9b81..1b70d3f36046 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -217,6 +217,9 @@ static bool damos_pa_filter_match(struct damos_filter *filter, case DAMOS_FILTER_TYPE_ANON: matched = folio_test_anon(folio); break; + case DAMOS_FILTER_TYPE_ACTIVE: + matched = folio_test_active(folio); + break; case DAMOS_FILTER_TYPE_MEMCG: rcu_read_lock(); memcg = folio_memcg_check(folio); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 5023f2b690d6..23b562df0839 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -344,6 +344,7 @@ static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc( /* Should match with enum damos_filter_type */ static const char * const damon_sysfs_scheme_filter_type_strs[] = { "anon", + "active", "memcg", "young", "hugepage_size", From af96c610c6fdcd3a765f8a158293fe208a205ac2 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 18 Mar 2025 11:30:29 -0700 Subject: [PATCH 423/431] docs/mm/damon/design: document active DAMOS filter type Document availability and meaning of "active" DAMOS filter type on design document. Since introduction of the type requires no additional user ABI, usage and ABI document need no update. Link: https://lkml.kernel.org/r/20250318183029.2062917-3-nphamcs@gmail.com Signed-off-by: Nhat Pham Suggested-by: SeongJae Park Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index aae3a691ee69..f12d33749329 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -656,6 +656,8 @@ Below ``type`` of filters are currently supported. - Operations layer handled, supported by only ``paddr`` operations set. - anon - Applied to pages that containing data that not stored in files. + - active + - Applied to active pages. - memcg - Applied to pages that belonging to a given cgroup. - young From 735b3f7e773bd09d459537562754debd1f8e816b Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 18 Mar 2025 17:43:40 +0000 Subject: [PATCH 424/431] selftests/mm: uffd-unit-tests support for hugepages > 2M uffd-unit-tests uses a memory area with a fixed 32M size. Then it calculates the number of pages by dividing by page_size, which itself is either the base page size or the PMD huge page size depending on the test config. For the latter, we end up with nr_pages=1 for arm64 16K base pages, and nr_pages=0 for 64K base pages. This doesn't end well. So let's make the 32M size a floor and also ensure that we have at least 2 pages given the PMD size. With this change, the tests pass on arm64 64K base page size configuration. 
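Concretely, with the 32M default area and page_size equal to the PMD size (the PMD sizes below are the usual arm64 values for 16K and 64K base pages, stated here for illustration):

	nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size;

	/* 16K base pages, 32M PMD:  before 32M/32M  = 1; after MAX(32M, 64M)/32M = 2 */
	/* 64K base pages, 512M PMD: before 32M/512M = 0; after MAX(32M, 1G)/512M = 2 */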
Link: https://lkml.kernel.org/r/20250318174343.243631-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Peter Xu Acked-by: Rafael Aquini Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-unit-tests.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 24ea82ee2231..e8fd9011c2a3 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -26,6 +26,8 @@ #define ALIGN_UP(x, align_to) \ ((__typeof__(x))((((unsigned long)(x)) + ((align_to)-1)) & ~((align_to)-1))) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) + struct mem_type { const char *name; unsigned int mem_flag; @@ -196,7 +198,8 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test, else page_size = psize(); - nr_pages = UFFD_TEST_MEM_SIZE / page_size; + /* Ensure we have at least 2 pages */ + nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size; /* TODO: remove this global var.. it's so ugly */ nr_parallel = 1; From a2c6f9c3cafac02a48db83714f4b62fee2508bc3 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 18 Mar 2025 17:43:41 +0000 Subject: [PATCH 425/431] selftests/mm: speed up split_huge_page_test create_pagecache_thp_and_fd() was previously writing a file sized at twice the PMD size by making a per-byte write syscall. This was quite slow when the PMD size is 4M, but completely intolerable for 32M (PMD size for arm64's 16K page size), and 512M (PMD size for arm64's 64K page size). The byte pattern has a 256 byte period, so let's create a 1K buffer and fill it with exactly 4 periods. Then we can write the buffer as many times as is required to fill the file. This makes things much more tolerable. The test now passes for 16K page size. It still fails for 64K page size because MAX_PAGECACHE_ORDER is too small for 512M folio size (I think). 
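The equivalence of the two write strategies rests on the byte pattern having a 256-byte period: a 1K buffer filled once with buf[i] = (unsigned char)i and written back-to-back produces exactly the bytes the old per-byte loop produced. A standalone sketch of that argument (illustrative only, not the selftest code; the 1K buffer size and the pattern come from the description above):

/* Check that the bulk-write pattern equals the old per-byte pattern. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
        unsigned char buf[1024];
        size_t fd_size = 4UL << 20; /* any multiple of sizeof(buf) */

        /* four 256-byte periods, as described above */
        for (size_t i = 0; i < sizeof(buf); i++)
                buf[i] = (unsigned char)i;

        for (size_t off = 0; off < fd_size; off++)
                /* byte written at offset 'off' by repeating the buffer
                 * matches the byte the old per-byte loop wrote there */
                assert(buf[off % sizeof(buf)] == (unsigned char)off);

        return 0;
}

The speedup itself comes purely from the syscall count: a 512M file needs roughly 512K write() calls of 1K each instead of roughly 537M single-byte write() calls.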
Link: https://lkml.kernel.org/r/20250318174343.243631-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Peter Xu Acked-by: Rafael Aquini Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/split_huge_page_test.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 719c5e2a6624..aa7400ed0e99 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -5,6 +5,7 @@ */ #define _GNU_SOURCE +#include #include #include #include @@ -398,6 +399,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, { size_t i; int dummy = 0; + unsigned char buf[1024]; srand(time(NULL)); @@ -405,11 +407,12 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, if (*fd == -1) ksft_exit_fail_msg("Failed to create a file at %s\n", testfile); - for (i = 0; i < fd_size; i++) { - unsigned char byte = (unsigned char)i; + assert(fd_size % sizeof(buf) == 0); + for (i = 0; i < sizeof(buf); i++) + buf[i] = (unsigned char)i; + for (i = 0; i < fd_size; i += sizeof(buf)) + write(*fd, buf, sizeof(buf)); - write(*fd, &byte, sizeof(byte)); - } close(*fd); sync(); *fd = open("/proc/sys/vm/drop_caches", O_WRONLY); From e452872b40e3f1fb92adf0d573a0a6a7c9f6ce22 Mon Sep 17 00:00:00 2001 From: Hao Jia Date: Tue, 18 Mar 2025 15:58:32 +0800 Subject: [PATCH 426/431] mm: vmscan: split proactive reclaim statistics from direct reclaim statistics MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Adding Proactive Memory Reclaim Statistics". These two patches are related to proactive memory reclaim. Patch 1 Split proactive reclaim statistics from direct reclaim counters and introduces new counters: pgsteal_proactive, pgdemote_proactive, and pgscan_proactive. Patch 2 Adds pswpin and pswpout items to the cgroup-v2 documentation. This patch (of 2): In proactive memory reclaim scenarios, it is necessary to accurately track proactive reclaim statistics to dynamically adjust the frequency and amount of memory being reclaimed proactively. Currently, proactive reclaim is included in direct reclaim statistics, which can make these direct reclaim statistics misleading. Therefore, separate proactive reclaim memory from the direct reclaim counters by introducing new counters: pgsteal_proactive, pgdemote_proactive, and pgscan_proactive, to avoid confusion with direct reclaim. Link: https://lkml.kernel.org/r/20250318075833.90615-1-jiahao.kernel@gmail.com Link: https://lkml.kernel.org/r/20250318075833.90615-2-jiahao.kernel@gmail.com Signed-off-by: Hao Jia Acked-by: Johannes Weiner Cc: Jonathan Corbet Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Cc: Tejun Heo Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v2.rst | 9 +++++++ include/linux/mmzone.h | 1 + include/linux/vm_event_item.h | 2 ++ mm/memcontrol.c | 7 +++++ mm/vmscan.c | 35 ++++++++++++++----------- mm/vmstat.c | 3 +++ 6 files changed, 42 insertions(+), 15 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index f8a894a16307..d7624e500610 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1576,6 +1576,9 @@ The following nested keys are defined. 
pgscan_khugepaged (npn) Amount of scanned pages by khugepaged (in an inactive LRU list) + pgscan_proactive (npn) + Amount of scanned pages proactively (in an inactive LRU list) + pgsteal_kswapd (npn) Amount of reclaimed pages by kswapd @@ -1585,6 +1588,9 @@ The following nested keys are defined. pgsteal_khugepaged (npn) Amount of reclaimed pages by khugepaged + pgsteal_proactive (npn) + Amount of reclaimed pages proactively + pgfault (npn) Total number of page faults incurred @@ -1662,6 +1668,9 @@ The following nested keys are defined. pgdemote_khugepaged Number of pages demoted by khugepaged. + pgdemote_proactive + Number of pages demoted by proactively. + hugetlb Amount of memory used by hugetlb pages. This metric only shows up if hugetlb usage is accounted for in memory.current (i.e. diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a9db0fbd2b94..f7fe1126dc75 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -221,6 +221,7 @@ enum node_stat_item { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, + PGDEMOTE_PROACTIVE, #ifdef CONFIG_HUGETLB_PAGE NR_HUGETLB, #endif diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index f70d0958095c..f11b6fa9c5b3 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -41,9 +41,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED, + PGSTEAL_PROACTIVE, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED, + PGSCAN_PROACTIVE, PGSCAN_DIRECT_THROTTLE, PGSCAN_ANON, PGSCAN_FILE, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 57cf5a6c279c..40c07b8699ae 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -315,6 +315,7 @@ static const unsigned int memcg_node_stat_items[] = { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, + PGDEMOTE_PROACTIVE, #ifdef CONFIG_HUGETLB_PAGE NR_HUGETLB, #endif @@ -431,9 +432,11 @@ static const unsigned int memcg_vm_event_stat[] = { PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED, + PGSCAN_PROACTIVE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED, + PGSTEAL_PROACTIVE, PGFAULT, PGMAJFAULT, PGREFILL, @@ -1394,6 +1397,7 @@ static const struct memory_stat memory_stats[] = { { "pgdemote_kswapd", PGDEMOTE_KSWAPD }, { "pgdemote_direct", PGDEMOTE_DIRECT }, { "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED }, + { "pgdemote_proactive", PGDEMOTE_PROACTIVE }, #ifdef CONFIG_NUMA_BALANCING { "pgpromote_success", PGPROMOTE_SUCCESS }, #endif @@ -1436,6 +1440,7 @@ static int memcg_page_state_output_unit(int item) case PGDEMOTE_KSWAPD: case PGDEMOTE_DIRECT: case PGDEMOTE_KHUGEPAGED: + case PGDEMOTE_PROACTIVE: #ifdef CONFIG_NUMA_BALANCING case PGPROMOTE_SUCCESS: #endif @@ -1509,10 +1514,12 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) seq_buf_printf(s, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + memcg_events(memcg, PGSCAN_DIRECT) + + memcg_events(memcg, PGSCAN_PROACTIVE) + memcg_events(memcg, PGSCAN_KHUGEPAGED)); seq_buf_printf(s, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + memcg_events(memcg, PGSTEAL_DIRECT) + + memcg_events(memcg, PGSTEAL_PROACTIVE) + memcg_events(memcg, PGSTEAL_KHUGEPAGED)); for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) { diff --git a/mm/vmscan.c b/mm/vmscan.c index b5c7dfc2b189..98e6ac82e428 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -456,21 +456,26 @@ void drop_slab(void) } while ((freed >> shift++) > 1); } -static int reclaimer_offset(void) +#define CHECK_RECLAIMER_OFFSET(type) \ + do { \ + 
BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \ + PGDEMOTE_##type - PGDEMOTE_KSWAPD); \ + BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \ + PGSCAN_##type - PGSCAN_KSWAPD); \ + } while (0) + +static int reclaimer_offset(struct scan_control *sc) { - BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != - PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); - BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != - PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); - BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != - PGSCAN_DIRECT - PGSCAN_KSWAPD); - BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != - PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); + CHECK_RECLAIMER_OFFSET(DIRECT); + CHECK_RECLAIMER_OFFSET(KHUGEPAGED); + CHECK_RECLAIMER_OFFSET(PROACTIVE); if (current_is_kswapd()) return 0; if (current_is_khugepaged()) return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; + if (sc->proactive) + return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD; return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; } @@ -2008,7 +2013,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan, &nr_scanned, sc, lru); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); - item = PGSCAN_KSWAPD + reclaimer_offset(); + item = PGSCAN_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_scanned); __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); @@ -2024,10 +2029,10 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan, spin_lock_irq(&lruvec->lru_lock); move_folios_to_lru(lruvec, &folio_list); - __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), + __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), stat.nr_demoted); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); - item = PGSTEAL_KSWAPD + reclaimer_offset(); + item = PGSTEAL_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_reclaimed); __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); @@ -4571,7 +4576,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, break; } - item = PGSCAN_KSWAPD + reclaimer_offset(); + item = PGSCAN_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) { __count_vm_events(item, isolated); __count_vm_events(PGREFILL, sorted); @@ -4721,10 +4726,10 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap reset_batch_size(walk); } - __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(), + __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc), stat.nr_demoted); - item = PGSTEAL_KSWAPD + reclaimer_offset(); + item = PGSTEAL_KSWAPD + reclaimer_offset(sc); if (!cgroup_reclaim(sc)) __count_vm_events(item, reclaimed); __count_memcg_events(memcg, item, reclaimed); diff --git a/mm/vmstat.c b/mm/vmstat.c index ae0e4259ac23..ab5c840941f3 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1274,6 +1274,7 @@ const char * const vmstat_text[] = { "pgdemote_kswapd", "pgdemote_direct", "pgdemote_khugepaged", + "pgdemote_proactive", #ifdef CONFIG_HUGETLB_PAGE "nr_hugetlb", #endif @@ -1309,9 +1310,11 @@ const char * const vmstat_text[] = { "pgsteal_kswapd", "pgsteal_direct", "pgsteal_khugepaged", + "pgsteal_proactive", "pgscan_kswapd", "pgscan_direct", "pgscan_khugepaged", + "pgscan_proactive", "pgscan_direct_throttle", "pgscan_anon", "pgscan_file", From 4c8bc7c4e3fbbdf07a879429c34a78f31d9894d4 Mon Sep 17 00:00:00 2001 From: Hao Jia Date: Tue, 18 Mar 2025 15:58:33 +0800 Subject: [PATCH 427/431] cgroup: docs: add pswpin and pswpout items in cgroup v2 doc MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit The commit 15ff4d409e1a ("mm/memcontrol: add per-memcg pgpgin/pswpin counter") introduced the pswpin and pswpout items in the memory.stat of cgroup v2. Therefore, update them accordingly in the cgroup-v2 documentation. Link: https://lkml.kernel.org/r/20250318075833.90615-3-jiahao.kernel@gmail.com Fixes: 15ff4d409e1a ("mm/memcontrol: add per-memcg pgpgin/pswpin counter") Signed-off-by: Hao Jia Acked-by: Tejun Heo Acked-by: Johannes Weiner Cc: Jonathan Corbet Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v2.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index d7624e500610..ba11a4d7321b 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1561,6 +1561,12 @@ The following nested keys are defined. workingset_nodereclaim Number of times a shadow node has been reclaimed + pswpin (npn) + Number of pages swapped into memory + + pswpout (npn) + Number of pages swapped out of memory + pgscan (npn) Amount of scanned pages (in an inactive LRU list) From 5f5ee52d4f58605330b09851273d6e56aaadd29e Mon Sep 17 00:00:00 2001 From: Jinjiang Tu Date: Tue, 18 Mar 2025 16:39:38 +0800 Subject: [PATCH 428/431] mm/hwpoison: introduce folio_contain_hwpoisoned_page() helper Patch series "mm/vmscan: don't try to reclaim hwpoison folio". Fix a bug during memory reclaim if folio is hwpoisoned. This patch (of 2): Introduce helper folio_contain_hwpoisoned_page() to check if the entire folio is hwpoisoned or it contains hwpoisoned pages. Link: https://lkml.kernel.org/r/20250318083939.987651-1-tujinjiang@huawei.com Link: https://lkml.kernel.org/r/20250318083939.987651-2-tujinjiang@huawei.com Signed-off-by: Jinjiang Tu Acked-by: Miaohe Lin Cc: David Hildenbrand Cc: Kefeng Wang Cc: Nanyong Sun Cc: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 6 ++++++ mm/memory_hotplug.c | 3 +-- mm/shmem.c | 3 +-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f26ce54c7aa4..68f4b188fc6b 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1098,6 +1098,12 @@ static inline bool is_page_hwpoison(const struct page *page) return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } +static inline bool folio_contain_hwpoisoned_page(struct folio *folio) +{ + return folio_test_hwpoison(folio) || + (folio_test_large(folio) && folio_test_has_hwpoisoned(folio)); +} + bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 16cf9e17077e..75401866fb76 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1828,8 +1828,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (unlikely(page_folio(page) != folio)) goto put_folio; - if (folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { + if (folio_contain_hwpoisoned_page(folio)) { if (WARN_ON(folio_test_lru(folio))) folio_isolate_lru(folio); if (folio_mapped(folio)) { diff --git a/mm/shmem.c b/mm/shmem.c index 7b738d8d6581..17f27d92c664 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3290,8 +3290,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping, if (ret) return ret; - if 
(folio_test_hwpoison(folio) || - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { + if (folio_contain_hwpoisoned_page(folio)) { folio_unlock(folio); folio_put(folio); return -EIO; From 1b0449544c6482179ac84530b61fc192a6527bfd Mon Sep 17 00:00:00 2001 From: Jinjiang Tu Date: Tue, 18 Mar 2025 16:39:39 +0800 Subject: [PATCH 429/431] mm/vmscan: don't try to reclaim hwpoison folio Syzkaller reports a bug as follows: Injecting memory failure for pfn 0x18b00e at process virtual address 0x20ffd000 Memory failure: 0x18b00e: dirty swapcache page still referenced by 2 users Memory failure: 0x18b00e: recovery action for dirty swapcache page: Failed page: refcount:2 mapcount:0 mapping:0000000000000000 index:0x20ffd pfn:0x18b00e memcg:ffff0000dd6d9000 anon flags: 0x5ffffe00482011(locked|dirty|arch_1|swapbacked|hwpoison|node=0|zone=2|lastcpupid=0xfffff) raw: 005ffffe00482011 dead000000000100 dead000000000122 ffff0000e232a7c9 raw: 0000000000020ffd 0000000000000000 00000002ffffffff ffff0000dd6d9000 page dumped because: VM_BUG_ON_FOLIO(!folio_test_uptodate(folio)) ------------[ cut here ]------------ kernel BUG at mm/swap_state.c:184! Internal error: Oops - BUG: 00000000f2000800 [#1] SMP Modules linked in: CPU: 0 PID: 60 Comm: kswapd0 Not tainted 6.6.0-gcb097e7de84e #3 Hardware name: linux,dummy-virt (DT) pstate: 80400005 (Nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : add_to_swap+0xbc/0x158 lr : add_to_swap+0xbc/0x158 sp : ffff800087f37340 x29: ffff800087f37340 x28: fffffc00052c0380 x27: ffff800087f37780 x26: ffff800087f37490 x25: ffff800087f37c78 x24: ffff800087f377a0 x23: ffff800087f37c50 x22: 0000000000000000 x21: fffffc00052c03b4 x20: 0000000000000000 x19: fffffc00052c0380 x18: 0000000000000000 x17: 296f696c6f662865 x16: 7461646f7470755f x15: 747365745f6f696c x14: 6f6621284f494c4f x13: 0000000000000001 x12: ffff600036d8b97b x11: 1fffe00036d8b97a x10: ffff600036d8b97a x9 : dfff800000000000 x8 : 00009fffc9274686 x7 : ffff0001b6c5cbd3 x6 : 0000000000000001 x5 : ffff0000c25896c0 x4 : 0000000000000000 x3 : 0000000000000000 x2 : 0000000000000000 x1 : ffff0000c25896c0 x0 : 0000000000000000 Call trace: add_to_swap+0xbc/0x158 shrink_folio_list+0x12ac/0x2648 shrink_inactive_list+0x318/0x948 shrink_lruvec+0x450/0x720 shrink_node_memcgs+0x280/0x4a8 shrink_node+0x128/0x978 balance_pgdat+0x4f0/0xb20 kswapd+0x228/0x438 kthread+0x214/0x230 ret_from_fork+0x10/0x20 I can reproduce this issue with the following steps: 1) When a dirty swapcache page is isolated by the reclaim process and the page isn't locked, inject memory failure for the page. me_swapcache_dirty() clears the uptodate flag and tries to delete the page from the lru, but fails. The reclaim process will put the hwpoisoned page back to the lru. 2) The process that maps the hwpoisoned page exits and the page is deleted; the page will never be freed and will stay in the lru forever. 3) If we trigger reclaim again and try to reclaim the page, add_to_swap() will trigger the VM_BUG_ON_FOLIO because the uptodate flag is cleared. To fix it, skip the hwpoisoned page in shrink_folio_list(). Besides, the hwpoison folio may not have been unmapped by hwpoison_user_mappings() yet, so unmap it in shrink_folio_list(); otherwise the folio will fail to be unmapped by hwpoison_user_mappings() since the folio isn't in the lru list. 
Link: https://lkml.kernel.org/r/20250318083939.987651-3-tujinjiang@huawei.com Signed-off-by: Jinjiang Tu Acked-by: Miaohe Lin Cc: David Hildenbrand Cc: Kefeng Wang Cc: Nanyong Sun Cc: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton --- mm/vmscan.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index 98e6ac82e428..2b2ab386cab5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1127,6 +1127,13 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (!folio_trylock(folio)) goto keep; + if (folio_contain_hwpoisoned_page(folio)) { + unmap_poisoned_folio(folio, folio_pfn(folio), false); + folio_unlock(folio); + folio_put(folio); + continue; + } + VM_BUG_ON_FOLIO(folio_test_active(folio), folio); nr_pages = folio_nr_pages(folio); From d893aca973c315ee985e1f8220fcd239c0ab7d19 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Wed, 19 Mar 2025 14:23:37 +0200 Subject: [PATCH 430/431] x86/mm: restore early initialization of high_memory for 32-bits Kernel test robot reports the following crash on 32-bit system with HIGHMEM and DEBUG_VIRTUAL: [ 0.056128][ T0] kernel BUG at arch/x86/mm/physaddr.c:77! PANIC: early exception 0x06 IP 60:c116539d error 0 cr2 0x0 [ 0.056916][ T0] CPU: 0 UID: 0 PID: 0 Comm: swapper Not tainted 6.14.0-rc4-00010-ga4dbe5c71817 #1 [ 0.057570][ T0] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014 [ 0.058299][ T0] EIP: __phys_addr (arch/x86/mm/physaddr.c:77) [ 0.058633][ T0] Code: 00 74 33 89 f0 e8 d3 8b 2e 00 89 c3 0f b6 d0 b8 58 bb 4b c5 31 c9 6a 00 e8 70 f5 15 00 83 c4 04 84 db 74 25 ff 05 78 de 5d c5 <0f> 0b b8 c8 91 ea c4 e8 e7 6e ea ff b8 58 bb 4b c5 31 d2 31 c9 6a All code [ 0.060017][ T0] EAX: 00000000 EBX: c61f7001 ECX: 00000000 EDX: 00000000 [ 0.060519][ T0] ESI: c61f7000 EDI: 061f7000 EBP: c4e31f04 ESP: c61f7000 [ 0.061016][ T0] DS: 007b ES: 007b FS: 0000 GS: 0000 SS: cff4 EFLAGS: 00210002 [ 0.061560][ T0] CR0: 80050033 CR2: 00000000 CR3: 059fc000 CR4: 00000090 [ 0.062060][ T0] Call Trace: [ 0.062288][ T0] ? show_regs (arch/x86/kernel/dumpstack.c:478) [ 0.062588][ T0] ? early_fixup_exception (arch/x86/include/asm/nospec-branch.h:595) [ 0.062968][ T0] ? early_idt_handler_common (arch/x86/kernel/head_32.S:352) [ 0.063360][ T0] ? __phys_addr (arch/x86/mm/physaddr.c:77) [ 0.063677][ T0] ? one_page_table_init (arch/x86/mm/init_32.c:100) [ 0.064037][ T0] ? page_table_range_init (arch/x86/mm/init_32.c:227) [ 0.064411][ T0] ? permanent_kmaps_init (include/linux/pgtable.h:191 include/linux/pgtable.h:196 arch/x86/mm/init_32.c:395) [ 0.064814][ T0] ? paging_init (arch/x86/mm/init_32.c:677) [ 0.065118][ T0] ? native_pagetable_init (arch/x86/mm/init_32.c:481) [ 0.065503][ T0] ? setup_arch (arch/x86/kernel/setup.c:1131) [ 0.065819][ T0] ? start_kernel (include/linux/jump_label.h:267 init/main.c:920) [ 0.066143][ T0] ? i386_start_kernel (arch/x86/kernel/head32.c:79) [ 0.066501][ T0] ? startup_32_smp (arch/x86/kernel/head_32.S:292) The crash happens because commit e120d1bc12da ("arch, mm: set high_memory in free_area_init()") moved initialization of high_memory after __vmalloc_start_set and with high_memory still set to 0 any address passes is_vmalloc_addr() check. Restore early initialization of high_memory on 32-bit systems in initmem_init(). 
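To make the failure mode concrete: on 32-bit x86, VMALLOC_START is derived from high_memory (roughly high_memory + VMALLOC_OFFSET), so while high_memory is still 0 the vmalloc range effectively starts a few megabytes above address zero and an ordinary lowmem linear-map address satisfies is_vmalloc_addr(), which is what fires the DEBUG_VIRTUAL check (VIRTUAL_BUG_ON()) in __phys_addr() above. A simplified userspace model of that check follows; the constants and the VMALLOC_START formula below are paraphrased approximations, not the kernel's code:

/* Userspace model of the DEBUG_VIRTUAL failure; addresses are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_OFFSET    0xc0000000UL
#define VMALLOC_OFFSET (8UL << 20)
#define VMALLOC_END    0xfffff000UL /* illustrative */

static unsigned long high_memory; /* 0 until initmem_init() sets it */

static bool vmalloc_range_check(unsigned long addr)
{
        /* models x86-32 VMALLOC_START = high_memory + VMALLOC_OFFSET */
        unsigned long vmalloc_start = high_memory + VMALLOC_OFFSET;

        return addr >= vmalloc_start && addr < VMALLOC_END;
}

int main(void)
{
        unsigned long lowmem = PAGE_OFFSET + (16UL << 20);

        /* high_memory == 0: a lowmem linear-map address looks like vmalloc */
        printf("before init: in vmalloc range? %d\n", vmalloc_range_check(lowmem));

        /* after the early initialization restored by this patch */
        high_memory = PAGE_OFFSET + (896UL << 20); /* typical lowmem top */
        printf("after init:  in vmalloc range? %d\n", vmalloc_range_check(lowmem));
        return 0;
}

With high_memory set early again, the lowmem address falls below VMALLOC_START and the DEBUG_VIRTUAL check in __phys_addr() no longer trips during page table setup.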
Link: https://lkml.kernel.org/r/20250319122337.1538924-1-rppt@kernel.org Fixes: e120d1bc12da ("arch, mm: set high_memory in free_area_init()") Signed-off-by: Mike Rapoport (Microsoft) Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202503191442.112e954f-lkp@intel.com Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- arch/x86/mm/init_32.c | 3 +++ arch/x86/mm/numa_32.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 95b2758b4e4d..f69d2436d780 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -626,6 +626,9 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); + high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; +#else + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 442ef3facff0..65fda406e6f2 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -41,6 +41,9 @@ void __init initmem_init(void) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); + high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; +#else + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif printk(KERN_NOTICE "%ldMB LOWMEM available.\n", pages_to_mb(max_low_pfn)); From 0a1e082b64ccce165e7307a7b49d22b2504f9d1f Mon Sep 17 00:00:00 2001 From: Liu Ye Date: Wed, 19 Mar 2025 17:17:26 +0800 Subject: [PATCH 431/431] mm/page_alloc: remove unnecessary __maybe_unused in order_to_pindex() The `movable` variable is always used when `CONFIG_TRANSPARENT_HUGEPAGE` is enabled, so the `__maybe_unused` attribute is not necessary. This patch removes it and keeps the variable declaration within the `#ifdef` block for better clarity. Link: https://lkml.kernel.org/r/20250319091726.401158-1-liuyerd@163.com Signed-off-by: Liu Ye Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a6d060eea638..0c01998cb3a0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -509,9 +509,9 @@ static void bad_page(struct page *page, const char *reason) static inline unsigned int order_to_pindex(int migratetype, int order) { - bool __maybe_unused movable; #ifdef CONFIG_TRANSPARENT_HUGEPAGE + bool movable; if (order > PAGE_ALLOC_COSTLY_ORDER) { VM_BUG_ON(order != HPAGE_PMD_ORDER);