From c33453d08a56a45c2e72f9f56043610b8f7449c7 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 6 Jun 2025 17:41:12 +0200 Subject: [PATCH 001/234] efi: Drop preprocessor directives from zboot.lds Older versions of `ld` don't seem to support preprocessor directives in linker scripts, e.g. on RHEL9's ld-2.35.2-63.el9 the build fails with: ld:./drivers/firmware/efi/libstub/zboot.lds:32: ignoring invalid character `#' in expression ld:./drivers/firmware/efi/libstub/zboot.lds:33: syntax error We don't seem to need these '#ifdef', no empty .sbat section is created when CONFIG_EFI_SBAT_FILE="": # objdump -h arch/arm64/boot/vmlinuz.efi arch/arm64/boot/vmlinuz.efi: file format pei-aarch64-little Sections: Idx Name Size VMA LMA File off Algn 0 .text 00b94000 0000000000001000 0000000000001000 00001000 2**2 CONTENTS, ALLOC, LOAD, READONLY, CODE 1 .data 00000200 0000000000b95000 0000000000b95000 00b95000 2**2 CONTENTS, ALLOC, LOAD, DATA Fixes: 0f9a1739dd0e ("efi: zboot specific mechanism for embedding SBAT section") Signed-off-by: Vitaly Kuznetsov Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/libstub/zboot.lds | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index c3a166675450..4b8d5cd3dfa2 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -29,14 +29,12 @@ SECTIONS . = _etext; } -#ifdef CONFIG_EFI_SBAT .sbat : ALIGN(4096) { _sbat = .; *(.sbat) _esbat = ALIGN(4096); . = _esbat; } -#endif .data : ALIGN(4096) { _data = .; From aa807b9f22df2eee28593cbbabba0f93f4aa26c1 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Thu, 12 Jun 2025 10:14:17 +0800 Subject: [PATCH 002/234] dma-contiguous: hornor the cma address limit setup by user When porting a cma related usage from x86_64 server to arm64 server, the "cma=4G@4G" setup failed on arm64. The reason is arm64 and some other architectures have specific physical address limit for reserved cma area, like 4GB due to the device's need for 32 bit dma. Actually lots of platforms of those architectures don't have this device dma limit, but still have to obey it, and are not able to reserve a huge cma pool. This situation could be improved by honoring the user input cma physical address than the arch limit. As when users specify it, they already knows what the default is which probably can't suit them. Suggested-by: Robin Murphy Signed-off-by: Feng Tang Signed-off-by: Marek Szyprowski Link: https://lore.kernel.org/r/20250612021417.44929-1-feng.tang@linux.alibaba.com --- kernel/dma/contiguous.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index 8df0dfaaca18..67af8a55185d 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -222,7 +222,10 @@ void __init dma_contiguous_reserve(phys_addr_t limit) if (size_cmdline != -1) { selected_size = size_cmdline; selected_base = base_cmdline; - selected_limit = min_not_zero(limit_cmdline, limit); + + /* Hornor the user setup dma address limit */ + selected_limit = limit_cmdline ?: limit; + if (base_cmdline + size_cmdline == limit_cmdline) fixed = true; } else { From f9af88a3d384c8b55beb5dc5483e5da0135fadbd Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 11 Sep 2024 05:13:46 +0200 Subject: [PATCH 003/234] x86/bugs: Rename MDS machinery to something more generic It will be used by other x86 mitigations. No functional changes. 
Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Pawan Gupta --- .../hw-vuln/processor_mmio_stale_data.rst | 4 +-- Documentation/arch/x86/mds.rst | 8 ++--- arch/x86/entry/entry.S | 8 ++--- arch/x86/include/asm/irqflags.h | 4 +-- arch/x86/include/asm/mwait.h | 4 +-- arch/x86/include/asm/nospec-branch.h | 29 ++++++++++--------- arch/x86/kernel/cpu/bugs.c | 12 ++++---- arch/x86/kvm/vmx/vmx.c | 2 +- 8 files changed, 35 insertions(+), 36 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst index 1302fd1b55e8..6dba18dbb9ab 100644 --- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in combination with a microcode update. The microcode clears the affected CPU buffers when the VERW instruction is executed. -Kernel reuses the MDS function to invoke the buffer clearing: - - mds_clear_cpu_buffers() +Kernel does the buffer clearing with x86_clear_cpu_buffers(). On MDS affected CPUs, the kernel already invokes CPU buffer clear on kernel/userspace, hypervisor/guest and C-state (idle) transitions. No diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst index 5a2e6c0ef04a..3518671e1a85 100644 --- a/Documentation/arch/x86/mds.rst +++ b/Documentation/arch/x86/mds.rst @@ -93,7 +93,7 @@ enters a C-state. The kernel provides a function to invoke the buffer clearing: - mds_clear_cpu_buffers() + x86_clear_cpu_buffers() Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path. Other than CFLAGS.ZF, this macro doesn't clobber any registers. @@ -185,9 +185,9 @@ Mitigation points idle clearing would be a window dressing exercise and is therefore not activated. - The invocation is controlled by the static key mds_idle_clear which is - switched depending on the chosen mitigation mode and the SMT state of - the system. + The invocation is controlled by the static key cpu_buf_idle_clear which is + switched depending on the chosen mitigation mode and the SMT state of the + system. The buffer clear is only invoked before entering the C-State to prevent that stale data from the idling CPU from spilling to the Hyper-Thread diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index 175958b02f2b..8e9a0cc20a4a 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb); /* * Define the VERW operand that is disguised as entry code so that - * it can be referenced with KPTI enabled. This ensure VERW can be + * it can be referenced with KPTI enabled. This ensures VERW can be * used late in exit-to-user path after page tables are switched. 
*/ .pushsection .entry.text, "ax" .align L1_CACHE_BYTES, 0xcc -SYM_CODE_START_NOALIGN(mds_verw_sel) +SYM_CODE_START_NOALIGN(x86_verw_sel) UNWIND_HINT_UNDEFINED ANNOTATE_NOENDBR .word __KERNEL_DS .align L1_CACHE_BYTES, 0xcc -SYM_CODE_END(mds_verw_sel); +SYM_CODE_END(x86_verw_sel); /* For KVM */ -EXPORT_SYMBOL_GPL(mds_verw_sel); +EXPORT_SYMBOL_GPL(x86_verw_sel); .popsection diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 9a9b21b78905..b30e5474c18e 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void) static __always_inline void native_safe_halt(void) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); asm volatile("sti; hlt": : :"memory"); } static __always_inline void native_halt(void) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); asm volatile("hlt": : :"memory"); } diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index dd2b129b0418..cc34c3fd197b 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -43,7 +43,7 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx) static __always_inline void __mwait(u32 eax, u32 ecx) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); /* * Use the instruction mnemonic with implicit operands, as the LLVM @@ -98,7 +98,7 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) */ static __always_inline void __sti_mwait(u32 eax, u32 ecx) { - mds_idle_clear_cpu_buffers(); + x86_idle_clear_cpu_buffers(); asm volatile("sti; mwait" :: "a" (eax), "c" (ecx)); } diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 20d754b98f3f..5dcd75bb5e0d 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -302,22 +302,22 @@ .endm /* - * Macro to execute VERW instruction that mitigate transient data sampling - * attacks such as MDS. On affected systems a microcode update overloaded VERW - * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. - * + * Macro to execute VERW insns that mitigate transient data sampling + * attacks such as MDS or TSA. On affected systems a microcode update + * overloaded VERW insns to also clear the CPU buffers. VERW clobbers + * CFLAGS.ZF. * Note: Only the memory operand variant of VERW clears the CPU buffers. */ .macro CLEAR_CPU_BUFFERS #ifdef CONFIG_X86_64 - ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF #else /* * In 32bit mode, the memory operand must be a %cs reference. The data * segments may not be usable (vm86 mode), and the stack segment may not * be flat (ESPFIX32). 
*/ - ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF #endif .endm @@ -567,24 +567,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb); -DECLARE_STATIC_KEY_FALSE(mds_idle_clear); +DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear); -extern u16 mds_verw_sel; +extern u16 x86_verw_sel; #include /** - * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability + * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns * * This uses the otherwise unused and obsolete VERW instruction in * combination with microcode which triggers a CPU buffer flush when the * instruction is executed. */ -static __always_inline void mds_clear_cpu_buffers(void) +static __always_inline void x86_clear_cpu_buffers(void) { static const u16 ds = __KERNEL_DS; @@ -601,14 +601,15 @@ static __always_inline void mds_clear_cpu_buffers(void) } /** - * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability + * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS + * vulnerability * * Clear CPU buffers if the corresponding static key is enabled */ -static __always_inline void mds_idle_clear_cpu_buffers(void) +static __always_inline void x86_idle_clear_cpu_buffers(void) { - if (static_branch_likely(&mds_idle_clear)) - mds_clear_cpu_buffers(); + if (static_branch_likely(&cpu_buf_idle_clear)) + x86_clear_cpu_buffers(); } #endif /* __ASSEMBLER__ */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 7f94e6a5497d..258ed3d2b6a9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -169,9 +169,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); -/* Control MDS CPU buffer clear before idling (halt, mwait) */ -DEFINE_STATIC_KEY_FALSE(mds_idle_clear); -EXPORT_SYMBOL_GPL(mds_idle_clear); +/* Control CPU buffer clear before idling (halt, mwait) */ +DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); +EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); /* * Controls whether l1d flush based mitigations are enabled, @@ -637,7 +637,7 @@ static void __init mmio_apply_mitigation(void) * is required irrespective of SMT state. 
*/ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) - static_branch_enable(&mds_idle_clear); + static_branch_enable(&cpu_buf_idle_clear); if (mmio_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); @@ -2249,10 +2249,10 @@ static void update_mds_branch_idle(void) return; if (sched_smt_active()) { - static_branch_enable(&mds_idle_clear); + static_branch_enable(&cpu_buf_idle_clear); } else if (mmio_mitigation == MMIO_MITIGATION_OFF || (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { - static_branch_disable(&mds_idle_clear); + static_branch_disable(&cpu_buf_idle_clear); } } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4953846cb30d..191a9ed0da22 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7291,7 +7291,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&cpu_buf_vm_clear) && kvm_arch_has_assigned_device(vcpu->kvm)) - mds_clear_cpu_buffers(); + x86_clear_cpu_buffers(); vmx_disable_fb_clear(vmx); From d8010d4ba43e9f790925375a7de100604a5e2dba Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 11 Sep 2024 10:53:08 +0200 Subject: [PATCH 004/234] x86/bugs: Add a Transient Scheduler Attacks mitigation Add the required features detection glue to bugs.c et all in order to support the TSA mitigation. Co-developed-by: Kim Phillips Signed-off-by: Kim Phillips Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Pawan Gupta --- .../ABI/testing/sysfs-devices-system-cpu | 1 + .../admin-guide/kernel-parameters.txt | 13 ++ arch/x86/Kconfig | 9 ++ arch/x86/include/asm/cpufeatures.h | 6 +- arch/x86/include/asm/mwait.h | 2 +- arch/x86/include/asm/nospec-branch.h | 14 +- arch/x86/kernel/cpu/amd.c | 44 +++++++ arch/x86/kernel/cpu/bugs.c | 124 ++++++++++++++++++ arch/x86/kernel/cpu/common.c | 14 +- arch/x86/kernel/cpu/scattered.c | 2 + arch/x86/kvm/svm/vmenter.S | 6 + drivers/base/cpu.c | 3 + include/linux/cpu.h | 1 + 13 files changed, 232 insertions(+), 7 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index bf85f4de6862..ab8cd337f43a 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -584,6 +584,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 /sys/devices/system/cpu/vulnerabilities/srbds + /sys/devices/system/cpu/vulnerabilities/tsa /sys/devices/system/cpu/vulnerabilities/tsx_async_abort Date: January 2018 Contact: Linux kernel mailing list diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f1f2c0874da9..07e22ba5bfe3 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -7488,6 +7488,19 @@ having this key zero'ed is acceptable. E.g. in testing scenarios. + tsa= [X86] Control mitigation for Transient Scheduler + Attacks on AMD CPUs. Search the following in your + favourite search engine for more details: + + "Technical guidance for mitigating transient scheduler + attacks". + + off - disable the mitigation + on - enable the mitigation (default) + user - mitigate only user/kernel transitions + vm - mitigate only guest/host transitions + + tsc= Disable clocksource stability checks for TSC. 
Format: [x86] reliable: mark tsc clocksource as reliable, this diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 340e5468980e..71dfe7d7c786 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2695,6 +2695,15 @@ config MITIGATION_ITS disabled, mitigation cannot be enabled via cmdline. See +config MITIGATION_TSA + bool "Mitigate Transient Scheduler Attacks" + depends on CPU_SUP_AMD + default y + help + Enable mitigation for Transient Scheduler Attacks. TSA is a hardware + security vulnerability on AMD CPUs which can lead to forwarding of + invalid info to subsequent instructions and thus can affect their + timing and thereby cause a leakage. endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ee176236c2be..286d509f9363 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -456,6 +456,7 @@ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ #define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */ #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */ +#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */ #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */ @@ -487,6 +488,9 @@ #define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */ #define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */ #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */ +#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */ +#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */ +#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */ /* * BUG word(s) @@ -542,5 +546,5 @@ #define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */ #define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */ #define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */ - +#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index cc34c3fd197b..82bd9eb73b3c 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -80,7 +80,7 @@ static __always_inline void __mwait(u32 eax, u32 ecx) */ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) { - /* No MDS buffer clear as this is AMD/HYGON only */ + /* No need for TSA buffer clearing on AMD */ /* "mwaitx %eax, %ebx, %ecx" */ asm volatile(".byte 0x0f, 0x01, 0xfb" diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 5dcd75bb5e0d..10f261678749 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -308,19 +308,25 @@ * CFLAGS.ZF. * Note: Only the memory operand variant of VERW clears the CPU buffers. 
*/ -.macro CLEAR_CPU_BUFFERS +.macro __CLEAR_CPU_BUFFERS feature #ifdef CONFIG_X86_64 - ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature #else /* * In 32bit mode, the memory operand must be a %cs reference. The data * segments may not be usable (vm86 mode), and the stack segment may not * be flat (ESPFIX32). */ - ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature #endif .endm +#define CLEAR_CPU_BUFFERS \ + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF + +#define VM_CLEAR_CPU_BUFFERS \ + __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM + #ifdef CONFIG_X86_64 .macro CLEAR_BRANCH_HISTORY ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP @@ -602,7 +608,7 @@ static __always_inline void x86_clear_cpu_buffers(void) /** * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS - * vulnerability + * and TSA vulnerabilities. * * Clear CPU buffers if the corresponding static key is enabled */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 93da466dfe2c..23c535871a7e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -377,6 +377,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c) #endif } +#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \ + X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \ + step, step, ucode) + +static const struct x86_cpu_id amd_tsa_microcode[] = { + ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7), + ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b), + ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d), + ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c), + ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c), + ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109), + ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e), + ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211), + ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108), + ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012), + ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a), + ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108), + ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208), + ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008), + ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008), + ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216), + {}, +}; + +static void tsa_init(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return; + + if (cpu_has(c, X86_FEATURE_ZEN3) || + cpu_has(c, X86_FEATURE_ZEN4)) { + if (x86_match_min_microcode_rev(amd_tsa_microcode)) + setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR); + else + pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode); + } else { + setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO); + setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO); + } +} + static void bsp_init_amd(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { @@ -489,6 +530,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) } bsp_determine_snp(c); + + tsa_init(c); + return; warn: diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 258ed3d2b6a9..f4d3abb12317 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -94,6 +94,8 @@ static void __init bhi_apply_mitigation(void); static void __init its_select_mitigation(void); static void __init its_update_mitigation(void); static void __init its_apply_mitigation(void); +static void __init tsa_select_mitigation(void); +static void __init tsa_apply_mitigation(void); /* The base 
value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -225,6 +227,7 @@ void __init cpu_select_mitigations(void) gds_select_mitigation(); its_select_mitigation(); bhi_select_mitigation(); + tsa_select_mitigation(); /* * After mitigations are selected, some may need to update their @@ -272,6 +275,7 @@ void __init cpu_select_mitigations(void) gds_apply_mitigation(); its_apply_mitigation(); bhi_apply_mitigation(); + tsa_apply_mitigation(); } /* @@ -1487,6 +1491,94 @@ static void __init its_apply_mitigation(void) set_return_thunk(its_return_thunk); } +#undef pr_fmt +#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt + +enum tsa_mitigations { + TSA_MITIGATION_NONE, + TSA_MITIGATION_AUTO, + TSA_MITIGATION_UCODE_NEEDED, + TSA_MITIGATION_USER_KERNEL, + TSA_MITIGATION_VM, + TSA_MITIGATION_FULL, +}; + +static const char * const tsa_strings[] = { + [TSA_MITIGATION_NONE] = "Vulnerable", + [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", + [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", + [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", +}; + +static enum tsa_mitigations tsa_mitigation __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; + +static int __init tsa_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + tsa_mitigation = TSA_MITIGATION_NONE; + else if (!strcmp(str, "on")) + tsa_mitigation = TSA_MITIGATION_FULL; + else if (!strcmp(str, "user")) + tsa_mitigation = TSA_MITIGATION_USER_KERNEL; + else if (!strcmp(str, "vm")) + tsa_mitigation = TSA_MITIGATION_VM; + else + pr_err("Ignoring unknown tsa=%s option.\n", str); + + return 0; +} +early_param("tsa", tsa_parse_cmdline); + +static void __init tsa_select_mitigation(void) +{ + if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { + tsa_mitigation = TSA_MITIGATION_NONE; + return; + } + + if (tsa_mitigation == TSA_MITIGATION_NONE) + return; + + if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) { + tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; + goto out; + } + + if (tsa_mitigation == TSA_MITIGATION_AUTO) + tsa_mitigation = TSA_MITIGATION_FULL; + + /* + * No need to set verw_clear_cpu_buf_mitigation_selected - it + * doesn't fit all cases here and it is not needed because this + * is the only VERW-based mitigation on AMD. + */ +out: + pr_info("%s\n", tsa_strings[tsa_mitigation]); +} + +static void __init tsa_apply_mitigation(void) +{ + switch (tsa_mitigation) { + case TSA_MITIGATION_USER_KERNEL: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + break; + case TSA_MITIGATION_VM: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); + break; + case TSA_MITIGATION_FULL: + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); + break; + default: + break; + } +} + #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt @@ -2316,6 +2408,25 @@ void cpu_bugs_smt_update(void) break; } + switch (tsa_mitigation) { + case TSA_MITIGATION_USER_KERNEL: + case TSA_MITIGATION_VM: + case TSA_MITIGATION_AUTO: + case TSA_MITIGATION_FULL: + /* + * TSA-SQ can potentially lead to info leakage between + * SMT threads. 
+ */ + if (sched_smt_active()) + static_branch_enable(&cpu_buf_idle_clear); + else + static_branch_disable(&cpu_buf_idle_clear); + break; + case TSA_MITIGATION_NONE: + case TSA_MITIGATION_UCODE_NEEDED: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -3265,6 +3376,11 @@ static ssize_t gds_show_state(char *buf) return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); } +static ssize_t tsa_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -3328,6 +3444,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_ITS: return its_show_state(buf); + case X86_BUG_TSA: + return tsa_show_state(buf); + default: break; } @@ -3414,6 +3533,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att { return cpu_show_common(dev, attr, buf, X86_BUG_ITS); } + +ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_TSA); +} #endif void __warn_thunk(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8feb8fd2957a..f7b9fca82bda 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define ITS BIT(8) /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ #define ITS_NATIVE_ONLY BIT(9) +/* CPU is affected by Transient Scheduler Attacks */ +#define TSA BIT(10) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS), @@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_AMD(0x16, RETBLEED), VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), - VULNBL_AMD(0x19, SRSO), + VULNBL_AMD(0x19, SRSO | TSA), VULNBL_AMD(0x1a, SRSO), {} }; @@ -1530,6 +1532,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); } + if (c->x86_vendor == X86_VENDOR_AMD) { + if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) || + !cpu_has(c, X86_FEATURE_TSA_L1_NO)) { + if (cpu_matches(cpu_vuln_blacklist, TSA) || + /* Enable bug on Zen guests to allow for live migration. 
*/ + (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN))) + setup_force_cpu_bug(X86_BUG_TSA); + } + } + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index dbf6d71bdf18..b4a1f6732a3a 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -50,6 +50,8 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, + { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, + { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 }, { X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 0c61153b275f..235c4af6b692 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run) #endif mov VCPU_RDI(%_ASM_DI), %_ASM_DI + /* Clobbers EFLAGS.ZF */ + VM_CLEAR_CPU_BUFFERS + /* Enter guest mode */ 3: vmrun %_ASM_AX 4: @@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) mov SVM_current_vmcb(%rdi), %rax mov KVM_VMCB_pa(%rax), %rax + /* Clobbers EFLAGS.ZF */ + VM_CLEAR_CPU_BUFFERS + /* Enter guest mode */ 1: vmrun %rax 2: diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 7779ab0ca7ce..efc575a00edd 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -602,6 +602,7 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(ghostwrite); CPU_SHOW_VULN_FALLBACK(old_microcode); CPU_SHOW_VULN_FALLBACK(indirect_target_selection); +CPU_SHOW_VULN_FALLBACK(tsa); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -620,6 +621,7 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL); static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); +static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -639,6 +641,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_ghostwrite.attr, &dev_attr_old_microcode.attr, &dev_attr_indirect_target_selection.attr, + &dev_attr_tsa.attr, NULL }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 96a3a0d6a60e..6378370a952f 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -82,6 +82,7 @@ extern ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, From 31272abd5974b38ba312e9cf2ec2f09f9dd7dcba Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 11 Sep 2024 11:00:50 +0200 Subject: [PATCH 005/234] KVM: SVM: Advertise TSA CPUID bits to guests Synthesize the TSA CPUID feature bits for guests. Set TSA_{SQ,L1}_NO on unaffected machines. 
Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/cpuid.c | 10 +++++++++- arch/x86/kvm/reverse_cpuid.h | 7 +++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b4a391929cdb..eb559f3564c6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -764,6 +764,7 @@ enum kvm_only_cpuid_leafs { CPUID_8000_0022_EAX, CPUID_7_2_EDX, CPUID_24_0_EBX, + CPUID_8000_0021_ECX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index b2d006756e02..f84bc0569c9c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1165,6 +1165,8 @@ void kvm_set_cpu_caps(void) */ SYNTHESIZED_F(LFENCE_RDTSC), /* SmmPgCfgLock */ + /* 4: Resv */ + SYNTHESIZED_F(VERW_CLEAR), F(NULL_SEL_CLR_BASE), /* UpperAddressIgnore */ F(AUTOIBRS), @@ -1179,6 +1181,11 @@ void kvm_set_cpu_caps(void) F(SRSO_USER_KERNEL_NO), ); + kvm_cpu_cap_init(CPUID_8000_0021_ECX, + SYNTHESIZED_F(TSA_SQ_NO), + SYNTHESIZED_F(TSA_L1_NO), + ); + kvm_cpu_cap_init(CPUID_8000_0022_EAX, F(PERFMON_V2), ); @@ -1748,8 +1755,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; case 0x80000021: - entry->ebx = entry->ecx = entry->edx = 0; + entry->ebx = entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0021_EAX); + cpuid_entry_override(entry, CPUID_8000_0021_ECX); break; /* AMD Extended Performance Monitoring and Debug */ case 0x80000022: { diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index fde0ae986003..c53b92379e6e 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -52,6 +52,10 @@ /* CPUID level 0x80000022 (EAX) */ #define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0) +/* CPUID level 0x80000021 (ECX) */ +#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1) +#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2) + struct cpuid_reg { u32 function; u32 index; @@ -82,6 +86,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, + [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, }; /* @@ -121,6 +126,8 @@ static __always_inline u32 __feature_translate(int x86_feature) KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); + KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO); + KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO); default: return x86_feature; } From 2329f250e04d3b8e78b36a68b9880ca7750a07ef Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 27 Mar 2025 12:23:55 +0100 Subject: [PATCH 006/234] x86/microcode/AMD: Add TSA microcode SHAs Signed-off-by: Borislav Petkov (AMD) --- arch/x86/kernel/cpu/microcode/amd_shas.c | 112 +++++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c index 2a1655b1fdd8..1fd349cfc802 100644 --- a/arch/x86/kernel/cpu/microcode/amd_shas.c +++ b/arch/x86/kernel/cpu/microcode/amd_shas.c @@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = { 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, } }, + { 0xa0011d7, { + 0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b, + 0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12, + 
0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2, + 0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36, + } + }, { 0xa001223, { 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, @@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = { 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, } }, + { 0xa00123b, { + 0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2, + 0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3, + 0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28, + 0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72, + } + }, { 0xa00820c, { 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, @@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = { 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, } }, + { 0xa00820d, { + 0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4, + 0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca, + 0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1, + 0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7, + } + }, { 0xa10113e, { 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, @@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = { 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, } }, + { 0xa10114c, { + 0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64, + 0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74, + 0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a, + 0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0, + } + }, { 0xa10123e, { 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, @@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = { 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, } }, + { 0xa10124c, { + 0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90, + 0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46, + 0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc, + 0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2, + } + }, { 0xa108108, { 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, @@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = { 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, } }, + { 0xa108109, { + 0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa, + 0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b, + 0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35, + 0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59, + } + }, { 0xa20102d, { 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, @@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = { 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, } }, + { 0xa20102e, { + 0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd, + 0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6, + 0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5, + 0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe, + } + }, { 0xa201210, { 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, @@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = { 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, } }, + { 0xa201211, { + 0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95, + 0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27, + 0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff, + 0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27, + } + }, { 0xa404107, { 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, @@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = { 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, } }, + { 0xa404108, { + 0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc, + 0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb, + 0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19, + 0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00, + } + }, { 0xa500011, { 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, @@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = { 
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, } }, + { 0xa500012, { + 0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4, + 0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16, + 0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f, + 0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63, + } + }, { 0xa601209, { 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, @@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = { 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, } }, + { 0xa60120a, { + 0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d, + 0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b, + 0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d, + 0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c, + } + }, { 0xa704107, { 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, @@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = { 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, } }, + { 0xa704108, { + 0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93, + 0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98, + 0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49, + 0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a, + } + }, { 0xa705206, { 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, @@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = { 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, } }, + { 0xa705208, { + 0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19, + 0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2, + 0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11, + 0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff, + } + }, { 0xa708007, { 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, @@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = { 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, } }, + { 0xa708008, { + 0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46, + 0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab, + 0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16, + 0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b, + } + }, { 0xa70c005, { 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, @@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = { 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, } }, + { 0xa70c008, { + 0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21, + 0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e, + 0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66, + 0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0, + } + }, { 0xaa00116, { 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, @@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = { 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, } }, + { 0xaa00216, { + 0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5, + 0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08, + 0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2, + 0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1, + } + }, }; From 8e786a85c0a3c0fffae6244733fb576eeabd9dec Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Mon, 14 Apr 2025 15:33:19 +0200 Subject: [PATCH 007/234] x86/process: Move the buffer clearing before MONITOR Move the VERW clearing before the MONITOR so that VERW doesn't disarm it and the machine never enters C1. Original idea by Kim Phillips . 
Suggested-by: Andrew Cooper Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/mwait.h | 25 +++++++++++++++---------- arch/x86/kernel/process.c | 16 ++++++++++++---- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 82bd9eb73b3c..6ca6516c7492 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -43,8 +43,6 @@ static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx) static __always_inline void __mwait(u32 eax, u32 ecx) { - x86_idle_clear_cpu_buffers(); - /* * Use the instruction mnemonic with implicit operands, as the LLVM * assembler fails to assemble the mnemonic with explicit operands: @@ -98,7 +96,6 @@ static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx) */ static __always_inline void __sti_mwait(u32 eax, u32 ecx) { - x86_idle_clear_cpu_buffers(); asm volatile("sti; mwait" :: "a" (eax), "c" (ecx)); } @@ -115,21 +112,29 @@ static __always_inline void __sti_mwait(u32 eax, u32 ecx) */ static __always_inline void mwait_idle_with_hints(u32 eax, u32 ecx) { + if (need_resched()) + return; + + x86_idle_clear_cpu_buffers(); + if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { const void *addr = ¤t_thread_info()->flags; alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); __monitor(addr, 0, 0); - if (!need_resched()) { - if (ecx & 1) { - __mwait(eax, ecx); - } else { - __sti_mwait(eax, ecx); - raw_local_irq_disable(); - } + if (need_resched()) + goto out; + + if (ecx & 1) { + __mwait(eax, ecx); + } else { + __sti_mwait(eax, ecx); + raw_local_irq_disable(); } } + +out: current_clr_polling(); } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 704883c21f3a..a838be04f874 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -907,16 +907,24 @@ static __init bool prefer_mwait_c1_over_halt(void) */ static __cpuidle void mwait_idle(void) { + if (need_resched()) + return; + + x86_idle_clear_cpu_buffers(); + if (!current_set_polling_and_test()) { const void *addr = ¤t_thread_info()->flags; alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); __monitor(addr, 0, 0); - if (!need_resched()) { - __sti_mwait(0, 0); - raw_local_irq_disable(); - } + if (need_resched()) + goto out; + + __sti_mwait(0, 0); + raw_local_irq_disable(); } + +out: __current_clr_polling(); } From 93712205ce2f1fb047739494c0399a26ea4f0890 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Thu, 12 Jun 2025 11:14:48 +0200 Subject: [PATCH 008/234] pinctrl: qcom: msm: mark certain pins as invalid for interrupts On some platforms, the UFS-reset pin has no interrupt logic in TLMM but is nevertheless registered as a GPIO in the kernel. This enables the user-space to trigger a BUG() in the pinctrl-msm driver by running, for example: `gpiomon -c 0 113` on RB2. The exact culprit is requesting pins whose intr_detection_width setting is not 1 or 2 for interrupts. This hits a BUG() in msm_gpio_irq_set_type(). Potentially crashing the kernel due to an invalid request from user-space is not optimal, so let's go through the pins and mark those that would fail the check as invalid for the irq chip as we should not even register them as available irqs. This function can be extended if we determine that there are more corner-cases like this. 
Fixes: f365be092572 ("pinctrl: Add Qualcomm TLMM driver") Cc: stable@vger.kernel.org Reviewed-by: Bjorn Andersson Signed-off-by: Bartosz Golaszewski Link: https://lore.kernel.org/20250612091448.41546-1-brgl@bgdev.pl Signed-off-by: Linus Walleij --- drivers/pinctrl/qcom/pinctrl-msm.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 5c4687de1464..f713c80d7f3e 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1038,6 +1038,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, test_bit(d->hwirq, pctrl->skip_wake_irqs); } +static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g; + int i; + + bitmap_fill(valid_mask, ngpios); + + for (i = 0; i < ngpios; i++) { + g = &pctrl->soc->groups[i]; + + if (g->intr_detection_width != 1 && + g->intr_detection_width != 2) + clear_bit(i, valid_mask); + } +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1441,6 +1460,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parents[0] = pctrl->irq; + girq->init_valid_mask = msm_gpio_irq_init_valid_mask; ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { From 7d502192431e2d863f4b2595182d61c7fa8e656f Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Thu, 12 Jun 2025 15:19:27 +0200 Subject: [PATCH 009/234] MAINTAINERS: drop bouncing Lakshmi Sowjanya D The address for Lakshmi Sowjanya D: lakshmi.sowjanya.d@intel.com is bouncing. Drop it and mark the driver as orphaned. Signed-off-by: Bartosz Golaszewski Link: https://lore.kernel.org/20250612131927.127733-1-brgl@bgdev.pl Signed-off-by: Linus Walleij --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 0c1d245bf7b8..949317e39ed5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19566,8 +19566,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git F: drivers/pinctrl/intel/ PIN CONTROLLER - KEEMBAY -M: Lakshmi Sowjanya D -S: Supported +S: Orphan F: drivers/pinctrl/pinctrl-keembay* PIN CONTROLLER - MEDIATEK From 46147490b4098e200b7d7d3ac4637a3e4f7b806a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 13 Jun 2025 20:13:12 +0200 Subject: [PATCH 010/234] pinctrl: nuvoton: Fix boot on ma35dx platforms As part of a wider cleanup trying to get rid of OF specific APIs, an incorrect (and partially unrelated) cleanup was introduced. The goal was to replace a device_for_each_chil_node() loop including an additional condition inside by a macro doing both the loop and the check on a single line. The snippet: device_for_each_child_node(dev, child) if (fwnode_property_present(child, "gpio-controller")) continue; was replaced by: for_each_gpiochip_node(dev, child) which expands into: device_for_each_child_node(dev, child) for_each_if(fwnode_property_present(child, "gpio-controller")) This change is actually doing the opposite of what was initially expected, breaking the probe of this driver, breaking at the same time the whole boot of Nuvoton platforms (no more console, the kernel WARN()). Revert these two changes to roll back to the correct behavior. 
Fixes: 693c9ecd8326 ("pinctrl: nuvoton: Reduce use of OF-specific APIs") Cc: stable@vger.kernel.org Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/20250613181312.1269794-1-miquel.raynal@bootlin.com Signed-off-by: Linus Walleij --- drivers/pinctrl/nuvoton/pinctrl-ma35.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c index 06ae1fe8b8c5..b51704bafd81 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c +++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c @@ -1074,7 +1074,10 @@ static int ma35_pinctrl_probe_dt(struct platform_device *pdev, struct ma35_pinct u32 idx = 0; int ret; - for_each_gpiochip_node(dev, child) { + device_for_each_child_node(dev, child) { + if (fwnode_property_present(child, "gpio-controller")) + continue; + npctl->nfunctions++; npctl->ngroups += of_get_child_count(to_of_node(child)); } @@ -1092,7 +1095,10 @@ static int ma35_pinctrl_probe_dt(struct platform_device *pdev, struct ma35_pinct if (!npctl->groups) return -ENOMEM; - for_each_gpiochip_node(dev, child) { + device_for_each_child_node(dev, child) { + if (fwnode_property_present(child, "gpio-controller")) + continue; + ret = ma35_pinctrl_parse_functions(child, npctl, idx++); if (ret) { fwnode_handle_put(child); From 6306e0c5a0d28e9df2b5902f4a021204bee75173 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 12 Jun 2025 14:56:57 +0200 Subject: [PATCH 011/234] clk: scmi: Handle case where child clocks are initialized before their parents The SCMI clock driver currently assumes that parent clocks are always initialized before their children. However, this assumption can fail if a child clock is encountered before its parent during probe. This leads to an issue during initialization of the parent_data array: sclk->parent_data[i].hw = hws[sclk->info->parents[i]]; If the parent clock's hardware structure has not been initialized yet, this assignment results in invalid data. To resolve this, allocate all struct scmi_clk instances as a contiguous array at the beginning of the probe and populate the hws[] array upfront. This ensures that any parent referenced later is already initialized, regardless of the order in which clocks are processed. Note that we can no longer free individual scmi_clk instances if scmi_clk_ops_init() fails which shouldn't be a problem if the SCMI platform has proper per-agent clock discovery. 
Fixes: 65a8a3dd3b95f ("clk: scmi: Add support for clock {set,get}_parent") Reviewed-by: peng.fan@nxp.com Reviewed-by: Cristian Marussi Reviewed-by: Sudeep Holla Signed-off-by: Sascha Hauer Link: https://lore.kernel.org/r/20250612-clk-scmi-children-parent-fix-v3-1-7de52a27593d@pengutronix.de Signed-off-by: Stephen Boyd --- drivers/clk/clk-scmi.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 15510c2ff21c..1b1561c84127 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -404,6 +404,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) const struct scmi_handle *handle = sdev->handle; struct scmi_protocol_handle *ph; const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {}; + struct scmi_clk *sclks; if (!handle) return -ENODEV; @@ -430,18 +431,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev) transport_is_atomic = handle->is_transport_atomic(handle, &atomic_threshold_us); - for (idx = 0; idx < count; idx++) { - struct scmi_clk *sclk; - const struct clk_ops *scmi_ops; + sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL); + if (!sclks) + return -ENOMEM; - sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); - if (!sclk) - return -ENOMEM; + for (idx = 0; idx < count; idx++) + hws[idx] = &sclks[idx].hw; + + for (idx = 0; idx < count; idx++) { + struct scmi_clk *sclk = &sclks[idx]; + const struct clk_ops *scmi_ops; sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); - devm_kfree(dev, sclk); + hws[idx] = NULL; continue; } @@ -479,13 +483,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev) if (err) { dev_err(dev, "failed to register clock %d\n", idx); devm_kfree(dev, sclk->parent_data); - devm_kfree(dev, sclk); hws[idx] = NULL; } else { dev_dbg(dev, "Registered clock:%s%s\n", sclk->info->name, scmi_ops->enable ? " (atomic ops)" : ""); - hws[idx] = &sclk->hw; } } From aacc875a448d363332b9df0621dde6d3a225ea9f Mon Sep 17 00:00:00 2001 From: Xiaolei Wang Date: Thu, 19 Jun 2025 14:21:08 +0800 Subject: [PATCH 012/234] clk: imx: Fix an out-of-bounds access in dispmix_csr_clk_dev_data When num_parents is 4, __clk_register() occurs an out-of-bounds when accessing parent_names member. Use ARRAY_SIZE() instead of hardcode number here. 
BUG: KASAN: global-out-of-bounds in __clk_register+0x1844/0x20d8 Read of size 8 at addr ffff800086988e78 by task kworker/u24:3/59 Hardware name: NXP i.MX95 19X19 board (DT) Workqueue: events_unbound deferred_probe_work_func Call trace: dump_backtrace+0x94/0xec show_stack+0x18/0x24 dump_stack_lvl+0x8c/0xcc print_report+0x398/0x5fc kasan_report+0xd4/0x114 __asan_report_load8_noabort+0x20/0x2c __clk_register+0x1844/0x20d8 clk_hw_register+0x44/0x110 __clk_hw_register_mux+0x284/0x3a8 imx95_bc_probe+0x4f4/0xa70 Fixes: 5224b189462f ("clk: imx: add i.MX95 BLK CTL clk driver") Cc: stable@vger.kernel.org Reviewed-by: Frank Li Signed-off-by: Xiaolei Wang Link: https://lore.kernel.org/r/20250619062108.2016511-1-xiaolei.wang@windriver.com Signed-off-by: Stephen Boyd --- drivers/clk/imx/clk-imx95-blk-ctl.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 25974947ad0c..cc2ee2be1819 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -219,11 +219,15 @@ static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = { .clk_reg_offset = 0, }; +static const char * const disp_engine_parents[] = { + "videopll1", "dsi_pll", "ldb_pll_div7" +}; + static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { [IMX95_CLK_DISPMIX_ENG0_SEL] = { .name = "disp_engine0_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 0, .bit_width = 2, @@ -232,8 +236,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { }, [IMX95_CLK_DISPMIX_ENG1_SEL] = { .name = "disp_engine1_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 2, .bit_width = 2, From f8b53cc9174c5980549f60c972faad82b660b62d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 20 Jun 2025 13:16:07 +0200 Subject: [PATCH 013/234] efi: Fix .data section size calculations when .sbat is present Commit 0f9a1739dd0e ("efi: zboot specific mechanism for embedding SBAT section") neglected to adjust the sizes of the .data section when CONFIG_EFI_SBAT_FILE is set. As the result, the produced PE binary is incorrect and some tools complain about it. E.g. 'sbsign' reports: # sbsign --key my.key --cert my.crt arch/arm64/boot/vmlinuz.efi warning: file-aligned section .data extends beyond end of file warning: checksum areas are greater than image size. Invalid section table? Note, '__data_size' is also used in the PE optional header and it is not entirely clear whether .sbat needs to be accounted as part of SizeOfInitializedData or not. As the header seems to be unused by the real world firmware, keeping the field equal to __data_size. 
Fixes: 0f9a1739dd0e ("efi: zboot specific mechanism for embedding SBAT section") Reported-by: Heinrich Schuchardt Signed-off-by: Vitaly Kuznetsov Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/libstub/zboot.lds | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index 4b8d5cd3dfa2..367907eb7d86 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -58,6 +58,6 @@ SECTIONS PROVIDE(__efistub__gzdata_size = ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start)); -PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); -PROVIDE(__data_size = ABSOLUTE(_end - _etext)); +PROVIDE(__data_rawsize = ABSOLUTE(_edata - _data)); +PROVIDE(__data_size = ABSOLUTE(_end - _data)); PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat)); From 4580dbef5ce0f95a4bd8ac2d007bc4fbf1539332 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 20 Jun 2025 13:28:08 -0400 Subject: [PATCH 014/234] KVM: TDX: Exit to userspace for SetupEventNotifyInterrupt Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 7 +++++++ arch/x86/include/asm/shared/tdx.h | 1 + arch/x86/kvm/vmx/tdx.c | 23 +++++++++++++++++++++++ include/uapi/linux/kvm.h | 4 ++++ 4 files changed, 35 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 9abf93ee5f65..f0d961436d0f 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -7196,6 +7196,10 @@ The valid value for 'flags' is: u64 leaf; u64 r11, r12, r13, r14; } get_tdvmcall_info; + struct { + u64 ret; + u64 vector; + } setup_event_notify; }; } tdx; @@ -7226,6 +7230,9 @@ status of TDVMCALLs. The output values for the given leaf should be placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info`` field of the union. +* ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to +set up a notification interrupt for vector ``vector``. + KVM may add support for more values in the future that may cause a userspace exit, even without calls to ``KVM_ENABLE_CAP`` or similar. 
In this case, it will enter with output fields already valid; in the common case, the diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h index d8525e6ef50a..8bc074c8d7c6 100644 --- a/arch/x86/include/asm/shared/tdx.h +++ b/arch/x86/include/asm/shared/tdx.h @@ -72,6 +72,7 @@ #define TDVMCALL_MAP_GPA 0x10001 #define TDVMCALL_GET_QUOTE 0x10002 #define TDVMCALL_REPORT_FATAL_ERROR 0x10003 +#define TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT 0x10004ULL /* * TDG.VP.VMCALL Status Codes (returned in R10) diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c index 1ad20c273f3b..b4055a746ecd 100644 --- a/arch/x86/kvm/vmx/tdx.c +++ b/arch/x86/kvm/vmx/tdx.c @@ -1530,6 +1530,27 @@ static int tdx_get_quote(struct kvm_vcpu *vcpu) return 0; } +static int tdx_setup_event_notify_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_tdx *tdx = to_tdx(vcpu); + u64 vector = tdx->vp_enter_args.r12; + + if (vector < 32 || vector > 255) { + tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND); + return 1; + } + + vcpu->run->exit_reason = KVM_EXIT_TDX; + vcpu->run->tdx.flags = 0; + vcpu->run->tdx.nr = TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT; + vcpu->run->tdx.setup_event_notify.ret = TDVMCALL_STATUS_SUBFUNC_UNSUPPORTED; + vcpu->run->tdx.setup_event_notify.vector = vector; + + vcpu->arch.complete_userspace_io = tdx_complete_simple; + + return 0; +} + static int handle_tdvmcall(struct kvm_vcpu *vcpu) { switch (tdvmcall_leaf(vcpu)) { @@ -1541,6 +1562,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu) return tdx_get_td_vm_call_info(vcpu); case TDVMCALL_GET_QUOTE: return tdx_get_quote(vcpu); + case TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT: + return tdx_setup_event_notify_interrupt(vcpu); default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 37891580d05d..7a4c35ff03fe 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -467,6 +467,10 @@ struct kvm_run { __u64 leaf; __u64 r11, r12, r13, r14; } get_tdvmcall_info; + struct { + __u64 ret; + __u64 vector; + } setup_event_notify; }; } tdx; /* Fix the size of the union. */ From 28224ef02b56fceee2c161fe2a49a0bb197e44f5 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 20 Jun 2025 14:20:20 -0400 Subject: [PATCH 015/234] KVM: TDX: Report supported optional TDVMCALLs in TDX capabilities Allow userspace to advertise TDG.VP.VMCALL subfunctions that the kernel also supports. For each output register of GetTdVmCallInfo's leaf 1, add two fields to KVM_TDX_CAPABILITIES: one for kernel-supported TDVMCALLs (userspace can set those blindly) and one for user-supported TDVMCALLs (userspace can set those if it knows how to handle them). Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/x86/intel-tdx.rst | 15 ++++++++++++++- arch/x86/include/uapi/asm/kvm.h | 8 +++++++- arch/x86/kvm/vmx/tdx.c | 7 +++++++ 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/Documentation/virt/kvm/x86/intel-tdx.rst b/Documentation/virt/kvm/x86/intel-tdx.rst index 76bdd95334d6..5efac62c92c7 100644 --- a/Documentation/virt/kvm/x86/intel-tdx.rst +++ b/Documentation/virt/kvm/x86/intel-tdx.rst @@ -79,7 +79,20 @@ to be configured to the TDX guest. 
struct kvm_tdx_capabilities { __u64 supported_attrs; __u64 supported_xfam; - __u64 reserved[254]; + + /* TDG.VP.VMCALL hypercalls executed in kernel and forwarded to + * userspace, respectively + */ + __u64 kernel_tdvmcallinfo_1_r11; + __u64 user_tdvmcallinfo_1_r11; + + /* TDG.VP.VMCALL instruction executions subfunctions executed in kernel + * and forwarded to userspace, respectively + */ + __u64 kernel_tdvmcallinfo_1_r12; + __u64 user_tdvmcallinfo_1_r12; + + __u64 reserved[250]; /* Configurable CPUID bits for userspace */ struct kvm_cpuid2 cpuid; diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 6f3499507c5e..0f15d683817d 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -965,7 +965,13 @@ struct kvm_tdx_cmd { struct kvm_tdx_capabilities { __u64 supported_attrs; __u64 supported_xfam; - __u64 reserved[254]; + + __u64 kernel_tdvmcallinfo_1_r11; + __u64 user_tdvmcallinfo_1_r11; + __u64 kernel_tdvmcallinfo_1_r12; + __u64 user_tdvmcallinfo_1_r12; + + __u64 reserved[250]; /* Configurable CPUID bits for userspace */ struct kvm_cpuid2 cpuid; diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c index b4055a746ecd..f31ccdeb905b 100644 --- a/arch/x86/kvm/vmx/tdx.c +++ b/arch/x86/kvm/vmx/tdx.c @@ -173,6 +173,9 @@ static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char i tdx_clear_unsupported_cpuid(entry); } +#define TDVMCALLINFO_GET_QUOTE BIT(0) +#define TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT BIT(1) + static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf, struct kvm_tdx_capabilities *caps) { @@ -188,6 +191,10 @@ static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf, caps->cpuid.nent = td_conf->num_cpuid_config; + caps->user_tdvmcallinfo_1_r11 = + TDVMCALLINFO_GET_QUOTE | + TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT; + for (i = 0; i < td_conf->num_cpuid_config; i++) td_init_cpuid_entry2(&caps->cpuid.entries[i], i); From a42b4dcc4f9f309a23e6de5ae57a680b9fd2ea10 Mon Sep 17 00:00:00 2001 From: Julien Massot Date: Fri, 16 May 2025 16:12:13 +0200 Subject: [PATCH 016/234] dt-bindings: clock: mediatek: Add #reset-cells property for MT8188 The '#reset-cells' property is permitted for some of the MT8188 clock controllers, but not listed as a valid property. Fixes: 9a5cd59640ac ("dt-bindings: clock: mediatek: Add SMI LARBs reset for MT8188") Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Julien Massot Link: https://lore.kernel.org/r/20250516-dtb-check-mt8188-v2-1-fb60bef1b8e1@collabora.com Acked-by: Conor Dooley Signed-off-by: Stephen Boyd --- .../devicetree/bindings/clock/mediatek,mt8188-clock.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/clock/mediatek,mt8188-clock.yaml b/Documentation/devicetree/bindings/clock/mediatek,mt8188-clock.yaml index 2985c8c717d7..5403242545ab 100644 --- a/Documentation/devicetree/bindings/clock/mediatek,mt8188-clock.yaml +++ b/Documentation/devicetree/bindings/clock/mediatek,mt8188-clock.yaml @@ -52,6 +52,9 @@ properties: '#clock-cells': const: 1 + '#reset-cells': + const: 1 + required: - compatible - reg From ecf371f8b02d5e31b9aa1da7f159f1b2107bdb01 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 2 Jun 2025 15:44:58 -0700 Subject: [PATCH 017/234] KVM: SVM: Reject SEV{-ES} intra host migration if vCPU creation is in-flight Reject migration of SEV{-ES} state if either the source or destination VM is actively creating a vCPU, i.e. 
if kvm_vm_ioctl_create_vcpu() is in the section between incrementing created_vcpus and online_vcpus. The bulk of vCPU creation runs _outside_ of kvm->lock to allow creating multiple vCPUs in parallel, and so sev_info.es_active can get toggled from false=>true in the destination VM after (or during) svm_vcpu_create(), resulting in an SEV{-ES} VM effectively having a non-SEV{-ES} vCPU. The issue manifests most visibly as a crash when trying to free a vCPU's NULL VMSA page in an SEV-ES VM, but any number of things can go wrong. BUG: unable to handle page fault for address: ffffebde00000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: Oops: 0000 [#1] SMP KASAN NOPTI CPU: 227 UID: 0 PID: 64063 Comm: syz.5.60023 Tainted: G U O 6.15.0-smp-DEV #2 NONE Tainted: [U]=USER, [O]=OOT_MODULE Hardware name: Google, Inc. Arcadia_IT_80/Arcadia_IT_80, BIOS 12.52.0-0 10/28/2024 RIP: 0010:constant_test_bit arch/x86/include/asm/bitops.h:206 [inline] RIP: 0010:arch_test_bit arch/x86/include/asm/bitops.h:238 [inline] RIP: 0010:_test_bit include/asm-generic/bitops/instrumented-non-atomic.h:142 [inline] RIP: 0010:PageHead include/linux/page-flags.h:866 [inline] RIP: 0010:___free_pages+0x3e/0x120 mm/page_alloc.c:5067 Code: <49> f7 06 40 00 00 00 75 05 45 31 ff eb 0c 66 90 4c 89 f0 4c 39 f0 RSP: 0018:ffff8984551978d0 EFLAGS: 00010246 RAX: 0000777f80000001 RBX: 0000000000000000 RCX: ffffffff918aeb98 RDX: 0000000000000000 RSI: 0000000000000008 RDI: ffffebde00000000 RBP: 0000000000000000 R08: ffffebde00000007 R09: 1ffffd7bc0000000 R10: dffffc0000000000 R11: fffff97bc0000001 R12: dffffc0000000000 R13: ffff8983e19751a8 R14: ffffebde00000000 R15: 1ffffd7bc0000000 FS: 0000000000000000(0000) GS:ffff89ee661d3000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffebde00000000 CR3: 000000793ceaa000 CR4: 0000000000350ef0 DR0: 0000000000000000 DR1: 0000000000000b5f DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Call Trace: sev_free_vcpu+0x413/0x630 arch/x86/kvm/svm/sev.c:3169 svm_vcpu_free+0x13a/0x2a0 arch/x86/kvm/svm/svm.c:1515 kvm_arch_vcpu_destroy+0x6a/0x1d0 arch/x86/kvm/x86.c:12396 kvm_vcpu_destroy virt/kvm/kvm_main.c:470 [inline] kvm_destroy_vcpus+0xd1/0x300 virt/kvm/kvm_main.c:490 kvm_arch_destroy_vm+0x636/0x820 arch/x86/kvm/x86.c:12895 kvm_put_kvm+0xb8e/0xfb0 virt/kvm/kvm_main.c:1310 kvm_vm_release+0x48/0x60 virt/kvm/kvm_main.c:1369 __fput+0x3e4/0x9e0 fs/file_table.c:465 task_work_run+0x1a9/0x220 kernel/task_work.c:227 exit_task_work include/linux/task_work.h:40 [inline] do_exit+0x7f0/0x25b0 kernel/exit.c:953 do_group_exit+0x203/0x2d0 kernel/exit.c:1102 get_signal+0x1357/0x1480 kernel/signal.c:3034 arch_do_signal_or_restart+0x40/0x690 arch/x86/kernel/signal.c:337 exit_to_user_mode_loop kernel/entry/common.c:111 [inline] exit_to_user_mode_prepare include/linux/entry-common.h:329 [inline] __syscall_exit_to_user_mode_work kernel/entry/common.c:207 [inline] syscall_exit_to_user_mode+0x67/0xb0 kernel/entry/common.c:218 do_syscall_64+0x7c/0x150 arch/x86/entry/syscall_64.c:100 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7f87a898e969 Modules linked in: gq(O) gsmi: Log Shutdown Reason 0x03 CR2: ffffebde00000000 ---[ end trace 0000000000000000 ]--- Deliberately don't check for a NULL VMSA when freeing the vCPU, as crashing the host is likely desirable due to the VMSA being consumed by hardware. E.g. if KVM manages to allow VMRUN on the vCPU, hardware may read/write a bogus VMSA page. 
Accessing PFN 0 is "fine"-ish now that it's sequestered away thanks to L1TF, but panicking in this scenario is preferable to potentially running with corrupted state. Reported-by: Alexander Potapenko Tested-by: Alexander Potapenko Fixes: 0b020f5af092 ("KVM: SEV: Add support for SEV-ES intra host migration") Fixes: b56639318bb2 ("KVM: SEV: Add support for SEV intra host migration") Cc: stable@vger.kernel.org Cc: James Houghton Cc: Peter Gonda Reviewed-by: Liam Merwick Tested-by: Liam Merwick Reviewed-by: James Houghton Link: https://lore.kernel.org/r/20250602224459.41505-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 459c3b791fd4..65d1597c3fed 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1971,6 +1971,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src) struct kvm_vcpu *src_vcpu; unsigned long i; + if (src->created_vcpus != atomic_read(&src->online_vcpus) || + dst->created_vcpus != atomic_read(&dst->online_vcpus)) + return -EBUSY; + if (!sev_es_guest(src)) return 0; From 48f15f624189762e7ff2d95bcbb68e21c2d56077 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 2 Jun 2025 15:44:59 -0700 Subject: [PATCH 018/234] KVM: SVM: Initialize vmsa_pa in VMCB to INVALID_PAGE if VMSA page is NULL When creating an SEV-ES vCPU for intra-host migration, set its vmsa_pa to INVALID_PAGE to harden against doing VMRUN with a bogus VMSA (KVM checks for a valid VMSA page in pre_sev_run()). Cc: Tom Lendacky Reviewed-by: Liam Merwick Tested-by: Liam Merwick Link: https://lore.kernel.org/r/20250602224459.41505-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/sev.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 65d1597c3fed..b201f77fcd49 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -4449,8 +4449,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) * the VMSA will be NULL if this vCPU is the destination for intrahost * migration, and will be copied later. */ - if (svm->sev_es.vmsa && !svm->sev_es.snp_has_guest_vmsa) - svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); + if (!svm->sev_es.snp_has_guest_vmsa) { + if (svm->sev_es.vmsa) + svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); + else + svm->vmcb->control.vmsa_pa = INVALID_PAGE; + } if (cpu_feature_enabled(X86_FEATURE_ALLOWED_SEV_FEATURES)) svm->vmcb->control.allowed_sev_features = sev->vmsa_features | From 0b6f4a5f0878c410677a8201c48127fda0bfd843 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 13 Jun 2025 12:39:22 -0700 Subject: [PATCH 019/234] KVM: x86/hyper-v: Use preallocated per-vCPU buffer for de-sparsified vCPU masks Use a preallocated per-vCPU bitmap for tracking the unpacked set of vCPUs being targeted for Hyper-V's paravirt TLB flushing. If KVM_MAX_NR_VCPUS is set to 4096 (which is allowed even for MAXSMP=n builds), putting the vCPU mask on-stack pushes kvm_hv_flush_tlb() past the default FRAME_WARN limit. arch/x86/kvm/hyperv.c:2001:12: error: stack frame size (1288) exceeds limit (1024) in 'kvm_hv_flush_tlb' [-Werror,-Wframe-larger-than] 2001 | static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) | ^ 1 error generated. 
Note, sparse_banks was given the same treatment by commit 7d5e88d301f8 ("KVM: x86: hyper-v: Use preallocated buffer in 'struct kvm_vcpu_hv' instead of on-stack 'sparse_banks'"), for the exact same reason. Reported-by: Abinash Lalotra Closes: https://lore.kernel.org/all/20250613111023.786265-1-abinashsinghlalotra@gmail.com Link: https://lore.kernel.org/all/aEylI-O8kFnFHrOH@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 7 ++++++- arch/x86/kvm/hyperv.c | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b4a391929cdb..ee43615d96d8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -700,8 +700,13 @@ struct kvm_vcpu_hv { struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS]; - /* Preallocated buffer for handling hypercalls passing sparse vCPU set */ + /* + * Preallocated buffers for handling hypercalls that pass sparse vCPU + * sets (for high vCPU counts, they're too large to comfortably fit on + * the stack). + */ u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS]; + DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); struct hv_vp_assist_page vp_assist_page; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 24f0318c50d7..75221a11e15e 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -2001,11 +2001,11 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) { struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); + unsigned long *vcpu_mask = hv_vcpu->vcpu_mask; u64 *sparse_banks = hv_vcpu->sparse_banks; struct kvm *kvm = vcpu->kvm; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; - DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; /* * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE' From a7f4dff21fd744d08fa956c243d2b1795f23cbf7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 8 May 2025 13:30:12 -0700 Subject: [PATCH 020/234] KVM: x86/xen: Allow 'out of range' event channel ports in IRQ routing table. To avoid imposing an ordering constraint on userspace, allow 'invalid' event channel targets to be configured in the IRQ routing table. This is the same as accepting interrupts targeted at vCPUs which don't exist yet, which is already the case for both Xen event channels *and* for MSIs (which don't do any filtering of permitted APIC ID targets at all). If userspace actually *triggers* an IRQ with an invalid target, that will fail cleanly, as kvm_xen_set_evtchn_fast() also does the same range check. If KVM enforced that the IRQ target must be valid at the time it is *configured*, that would force userspace to create all vCPUs and do various other parts of setup (in this case, setting the Xen long_mode) before restoring the IRQ table. 
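The ordering-independence argument is easy to demonstrate outside the kernel. A minimal userspace sketch, using the 1024/4096 limits mentioned above and invented helper names rather than KVM's actual ones:

    #include <stdbool.h>
    #include <stdio.h>

    static bool long_mode;  /* stand-in for the Xen long_mode flag */

    static unsigned int max_port(void)
    {
            return long_mode ? 4096 : 1024;
    }

    /* Configuration: accept any port, no range check here. */
    static int setup_route(unsigned int port)
    {
            (void)port;
            return 0;
    }

    /* Delivery: validate against the limit as it is *now*. */
    static int deliver(unsigned int port)
    {
            return port < max_port() ? 0 : -1;
    }

    int main(void)
    {
            setup_route(2000);              /* table restored before long_mode */
            printf("%d\n", deliver(2000));  /* -1: fails cleanly, no damage    */
            long_mode = true;
            printf("%d\n", deliver(2000));  /* 0: same route now deliverable   */
            return 0;
    }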
Cc: stable@vger.kernel.org Signed-off-by: David Woodhouse Reviewed-by: Paul Durrant Link: https://lore.kernel.org/r/e489252745ac4b53f1f7f50570b03fb416aa2065.camel@infradead.org [sean: massage comment] Signed-off-by: Sean Christopherson --- arch/x86/kvm/xen.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 9b029bb29a16..5fa2cca43653 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1971,8 +1971,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm, { struct kvm_vcpu *vcpu; - if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm)) - return -EINVAL; + /* + * Don't check for the port being within range of max_evtchn_port(). + * Userspace can configure what ever targets it likes; events just won't + * be delivered if/while the target is invalid, just like userspace can + * configure MSIs which target non-existent APICs. + * + * This allow on Live Migration and Live Update, the IRQ routing table + * can be restored *independently* of other things like creating vCPUs, + * without imposing an ordering dependency on userspace. In this + * particular case, the problematic ordering would be with setting the + * Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096 + * instead of 1024 event channels. + */ /* We only support 2 level event channels for now */ if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) From 47bb584237cc285e3a860b70c01f7bda9dcfb05b Mon Sep 17 00:00:00 2001 From: Liam Merwick Date: Mon, 9 Jun 2025 09:11:19 +0000 Subject: [PATCH 021/234] KVM: Allow CPU to reschedule while setting per-page memory attributes When running an SEV-SNP guest with a sufficiently large amount of memory (1TB+), the host can experience CPU soft lockups when running an operation in kvm_vm_set_mem_attributes() to set memory attributes on the whole range of guest memory. watchdog: BUG: soft lockup - CPU#8 stuck for 26s! 
[qemu-kvm:6372] CPU: 8 UID: 0 PID: 6372 Comm: qemu-kvm Kdump: loaded Not tainted 6.15.0-rc7.20250520.el9uek.rc1.x86_64 #1 PREEMPT(voluntary) Hardware name: Oracle Corporation ORACLE SERVER E4-2c/Asm,MB Tray,2U,E4-2c, BIOS 78016600 11/13/2024 RIP: 0010:xas_create+0x78/0x1f0 Code: 00 00 00 41 80 fc 01 0f 84 82 00 00 00 ba 06 00 00 00 bd 06 00 00 00 49 8b 45 08 4d 8d 65 08 41 39 d6 73 20 83 ed 06 48 85 c0 <74> 67 48 89 c2 83 e2 03 48 83 fa 02 75 0c 48 3d 00 10 00 00 0f 87 RSP: 0018:ffffad890a34b940 EFLAGS: 00000286 RAX: ffff96f30b261daa RBX: ffffad890a34b9c8 RCX: 0000000000000000 RDX: 000000000000001e RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000018 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffad890a356868 R13: ffffad890a356860 R14: 0000000000000000 R15: ffffad890a356868 FS: 00007f5578a2a400(0000) GS:ffff97ed317e1000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f015c70fb18 CR3: 00000001109fd006 CR4: 0000000000f70ef0 PKRU: 55555554 Call Trace: xas_store+0x58/0x630 __xa_store+0xa5/0x130 xa_store+0x2c/0x50 kvm_vm_set_mem_attributes+0x343/0x710 [kvm] kvm_vm_ioctl+0x796/0xab0 [kvm] __x64_sys_ioctl+0xa3/0xd0 do_syscall_64+0x8c/0x7a0 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7f5578d031bb Code: ff ff ff 85 c0 79 9b 49 c7 c4 ff ff ff ff 5b 5d 4c 89 e0 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 2d 4c 0f 00 f7 d8 64 89 01 48 RSP: 002b:00007ffe0a742b88 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 000000004020aed2 RCX: 00007f5578d031bb RDX: 00007ffe0a742c80 RSI: 000000004020aed2 RDI: 000000000000000b RBP: 0000010000000000 R08: 0000010000000000 R09: 0000017680000000 R10: 0000000000000080 R11: 0000000000000246 R12: 00005575e5f95120 R13: 00007ffe0a742c80 R14: 0000000000000008 R15: 00005575e5f961e0 While looping through the range of memory setting the attributes, call cond_resched() to give the scheduler a chance to run a higher priority task on the runqueue if necessary and avoid staying in kernel mode long enough to trigger the lockup. Fixes: 5a475554db1e ("KVM: Introduce per-page memory attributes") Cc: stable@vger.kernel.org # 6.12.x Suggested-by: Sean Christopherson Signed-off-by: Liam Merwick Reviewed-by: Pankaj Gupta Link: https://lore.kernel.org/r/20250609091121.2497429-2-liam.merwick@oracle.com Signed-off-by: Sean Christopherson --- virt/kvm/kvm_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index eec82775c5bf..222f0e894a0c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2572,6 +2572,8 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT); if (r) goto out_unlock; + + cond_resched(); } kvm_handle_gfn_range(kvm, &pre_set_range); @@ -2580,6 +2582,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, r = xa_err(xa_store(&kvm->mem_attr_array, i, entry, GFP_KERNEL_ACCOUNT)); KVM_BUG_ON(r, kvm); + cond_resched(); } kvm_handle_gfn_range(kvm, &post_set_range); From 0048ca5e9945f487fc055dad987ee4c7fdc1ed18 Mon Sep 17 00:00:00 2001 From: Chenyi Qiang Date: Fri, 20 Jun 2025 14:22:18 +0800 Subject: [PATCH 022/234] KVM: selftests: Add back the missing check of MONITOR/MWAIT availability The revamp of monitor/mwait test missed the original check of feature availability [*]. 
If MONITOR/MWAIT is not supported or is disabled by IA32_MISC_ENABLE on the host, executing MONITOR or MWAIT instruction from guest doesn't cause monitor/mwait VM exits, but a #UD. [*] https://lore.kernel.org/all/20240411210237.34646-1-zide.chen@intel.com/ Reported-by: Xuelian Guo Fixes: 80fd663590cf ("selftests: kvm: revamp MONITOR/MWAIT tests") Signed-off-by: Chenyi Qiang Link: https://lore.kernel.org/r/20250620062219.342930-1-chenyi.qiang@intel.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/x86/monitor_mwait_test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c index 390ae2d87493..0eb371c62ab8 100644 --- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c +++ b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c @@ -74,6 +74,7 @@ int main(int argc, char *argv[]) int testcase; char test[80]; + TEST_REQUIRE(this_cpu_has(X86_FEATURE_MWAIT)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2)); ksft_print_header(); From 0c84b534047dcf843f4344108bc3f2c2344b7e31 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 25 Jun 2025 09:48:29 +0800 Subject: [PATCH 023/234] Documentation: KVM: Fix unexpected unindent warnings Add proper indentations to bullet list items to resolve the warning: "Bullet list ends without a blank line; unexpected unindent." Closes:https://lore.kernel.org/kvm/20250623162110.6e2f4241@canb.auug.org.au/ Fixes: cf207eac06f6 ("KVM: TDX: Handle TDG.VP.VMCALL") Fixes: 25e8b1dd4883 ("KVM: TDX: Exit to userspace for GetTdVmCallInfo") Reported-by: Stephen Rothwell Signed-off-by: Binbin Wu Reviewed-by: Bagas Sanjaya Link: https://lore.kernel.org/r/20250625014829.82289-1-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson --- Documentation/virt/kvm/api.rst | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index f0d961436d0f..43ed57e048a8 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -7214,21 +7214,21 @@ number from register R11. The remaining field of the union provide the inputs and outputs of the TDVMCALL. Currently the following values of ``nr`` are defined: -* ``TDVMCALL_GET_QUOTE``: the guest has requested to generate a TD-Quote -signed by a service hosting TD-Quoting Enclave operating on the host. -Parameters and return value are in the ``get_quote`` field of the union. -The ``gpa`` field and ``size`` specify the guest physical address -(without the shared bit set) and the size of a shared-memory buffer, in -which the TDX guest passes a TD Report. The ``ret`` field represents -the return value of the GetQuote request. When the request has been -queued successfully, the TDX guest can poll the status field in the -shared-memory area to check whether the Quote generation is completed or -not. When completed, the generated Quote is returned via the same buffer. + * ``TDVMCALL_GET_QUOTE``: the guest has requested to generate a TD-Quote + signed by a service hosting TD-Quoting Enclave operating on the host. + Parameters and return value are in the ``get_quote`` field of the union. + The ``gpa`` field and ``size`` specify the guest physical address + (without the shared bit set) and the size of a shared-memory buffer, in + which the TDX guest passes a TD Report. The ``ret`` field represents + the return value of the GetQuote request. 
When the request has been + queued successfully, the TDX guest can poll the status field in the + shared-memory area to check whether the Quote generation is completed or + not. When completed, the generated Quote is returned via the same buffer. -* ``TDVMCALL_GET_TD_VM_CALL_INFO``: the guest has requested the support -status of TDVMCALLs. The output values for the given leaf should be -placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info`` -field of the union. + * ``TDVMCALL_GET_TD_VM_CALL_INFO``: the guest has requested the support + status of TDVMCALLs. The output values for the given leaf should be + placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info`` + field of the union. * ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to set up a notification interrupt for vector ``vector``. From 51a4273dcab39dd1e850870945ccec664352d383 Mon Sep 17 00:00:00 2001 From: Nikunj A Dadhania Date: Tue, 8 Apr 2025 15:02:11 +0530 Subject: [PATCH 024/234] KVM: SVM: Add missing member in SNP_LAUNCH_START command structure The sev_data_snp_launch_start structure should include a 4-byte desired_tsc_khz field before the gosvw field, which was missed in the initial implementation. As a result, the structure is 4 bytes shorter than expected by the firmware, causing the gosvw field to start 4 bytes early. Fix this by adding the missing 4-byte member for the desired TSC frequency. Fixes: 3a45dc2b419e ("crypto: ccp: Define the SEV-SNP commands") Cc: stable@vger.kernel.org Suggested-by: Tom Lendacky Reviewed-by: Tom Lendacky Tested-by: Vaishali Thakkar Signed-off-by: Nikunj A Dadhania Link: https://lore.kernel.org/r/20250408093213.57962-3-nikunj@amd.com Signed-off-by: Sean Christopherson --- include/linux/psp-sev.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 0b3a36bdaa90..0f5f94137f6d 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -594,6 +594,7 @@ struct sev_data_snp_addr { * @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the * purpose of guest-assisted migration. * @rsvd: reserved + * @desired_tsc_khz: hypervisor desired mean TSC freq in kHz of the guest * @gosvw: guest OS-visible workarounds, as defined by hypervisor */ struct sev_data_snp_launch_start { @@ -603,6 +604,7 @@ struct sev_data_snp_launch_start { u32 ma_en:1; /* In */ u32 imi_en:1; /* In */ u32 rsvd:30; + u32 desired_tsc_khz; /* In */ u8 gosvw[16]; /* In */ } __packed; From fa787ac07b3ceb56dd88a62d1866038498e96230 Mon Sep 17 00:00:00 2001 From: Manuel Andreas Date: Wed, 25 Jun 2025 15:53:19 +0200 Subject: [PATCH 025/234] KVM: x86/hyper-v: Skip non-canonical addresses during PV TLB flush In KVM guests with Hyper-V hypercalls enabled, the hypercalls HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST and HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX allow a guest to request invalidation of portions of a virtual TLB. For this, the hypercall parameter includes a list of GVAs that are supposed to be invalidated. However, when non-canonical GVAs are passed, there is currently no filtering in place and they are eventually passed to checked invocations of INVVPID on Intel / INVLPGA on AMD. 
While AMD's INVLPGA silently ignores non-canonical addresses (effectively a no-op), Intel's INVVPID explicitly signals VM-Fail and ultimately triggers the WARN_ONCE in invvpid_error(): invvpid failed: ext=0x0 vpid=1 gva=0xaaaaaaaaaaaaa000 WARNING: CPU: 6 PID: 326 at arch/x86/kvm/vmx/vmx.c:482 invvpid_error+0x91/0xa0 [kvm_intel] Modules linked in: kvm_intel kvm 9pnet_virtio irqbypass fuse CPU: 6 UID: 0 PID: 326 Comm: kvm-vm Not tainted 6.15.0 #14 PREEMPT(voluntary) RIP: 0010:invvpid_error+0x91/0xa0 [kvm_intel] Call Trace: vmx_flush_tlb_gva+0x320/0x490 [kvm_intel] kvm_hv_vcpu_flush_tlb+0x24f/0x4f0 [kvm] kvm_arch_vcpu_ioctl_run+0x3013/0x5810 [kvm] Hyper-V documents that invalid GVAs (those that are beyond a partition's GVA space) are to be ignored. While not completely clear whether this ruling also applies to non-canonical GVAs, it is likely fine to make that assumption, and manual testing on Azure confirms "real" Hyper-V interprets the specification in the same way. Skip non-canonical GVAs when processing the list of address to avoid tripping the INVVPID failure. Alternatively, KVM could filter out "bad" GVAs before inserting into the FIFO, but practically speaking the only downside of pushing validation to the final processing is that doing so is suboptimal for the guest, and no well-behaved guest will request TLB flushes for non-canonical addresses. Fixes: 260970862c88 ("KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently") Cc: stable@vger.kernel.org Signed-off-by: Manuel Andreas Suggested-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/c090efb3-ef82-499f-a5e0-360fc8420fb7@tum.de Signed-off-by: Sean Christopherson --- arch/x86/kvm/hyperv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 75221a11e15e..ee27064dd72f 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1979,6 +1979,9 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) goto out_flush_all; + if (is_noncanonical_invlpg_address(entries[i], vcpu)) + continue; + /* * Lower 12 bits of 'address' encode the number of additional * pages to flush. From af040a9a296044fd4b748786c2516f172a7617f1 Mon Sep 17 00:00:00 2001 From: Wei-Lin Chang Date: Wed, 25 Jun 2025 16:47:09 +0800 Subject: [PATCH 026/234] KVM: arm64: nv: Fix MI line level calculation in vgic_v3_nested_update_mi() The state of the vcpu's MI line should be asserted when its ICH_HCR_EL2.En is set and ICH_MISR_EL2 is non-zero. Using bitwise AND (&=) directly for this calculation will not give us the correct result when the LSB of the vcpu's ICH_MISR_EL2 isn't set. Correct this by directly computing the line level with a logical AND operation. 
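The operator difference is easy to demonstrate with a tiny userspace program; the register values below are invented, only the bit pattern matters (a non-zero MISR whose bit 0 is clear):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool en = true;             /* ICH_HCR_EL2.En set            */
            unsigned long misr = 0x4;   /* non-zero, but bit 0 is clear  */

            bool level_bitwise = en;
            level_bitwise &= misr;      /* bool(1 & 4) == false: wrong   */

            bool level_logical = en && misr;    /* true: line asserted   */

            printf("bitwise=%d logical=%d\n", level_bitwise, level_logical);
            return 0;
    }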
Signed-off-by: Wei-Lin Chang Link: https://lore.kernel.org/r/20250625084709.3968844-1-r09922117@csie.ntu.edu.tw [maz: drop the level check from the original code] Signed-off-by: Marc Zyngier --- arch/arm64/kvm/vgic/vgic-v3-nested.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c index a50fb7e6841f..679aafe77de2 100644 --- a/arch/arm64/kvm/vgic/vgic-v3-nested.c +++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c @@ -401,9 +401,7 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu) { bool level; - level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En; - if (level) - level &= vgic_v3_get_misr(vcpu); + level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) && vgic_v3_get_misr(vcpu); kvm_vgic_inject_irq(vcpu->kvm, vcpu, vcpu->kvm->arch.vgic.mi_intid, level, vcpu); } From e728e705802fec20f65d974a5d5eb91217ac618d Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 25 Jun 2025 10:55:48 +0000 Subject: [PATCH 027/234] KVM: arm64: Adjust range correctly during host stage-2 faults host_stage2_adjust_range() tries to find the largest block mapping that fits within a memory or mmio region (represented by a kvm_mem_range in this function) during host stage-2 faults under pKVM. To do so, it walks the host stage-2 page-table, finds the faulting PTE and its level, and then progressively increments the level until it finds a granule of the appropriate size. However, the condition in the loop implementing the above is broken as it checks kvm_level_supports_block_mapping() for the next level instead of the current, so pKVM may attempt to map a region larger than can be covered with a single block. This is not a security problem and is quite rare in practice (the kvm_mem_range check usually forces host_stage2_adjust_range() to choose a smaller granule), but this is clearly not the expected behaviour. Refactor the loop to fix the bug and improve readability. 
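For readers unfamiliar with the walker, the selection boils down to "largest block granule whose aligned extent fits inside the permitted range". A simplified userspace sketch, assuming 4 KiB pages where levels 1/2/3 map 1 GiB/2 MiB/4 KiB blocks and ignoring the per-level block-mapping capability check:

    #include <stdint.h>
    #include <stdio.h>

    #define LAST_LEVEL 3

    static uint64_t granule_size(int level)
    {
            /* 4 KiB granule: level 3 -> 4 KiB, 2 -> 2 MiB, 1 -> 1 GiB */
            return 1ULL << (12 + 9 * (LAST_LEVEL - level));
    }

    int main(void)
    {
            uint64_t addr = 0x40201000;
            uint64_t start = 0x40000000, end = 0x40400000;  /* 4 MiB region */

            for (int level = 1; level <= LAST_LEVEL; level++) {
                    uint64_t g = granule_size(level);
                    uint64_t cur_start = addr & ~(g - 1);
                    uint64_t cur_end = cur_start + g;

                    if (cur_start < start || cur_end > end)
                            continue;       /* block sticks out of the range */

                    printf("level %d: %#llx..%#llx\n", level,
                           (unsigned long long)cur_start,
                           (unsigned long long)cur_end);
                    break;                  /* first fit is the largest fit  */
            }
            return 0;
    }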
Fixes: c4f0935e4d95 ("KVM: arm64: Optimize host memory aborts") Signed-off-by: Quentin Perret Link: https://lore.kernel.org/r/20250625105548.984572-1-qperret@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 95d7534c9679..8957734d6183 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -479,6 +479,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) { struct kvm_mem_range cur; kvm_pte_t pte; + u64 granule; s8 level; int ret; @@ -496,18 +497,21 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) return -EPERM; } - do { - u64 granule = kvm_granule_size(level); + for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) { + if (!kvm_level_supports_block_mapping(level)) + continue; + granule = kvm_granule_size(level); cur.start = ALIGN_DOWN(addr, granule); cur.end = cur.start + granule; - level++; - } while ((level <= KVM_PGTABLE_LAST_LEVEL) && - !(kvm_level_supports_block_mapping(level) && - range_included(&cur, range))); + if (!range_included(&cur, range)) + continue; + *range = cur; + return 0; + } - *range = cur; + WARN_ON(1); - return 0; + return -EINVAL; } int host_stage2_idmap_locked(phys_addr_t addr, u64 size, From 9a2b9416fd1d18d97ce1b737a11fcbc521140e5d Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Wed, 25 Jun 2025 12:30:58 +0000 Subject: [PATCH 028/234] KVM: arm64: Fix error path in init_hyp_mode() In the unlikely case pKVM failed to allocate carveout, the error path tries to access NULL ptr when it de-reference the SVE state from the uninitialized nVHE per-cpu base. [ 1.575420] pstate: 61400005 (nZCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--) [ 1.576010] pc : teardown_hyp_mode+0xe4/0x180 [ 1.576920] lr : teardown_hyp_mode+0xd0/0x180 [ 1.577308] sp : ffff8000826fb9d0 [ 1.577600] x29: ffff8000826fb9d0 x28: 0000000000000000 x27: ffff80008209b000 [ 1.578383] x26: ffff800081dde000 x25: ffff8000820493c0 x24: ffff80008209eb00 [ 1.579180] x23: 0000000000000040 x22: 0000000000000001 x21: 0000000000000000 [ 1.579881] x20: 0000000000000002 x19: ffff800081d540b8 x18: 0000000000000000 [ 1.580544] x17: ffff800081205230 x16: 0000000000000152 x15: 00000000fffffff8 [ 1.581183] x14: 0000000000000008 x13: fff00000ff7f6880 x12: 000000000000003e [ 1.581813] x11: 0000000000000002 x10: 00000000000000ff x9 : 0000000000000000 [ 1.582503] x8 : 0000000000000000 x7 : 7f7f7f7f7f7f7f7f x6 : 43485e525851ff30 [ 1.583140] x5 : fff00000ff6e9030 x4 : fff00000ff6e8f80 x3 : 0000000000000000 [ 1.583780] x2 : 0000000000000000 x1 : 0000000000000002 x0 : 0000000000000000 [ 1.584526] Call trace: [ 1.584945] teardown_hyp_mode+0xe4/0x180 (P) [ 1.585578] init_hyp_mode+0x920/0x994 [ 1.586005] kvm_arm_init+0xb4/0x25c [ 1.586387] do_one_initcall+0xe0/0x258 [ 1.586819] do_initcall_level+0xa0/0xd4 [ 1.587224] do_initcalls+0x54/0x94 [ 1.587606] do_basic_setup+0x1c/0x28 [ 1.587998] kernel_init_freeable+0xc8/0x130 [ 1.588409] kernel_init+0x20/0x1a4 [ 1.588768] ret_from_fork+0x10/0x20 [ 1.589568] Code: f875db48 8b1c0109 f100011f 9a8903e8 (f9463100) [ 1.590332] ---[ end trace 0000000000000000 ]--- As Quentin pointed, the order of free is also wrong, we need to free SVE state first before freeing the per CPU ptrs. I initially observed this on 6.12, but I could also repro in master. 
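The shape of the fix is a general teardown rule: skip per-CPU state that was never allocated, and free anything reached through an allocation before freeing the allocation itself. A generic sketch of that rule (plain C, not the kernel code):

    #include <stdlib.h>

    struct pcpu_state {
            void *sve_state;        /* only reachable via the per-CPU pages */
    };

    static void teardown_one(struct pcpu_state *base)
    {
            if (!base)
                    return;         /* never allocated: nothing to free    */
            free(base->sve_state);  /* free the dependent state first ...  */
            free(base);             /* ... and its owner last              */
    }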
Signed-off-by: Mostafa Saleh Fixes: 66d5b53e20a6 ("KVM: arm64: Allocate memory mapped at hyp for host sve state in pKVM") Reviewed-by: Quentin Perret Link: https://lore.kernel.org/r/20250625123058.875179-1-smostafa@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 38a91bb5d4c7..6bdf79bc5d95 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2346,7 +2346,9 @@ static void __init teardown_hyp_mode(void) free_hyp_pgds(); for_each_possible_cpu(cpu) { free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); - free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); + + if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]) + continue; if (free_sve) { struct cpu_sve_state *sve_state; @@ -2354,6 +2356,9 @@ static void __init teardown_hyp_mode(void) sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; free_pages((unsigned long) sve_state, pkvm_host_sve_state_order()); } + + free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); + } } From 0e02219f9cf4f0c0aa3dbf3c820e6612bf3f0c8c Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Thu, 26 Jun 2025 10:10:14 +0000 Subject: [PATCH 029/234] KVM: arm64: Don't free hyp pages with pKVM on GICv2 Marc reported that enabling protected mode on a device with GICv2 doesn't fail gracefully as one would expect, and leads to a host kernel crash. As it turns out, the first half of pKVM init happens before the vgic probe, and so by the time we find out we have a GICv2 we're already committed to keeping the pKVM vectors installed at EL2 -- pKVM rejects stub HVCs for obvious security reasons. However, the error path on KVM init leads to teardown_hyp_mode() which unconditionally frees hypervisor allocations (including the EL2 stacks and per-cpu pages) under the assumption that a previous cpu_hyp_uninit() execution has reset the vectors back to the stubs, which is false with pKVM. Interestingly, host stage-2 protection is not enabled yet at this point, so this use-after-free may go unnoticed for a while. The issue becomes more obvious after the finalize_pkvm() call. Fix this by keeping track of the CPUs on which pKVM is initialized in the kvm_hyp_initialized per-cpu variable, and use it from teardown_hyp_mode() to skip freeing pages that are in fact used. 
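In other words, the teardown loop gains a per-CPU guard along these lines (a sketch of the intent; the actual hunk follows below):

    for_each_possible_cpu(cpu) {
            if (per_cpu(kvm_hyp_initialized, cpu))
                    continue;       /* EL2 may still run on this CPU's stack
                                     * and per-CPU pages; freeing them here
                                     * would be a use-after-free.          */
            /* ... free the stack and per-CPU pages as before ... */
    }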
Fixes: a770ee80e662 ("KVM: arm64: pkvm: Disable GICv2 support") Reported-by: Marc Zyngier Signed-off-by: Quentin Perret Link: https://lore.kernel.org/r/20250626101014.1519345-1-qperret@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 6bdf79bc5d95..b223d21c063c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2129,7 +2129,7 @@ static void cpu_hyp_init(void *discard) static void cpu_hyp_uninit(void *discard) { - if (__this_cpu_read(kvm_hyp_initialized)) { + if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) { cpu_hyp_reset(); __this_cpu_write(kvm_hyp_initialized, 0); } @@ -2345,6 +2345,9 @@ static void __init teardown_hyp_mode(void) free_hyp_pgds(); for_each_possible_cpu(cpu) { + if (per_cpu(kvm_hyp_initialized, cpu)) + continue; + free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]) From e0a911ac86857a73182edde9e50d9b4b949b7f01 Mon Sep 17 00:00:00 2001 From: Daniel Dadap Date: Thu, 26 Jun 2025 16:16:30 -0500 Subject: [PATCH 030/234] ALSA: hda: Add missing NVIDIA HDA codec IDs Add codec IDs for several NVIDIA products with HDA controllers to the snd_hda_id_hdmi[] patch table. Signed-off-by: Daniel Dadap Cc: Link: https://patch.msgid.link/aF24rqwMKFWoHu12@ddadap-lakeline.nvidia.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_hdmi.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 08308231b4ed..9a7793eb16e9 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4551,7 +4551,9 @@ HDA_CODEC_ENTRY(0x10de002e, "Tegra186 HDMI/DP1", patch_tegra_hdmi), HDA_CODEC_ENTRY(0x10de002f, "Tegra194 HDMI/DP2", patch_tegra_hdmi), HDA_CODEC_ENTRY(0x10de0030, "Tegra194 HDMI/DP3", patch_tegra_hdmi), HDA_CODEC_ENTRY(0x10de0031, "Tegra234 HDMI/DP", patch_tegra234_hdmi), +HDA_CODEC_ENTRY(0x10de0033, "SoC 33 HDMI/DP", patch_tegra234_hdmi), HDA_CODEC_ENTRY(0x10de0034, "Tegra264 HDMI/DP", patch_tegra234_hdmi), +HDA_CODEC_ENTRY(0x10de0035, "SoC 35 HDMI/DP", patch_tegra234_hdmi), HDA_CODEC_ENTRY(0x10de0040, "GPU 40 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi), @@ -4590,15 +4592,32 @@ HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009b, "GPU 9b HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009c, "GPU 9c HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a1, "GPU a1 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a8, "GPU a8 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a9, "GPU a9 HDMI/DP", patch_nvhdmi), 
+HDA_CODEC_ENTRY(0x10de00aa, "GPU aa HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00ab, "GPU ab HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00ad, "GPU ad HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00ae, "GPU ae HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00af, "GPU af HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00b0, "GPU b0 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00b1, "GPU b1 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00c0, "GPU c0 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00c1, "GPU c1 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00c3, "GPU c3 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00c4, "GPU c4 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00c5, "GPU c5 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP", patch_gf_hdmi), From ce174b48aebb3fa18fe48c59a3d10a89c414fdf2 Mon Sep 17 00:00:00 2001 From: Edip Hazuri Date: Fri, 27 Jun 2025 23:34:16 +0300 Subject: [PATCH 031/234] ALSA: hda/realtek - Add mute LED support for HP Victus 15-fb2xxx The mute led on this laptop is using ALC245 but requires a quirk to work This patch enables the existing quirk for the device. Tested on my friend's Victus 15-fb2xxx Laptop. The LED behaviour works as intended. Cc: Signed-off-by: Edip Hazuri Link: https://patch.msgid.link/20250627203415.56785-2-edip@medip.dev Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5d6d01ecfee2..a33e8a6541d4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10881,6 +10881,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d07, "HP Victus 15-fb2xxx (MB 8D07)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8d18, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS), SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), From cbe876121633dadb2b0ce52711985328638e9aab Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Wed, 25 Jun 2025 10:05:04 +0800 Subject: [PATCH 032/234] ASoC: fsl_asrc: use internal measured ratio for non-ideal ratio mode When USRC=0, there is underrun issue for the non-ideal ratio mode; according to the reference mannual, the internal measured ratio can be used with USRC=1 and IDRC=0. 
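For context, regmap_update_bits(map, reg, mask, val) is in effect a read-modify-write that only touches the bits selected by the mask, roughly:

    new = (old & ~mask) | (val & mask);

so widening the mask to both ratio-control bits while passing only the USRi bit in the value, as the hunk below does, sets USRC=1 and clears IDRC=0 in a single update without disturbing the rest of the register.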
Fixes: d0250cf4f2ab ("ASoC: fsl_asrc: Add an option to select internal ratio mode") Signed-off-by: Shengjiu Wang Reviewed-by: Daniel Baluta Link: https://patch.msgid.link/20250625020504.2728161-1-shengjiu.wang@nxp.com Signed-off-by: Mark Brown --- sound/soc/fsl/fsl_asrc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 677529916dc0..745532ccbdba 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -517,7 +517,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ATSi_MASK(index), ASRCTR_ATS(index)); regmap_update_bits(asrc->regmap, REG_ASRCTR, - ASRCTR_USRi_MASK(index), 0); + ASRCTR_IDRi_MASK(index) | ASRCTR_USRi_MASK(index), + ASRCTR_USR(index)); /* Set the input and output clock sources */ regmap_update_bits(asrc->regmap, REG_ASRCSR, From 960aed31eedbaeb2e47b1bc485b462fd38a53311 Mon Sep 17 00:00:00 2001 From: Bard Liao Date: Thu, 26 Jun 2025 14:44:20 +0800 Subject: [PATCH 033/234] ASoC: Intel: SND_SOC_INTEL_SOF_BOARD_HELPERS select SND_SOC_ACPI_INTEL_MATCH MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The helpers that are provided by SND_SOC_ACPI_INTEL_MATCH (soc-acpi-intel-ssp-common) are used in SND_SOC_INTEL_SOF_BOARD_HELPERS (sof_board_helpers). SND_SOC_ACPI_INTEL_MATCH is selected by machine drivers. When skl_hda_dsp_generic uses the board helpers, it select SND_SOC_INTEL_SOF_BOARD_HELPERS only but not SND_SOC_ACPI_INTEL_MATCH which initroduce the undefined symbol errors. However, it makes more sense that SND_SOC_INTEL_SOF_BOARD_HELPERS select SND_SOC_ACPI_INTEL_MATCH itself. Fixes: b28b23dea314 ("ASoC: Intel: skl_hda_dsp_generic: use common module for DAI links") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202506141543.dN0JJyZC-lkp@intel.com/ Signed-off-by: Bard Liao Reviewed-by: Péter Ujfalusi Reviewed-by: Liam Girdwood Link: https://patch.msgid.link/20250626064420.450334-1-yung-chuan.liao@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/intel/boards/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig index 2df7afa2f469..128b6876af83 100644 --- a/sound/soc/intel/boards/Kconfig +++ b/sound/soc/intel/boards/Kconfig @@ -42,6 +42,7 @@ config SND_SOC_INTEL_SOF_NUVOTON_COMMON tristate config SND_SOC_INTEL_SOF_BOARD_HELPERS + select SND_SOC_ACPI_INTEL_MATCH tristate if SND_SOC_INTEL_CATPT From dc78f7e59169d3f0e6c3c95d23dc8e55e95741e2 Mon Sep 17 00:00:00 2001 From: Arun Raghavan Date: Thu, 26 Jun 2025 09:08:25 -0400 Subject: [PATCH 034/234] ASoC: fsl_sai: Force a software reset when starting in consumer mode On an imx8mm platform with an external clock provider, when running the receiver (arecord) and triggering an xrun with xrun_injection, we see a channel swap/offset. This happens sometimes when running only the receiver, but occurs reliably if a transmitter (aplay) is also concurrently running. It seems that the SAI loses track of frame sync during the trigger stop -> trigger start cycle that occurs during an xrun. Doing just a FIFO reset in this case does not suffice, and only a software reset seems to get it back on track. This looks like the same h/w bug that is already handled for the producer case, so we now do the reset unconditionally on config disable. 
Signed-off-by: Arun Raghavan Reported-by: Pieterjan Camerlynck Fixes: 3e3f8bd56955 ("ASoC: fsl_sai: fix no frame clk in master mode") Cc: stable@vger.kernel.org Reviewed-by: Fabio Estevam Link: https://patch.msgid.link/20250626130858.163825-1-arun@arunraghavan.net Signed-off-by: Mark Brown --- sound/soc/fsl/fsl_sai.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index af1a168d35e3..50af6b725670 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -803,13 +803,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir) * anymore. Add software reset to fix this issue. * This is a hardware bug, and will be fix in the * next sai version. + * + * In consumer mode, this can happen even after a + * single open/close, especially if both tx and rx + * are running concurrently. */ - if (!sai->is_consumer_mode[tx]) { - /* Software Reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); - /* Clear SR bit to finish the reset */ - regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); - } + /* Software Reset */ + regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR); + /* Clear SR bit to finish the reset */ + regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0); } static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, From be2e1a63448b35bd6736b5934f7720534649b51e Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Jun 2025 17:03:29 +0100 Subject: [PATCH 035/234] MAINTAINERS: update Qualcomm audio codec drivers list Some of the codec drivers like wcd939x are missing in the MAINTAINERS which is resulting in incorrect list from get_maintainers script. Fix this by using wildcard matching on both wcd93* and wsa88* codec and bindings. Signed-off-by: Srinivas Kandagatla Link: https://patch.msgid.link/20250627160329.442795-1-srinivas.kandagatla@oss.qualcomm.com Signed-off-by: Mark Brown --- MAINTAINERS | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 4bac4ea21b64..5bfcaf0732ed 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20153,21 +20153,15 @@ S: Supported F: Documentation/devicetree/bindings/soc/qcom/qcom,apr* F: Documentation/devicetree/bindings/sound/qcom,* F: drivers/soc/qcom/apr.c -F: include/dt-bindings/sound/qcom,wcd9335.h -F: include/dt-bindings/sound/qcom,wcd934x.h -F: sound/soc/codecs/lpass-rx-macro.* -F: sound/soc/codecs/lpass-tx-macro.* -F: sound/soc/codecs/lpass-va-macro.c -F: sound/soc/codecs/lpass-wsa-macro.* +F: drivers/soundwire/qcom.c +F: include/dt-bindings/sound/qcom,wcd93* +F: sound/soc/codecs/lpass-*.* F: sound/soc/codecs/msm8916-wcd-analog.c F: sound/soc/codecs/msm8916-wcd-digital.c F: sound/soc/codecs/wcd-clsh-v2.* F: sound/soc/codecs/wcd-mbhc-v2.* -F: sound/soc/codecs/wcd9335.* -F: sound/soc/codecs/wcd934x.c -F: sound/soc/codecs/wsa881x.c -F: sound/soc/codecs/wsa883x.c -F: sound/soc/codecs/wsa884x.c +F: sound/soc/codecs/wcd93*.* +F: sound/soc/codecs/wsa88*.* F: sound/soc/qcom/ QCOM EMBEDDED USB DEBUGGER (EUD) From a7528e9beadbddcec21b394ce5fa8dc4e5cdaa24 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 26 Jun 2025 15:18:41 +0100 Subject: [PATCH 036/234] ASoC: Intel: soc-acpi: arl: Correct order of cs42l43 matches Matches should go from more specific to less specific, correct the ordering of two cs42l43 entries. 
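The reason ordering matters is that machine selection stops at the first table entry whose links are all present. A rough pseudo-C of that assumed first-match behaviour (not the exact snd_soc_acpi matching code):

    for (mach = table; mach->link_mask; mach++) {
            if ((present_links & mach->link_mask) == mach->link_mask)
                    return mach;    /* first match wins */
    }

With the plain BIT(2) cs42l43 entry listed first, a board that exposes both link 2 (cs42l43) and link 3 (cs35l56) would stop there and never reach the more specific BIT(2) | BIT(3) entry, hence the swap below.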
Fixes: c0524067653d ("ASoC: Intel: soc-acpi: arl: Add match entries for new cs42l43 laptops") Signed-off-by: Charles Keepax Link: https://patch.msgid.link/20250626141841.77780-1-ckeepax@opensource.cirrus.com Signed-off-by: Mark Brown --- sound/soc/intel/common/soc-acpi-intel-arl-match.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c index 73e581e93755..1ad704ca2c5f 100644 --- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c @@ -467,13 +467,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = { .sof_tplg_filename = "sof-arl-cs42l43-l0.tplg", .get_function_tplg_files = sof_sdw_get_tplg_files, }, - { - .link_mask = BIT(2), - .links = arl_cs42l43_l2, - .drv_name = "sof_sdw", - .sof_tplg_filename = "sof-arl-cs42l43-l2.tplg", - .get_function_tplg_files = sof_sdw_get_tplg_files, - }, { .link_mask = BIT(2) | BIT(3), .links = arl_cs42l43_l2_cs35l56_l3, @@ -481,6 +474,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = { .sof_tplg_filename = "sof-arl-cs42l43-l2-cs35l56-l3.tplg", .get_function_tplg_files = sof_sdw_get_tplg_files, }, + { + .link_mask = BIT(2), + .links = arl_cs42l43_l2, + .drv_name = "sof_sdw", + .sof_tplg_filename = "sof-arl-cs42l43-l2.tplg", + .get_function_tplg_files = sof_sdw_get_tplg_files, + }, { .link_mask = 0x1, /* link0 required */ .links = arl_rvp, From 05f254a6369ac020fc0382a7cbd3ef64ad997c92 Mon Sep 17 00:00:00 2001 From: Alexander Tsoy Date: Mon, 30 Jun 2025 04:33:57 +0300 Subject: [PATCH 037/234] ALSA: usb-audio: Improve filtering of sample rates on Focusrite devices Previously we were filtering out only upper unsupported sampling rates. This patch adds filtering of the lower unsupported sampling rates. As a result there is 1:1 mapping between altsetting and supported rates. The issue was found on a Scarlett 3rd Gen card (see linked bug), but the same filtering is likely needed for the Scarlett 1st and 2nd Gen as well as the older Clarett cards which lacks Valid Alternate Setting Control. Patch was not tested on a real hardware. 
Link: https://bugzilla.kernel.org/show_bug.cgi?id=214493 Signed-off-by: Alexander Tsoy Link: https://patch.msgid.link/20250630013357.1327420-1-alexander@tsoy.me Signed-off-by: Takashi Iwai --- sound/usb/format.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/sound/usb/format.c b/sound/usb/format.c index 8cd54f7bf33a..0ee532acbb60 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -310,16 +310,14 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, struct audioformat *fp, unsigned int rate) { - struct usb_interface *iface; struct usb_host_interface *alts; unsigned char *fmt; unsigned int max_rate; - iface = usb_ifnum_to_if(chip->dev, fp->iface); - if (!iface) + alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting); + if (!alts) return true; - alts = &iface->altsetting[fp->altset_idx]; fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE); if (!fmt) @@ -328,20 +326,20 @@ static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip, if (fmt[0] == 10) { /* bLength */ max_rate = combine_quad(&fmt[6]); - /* Validate max rate */ - if (max_rate != 48000 && - max_rate != 96000 && - max_rate != 192000 && - max_rate != 384000) { - + switch (max_rate) { + case 48000: + return (rate == 44100 || rate == 48000); + case 96000: + return (rate == 88200 || rate == 96000); + case 192000: + return (rate == 176400 || rate == 192000); + default: usb_audio_info(chip, "%u:%d : unexpected max rate: %u\n", fp->iface, fp->altsetting, max_rate); return true; } - - return rate <= max_rate; } return true; From 1fe44a86ff0ff483aa1f1332f2b08f431fa51ce8 Mon Sep 17 00:00:00 2001 From: Lachlan Hodges Date: Thu, 26 Jun 2025 21:51:18 +1000 Subject: [PATCH 038/234] wifi: cfg80211: fix S1G beacon head validation in nl80211 S1G beacons contain fixed length optional fields that precede the variable length elements, ensure we take this into account when validating the beacon. This particular case was missed in 1e1f706fc2ce ("wifi: cfg80211/mac80211: correctly parse S1G beacon optional elements"). 
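For reference, the S1G beacon head the validator now accounts for looks roughly like this, with the middle block's length derived from frame_control bits by ieee80211_s1g_optional_len():

    [ ieee80211_ext header up to u.s1g_beacon.variable ][ fixed-length optional fields ][ variable elements ]

so element validation must start at

    fixedlen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
               ieee80211_s1g_optional_len(ext->frame_control);

rather than at the bare offsetof(), otherwise the optional fields are misparsed as elements.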
Fixes: 1d47f1198d58 ("nl80211: correctly validate S1G beacon head") Signed-off-by: Lachlan Hodges Link: https://patch.msgid.link/20250626115118.68660-1-lachlan.hodges@morsemicro.com [shorten/reword subject] Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 85f139016da2..50202d170f3a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -229,6 +229,7 @@ static int validate_beacon_head(const struct nlattr *attr, unsigned int len = nla_len(attr); const struct element *elem; const struct ieee80211_mgmt *mgmt = (void *)data; + const struct ieee80211_ext *ext; unsigned int fixedlen, hdrlen; bool s1g_bcn; @@ -237,8 +238,10 @@ static int validate_beacon_head(const struct nlattr *attr, s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control); if (s1g_bcn) { - fixedlen = offsetof(struct ieee80211_ext, - u.s1g_beacon.variable); + ext = (struct ieee80211_ext *)mgmt; + fixedlen = + offsetof(struct ieee80211_ext, u.s1g_beacon.variable) + + ieee80211_s1g_optional_len(ext->frame_control); hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon); } else { fixedlen = offsetof(struct ieee80211_mgmt, From 74b1ec9f5d627d2bdd5e5b6f3f81c23317657023 Mon Sep 17 00:00:00 2001 From: Daniil Dulov Date: Thu, 26 Jun 2025 14:46:19 +0300 Subject: [PATCH 039/234] wifi: zd1211rw: Fix potential NULL pointer dereference in zd_mac_tx_to_dev() There is a potential NULL pointer dereference in zd_mac_tx_to_dev(). For example, the following is possible: T0 T1 zd_mac_tx_to_dev() /* len == skb_queue_len(q) */ while (len > ZD_MAC_MAX_ACK_WAITERS) { filter_ack() spin_lock_irqsave(&q->lock, flags); /* position == skb_queue_len(q) */ for (i=1; i<position; i++) skb = __skb_dequeue(q) if (mac->type == NL80211_IFTYPE_AP) skb = __skb_dequeue(q); spin_unlock_irqrestore(&q->lock, flags); skb_dequeue() -> NULL Since there is a small gap between checking skb queue length and skb being unconditionally dequeued in zd_mac_tx_to_dev(), skb_dequeue() can return NULL. Then the pointer is passed to zd_mac_tx_status() where it is dereferenced. In order to avoid potential NULL pointer dereference due to situations like above, check if skb is not NULL before passing it to zd_mac_tx_status(). Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 459c51ad6e1f ("zd1211rw: port to mac80211") Signed-off-by: Daniil Dulov Link: https://patch.msgid.link/20250626114619.172631-1-d.dulov@aladdin.ru Signed-off-by: Johannes Berg --- drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index 9653dbaac3c0..781510a3ec6d 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error) skb_queue_tail(q, skb); while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) { - zd_mac_tx_status(hw, skb_dequeue(q), + skb = skb_dequeue(q); + if (!skb) + break; + + zd_mac_tx_status(hw, skb, mac->ack_pending ? mac->ack_signal : 0, NULL); mac->ack_pending = 0; From 643c0c9d0496c5727ca28cd841d069b5afee06cf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 25 Jun 2025 12:18:05 +0100 Subject: [PATCH 040/234] PCI: apple: Add tracking of probed root ports The apple driver relies on being able to directly find the matching root port structure from the platform device that represents this port.
A previous hack stashed a pointer to the root port structure in the config window private pointer, but that ended up relying on assumptions that break other drivers. Instead, bite the bullet and track the association as part of the driver itself as a list of probed root ports. Signed-off-by: Marc Zyngier Signed-off-by: Bjorn Helgaas Reviewed-by: Geert Uytterhoeven Link: https://patch.msgid.link/20250625111806.4153773-3-maz@kernel.org --- drivers/pci/controller/pcie-apple.c | 53 ++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 77fe73976654..0380d300adca 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -187,6 +187,7 @@ struct apple_pcie { const struct hw_info *hw; unsigned long *bitmap; struct list_head ports; + struct list_head entry; struct completion event; struct irq_fwspec fwspec; u32 nvecs; @@ -205,6 +206,9 @@ struct apple_pcie_port { int idx; }; +static LIST_HEAD(pcie_list); +static DEFINE_MUTEX(pcie_list_lock); + static void rmw_set(u32 set, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) | set, addr); @@ -720,13 +724,45 @@ static int apple_msi_init(struct apple_pcie *pcie) return 0; } +static void apple_pcie_register(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_add_tail(&pcie->entry, &pcie_list); +} + +static void apple_pcie_unregister(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_del(&pcie->entry); +} + +static struct apple_pcie *apple_pcie_lookup(struct device *dev) +{ + struct apple_pcie *pcie; + + guard(mutex)(&pcie_list_lock); + + list_for_each_entry(pcie, &pcie_list, entry) { + if (pcie->dev == dev) + return pcie; + } + + return NULL; +} + static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) { struct pci_config_window *cfg = pdev->sysdata; - struct apple_pcie *pcie = cfg->priv; + struct apple_pcie *pcie; struct pci_dev *port_pdev; struct apple_pcie_port *port; + pcie = apple_pcie_lookup(cfg->parent); + if (WARN_ON(!pcie)) + return NULL; + /* Find the root port this device is on */ port_pdev = pcie_find_root_port(pdev); @@ -806,10 +842,14 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci static int apple_pcie_init(struct pci_config_window *cfg) { - struct apple_pcie *pcie = cfg->priv; struct device *dev = cfg->parent; + struct apple_pcie *pcie; int ret; + pcie = apple_pcie_lookup(dev); + if (WARN_ON(!pcie)) + return -ENOENT; + for_each_available_child_of_node_scoped(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { @@ -852,13 +892,18 @@ static int apple_pcie_probe(struct platform_device *pdev) mutex_init(&pcie->lock); INIT_LIST_HEAD(&pcie->ports); - dev_set_drvdata(dev, pcie); ret = apple_msi_init(pcie); if (ret) return ret; - return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); + apple_pcie_register(pcie); + + ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); + if (ret) + apple_pcie_unregister(pcie); + + return ret; } static const struct of_device_id apple_pcie_of_match[] = { From bdb32a0f6780660512d2335db2ebe32e6582cdc8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 25 Jun 2025 12:18:04 +0100 Subject: [PATCH 041/234] PCI: host-generic: Set driver_data before calling gen_pci_init() On MicroChip MPFS Icicle: microchip-pcie 2000000000.pcie: host bridge /soc/pcie@2000000000 ranges: microchip-pcie 2000000000.pcie: Parsing ranges property... 
microchip-pcie 2000000000.pcie: MEM 0x2008000000..0x2087ffffff -> 0x0008000000 Unable to handle kernel NULL pointer dereference at virtual address 0000000000000368 Current swapper/0 pgtable: 4K pagesize, 39-bit VAs, pgdp=0x00000000814f1000 [0000000000000368] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000 Oops [#1] Modules linked in: CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.15.0-rc1-icicle-00003-gafc0a570bb61 #232 NONE Hardware name: Microchip PolarFire-SoC Icicle Kit (DT) [...] [] plda_pcie_setup_iomems+0xe/0x78 [] mc_platform_init+0x80/0x1d2 [] pci_ecam_create+0x104/0x1e2 [] pci_host_common_init+0x120/0x228 [] pci_host_common_probe+0x7c/0x8a The initialization of driver_data was moved after the call to gen_pci_init(), while the pci_ecam_ops.init() callback mc_platform_init() expects it has already been initialized. Fix this by moving the initialization of driver_data up. Fixes: afc0a570bb613871 ("PCI: host-generic: Extract an ECAM bridge creation helper from pci_host_common_probe()") Signed-off-by: Geert Uytterhoeven Signed-off-by: Marc Zyngier Signed-off-by: Bjorn Helgaas Link: https://lore.kernel.org/r/774290708a6f0f683711914fda110742c18a7fb2.1750787223.git.geert+renesas@glider.be Link: https://patch.msgid.link/20250625111806.4153773-2-maz@kernel.org --- drivers/pci/controller/pci-host-common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index b0992325dd65..b37052863847 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -64,13 +64,13 @@ int pci_host_common_init(struct platform_device *pdev, of_pci_check_probe_only(); + platform_set_drvdata(pdev, bridge); + /* Parse and map our Configuration Space windows */ cfg = gen_pci_init(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); - platform_set_drvdata(pdev, bridge); - bridge->sysdata = cfg; bridge->ops = (struct pci_ops *)&ops->pci_ops; bridge->enable_device = ops->enable_device; From ba74278c638df7c333a970a265dfcc258e70807b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 25 Jun 2025 12:18:06 +0100 Subject: [PATCH 042/234] Revert "PCI: ecam: Allow cfg->priv to be pre-populated from the root port device" This reverts commit 4900454b4f819e88e9c57ed93542bf9325d7e161. Now that nobody relies on cfg->priv containing anything useful before the .init() callback is used, restore the previous behaviour. Signed-off-by: Marc Zyngier Signed-off-by: Bjorn Helgaas Reviewed-by: Geert Uytterhoeven Link: https://patch.msgid.link/20250625111806.4153773-4-maz@kernel.org --- drivers/pci/ecam.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 2c5e6446e00e..260b7de2dbd5 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c @@ -84,8 +84,6 @@ struct pci_config_window *pci_ecam_create(struct device *dev, goto err_exit_iomap; } - cfg->priv = dev_get_drvdata(dev); - if (ops->init) { err = ops->init(cfg); if (err) From f7690d058170dbc1520b0bfbc3d9daf046b59326 Mon Sep 17 00:00:00 2001 From: Simon Trimmer Date: Wed, 2 Jul 2025 11:22:35 +0000 Subject: [PATCH 043/234] ASoC: Intel: sof_sdw: Add quirks for Lenovo P1 and P16 These Lenovo laptops have the DMICs connected to the host instead of the CS42L43 and so need the SOC_SDW_CODEC_MIC quirk.
Signed-off-by: Simon Trimmer Link: https://patch.msgid.link/20250702112235.377479-1-simont@opensource.cirrus.com Signed-off-by: Mark Brown --- sound/soc/intel/boards/sof_sdw.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 81a914bd7ec2..504887505e68 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -783,6 +783,9 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = { SND_PCI_QUIRK(0x1043, 0x1e13, "ASUS Zenbook S14", SOC_SDW_CODEC_MIC), SND_PCI_QUIRK(0x1043, 0x1f43, "ASUS Zenbook S16", SOC_SDW_CODEC_MIC), + SND_PCI_QUIRK(0x17aa, 0x2347, "Lenovo P16", SOC_SDW_CODEC_MIC), + SND_PCI_QUIRK(0x17aa, 0x2348, "Lenovo P16", SOC_SDW_CODEC_MIC), + SND_PCI_QUIRK(0x17aa, 0x2349, "Lenovo P1", SOC_SDW_CODEC_MIC), {} }; From db98ee56851061082fecd7e6b4b6a93600562ec2 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 1 Jul 2025 16:34:11 +0300 Subject: [PATCH 044/234] ALSA: hda/realtek: Add quirk for ASUS ExpertBook B9403CVAR ASUS ExpertBook B9403CVAR needs the ALC294_FIXUP_ASUS_HPE for the headphones to work. Closes: https://github.com/thesofproject/linux/issues/5472 Signed-off-by: Peter Ujfalusi Tested-by: Anton Khirnov Link: https://patch.msgid.link/20250701133411.25275-1-peter.ujfalusi@linux.intel.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a33e8a6541d4..c7720a2f47c3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -11041,6 +11041,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1e93, "ASUS ExpertBook B9403CVAR", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), From 105485a182dc6ba32e55db46839af756c105afae Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Jul 2025 15:02:58 +0100 Subject: [PATCH 045/234] KVM: arm64: Fix handling of FEAT_GTG for unimplemented granule sizes Booting an EL2 guest on a system only supporting a subset of the possible page sizes leads to interesting situations. For example, on a system that only supports 4kB and 64kB, and is booted with a 4kB kernel, we end-up advertising 16kB support at stage-2, which is pretty weird. That's because we consider that any S2 bigger than our base granule is fair game, irrespective of what the HW actually supports. While this is not impossible to support (KVM would happily handle it), it is likely to be confusing for the guest. Add new checks that will verify that this granule size is actually supported before publishing it to the guest. 
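For clarity, the rule the new checks implement can be written as a small standalone helper. The enum values below are illustrative placeholders, not the architectural ID_AA64MMFR0_EL1 encodings; the real check is the has_tgran_2() macro added in the diff that follows:

    /* Illustrative only -- not the real ID register field encodings. */
    enum tgran2_field {
            TGRAN2_NI,              /* stage-2 support not implemented */
            TGRAN2_AS_STAGE1,       /* field defers to the stage-1 field */
            TGRAN2_IMP,             /* stage-2 support implemented */
    };

    static bool granule_supported_at_stage2(enum tgran2_field s2,
                                            bool stage1_implemented)
    {
            /*
             * Advertise the granule at stage-2 only if the dedicated
             * stage-2 field reports it as implemented, or if it defers
             * to stage-1 and stage-1 implements that granule size.
             */
            if (s2 == TGRAN2_AS_STAGE1)
                    return stage1_implemented;

            return s2 != TGRAN2_NI;
    }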
Fixes: e7ef6ed4583ea ("KVM: arm64: Enforce NV limits on a per-idregs basis") Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier --- arch/arm64/kvm/nested.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 5b191f4dc566..dc1d26559bfa 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -1402,6 +1402,21 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu) } } +#define has_tgran_2(__r, __sz) \ + ({ \ + u64 _s1, _s2, _mmfr0 = __r; \ + \ + _s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \ + TGRAN##__sz##_2, _mmfr0); \ + \ + _s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \ + TGRAN##__sz, _mmfr0); \ + \ + ((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI && \ + _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \ + (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \ + _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI)); \ + }) /* * Our emulated CPU doesn't support all the possible features. For the * sake of simplicity (and probably mental sanity), wipe out a number @@ -1411,6 +1426,8 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu) */ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) { + u64 orig_val = val; + switch (reg) { case SYS_ID_AA64ISAR0_EL1: /* Support everything but TME */ @@ -1480,13 +1497,16 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val) */ switch (PAGE_SIZE) { case SZ_4K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP); + if (has_tgran_2(orig_val, 4)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP); fallthrough; case SZ_16K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP); + if (has_tgran_2(orig_val, 16)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP); fallthrough; case SZ_64K: - val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP); + if (has_tgran_2(orig_val, 64)) + val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP); break; } From 42ce432522a17685f5a84529de49e555477c0a1f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 19 Jun 2025 14:48:17 +0100 Subject: [PATCH 046/234] KVM: arm64: Remove kvm_arch_vcpu_run_map_fp() Historically KVM hyp code saved the host's FPSIMD state into the hosts's fpsimd_state memory, and so it was necessary to map this into the hyp Stage-1 mappings before running a vCPU. This is no longer necessary as of commits: * fbc7e61195e2 ("KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state") * 8eca7f6d5100 ("KVM: arm64: Remove host FPSIMD saving for non-protected KVM") Since those commits, we eagerly save the host's FPSIMD state before calling into hyp to run a vCPU, and hyp code never reads nor writes the host's fpsimd_state memory. There's no longer any need to map the host's fpsimd_state memory into the hyp Stage-1, and kvm_arch_vcpu_run_map_fp() is unnecessary but benign. Remove kvm_arch_vcpu_run_map_fp(). Currently there is no code to perform a corresponding unmap, and we never mapped the host's SVE or SME state into the hyp Stage-1, so no other code needs to be removed. 
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Fuad Tabba Cc: Marc Zyngier Cc: Mark Brown Cc: Oliver Upton Cc: Will Deacon Cc: kvmarm@lists.linux.dev Reviewed-by: Mark Brown Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Link: https://lore.kernel.org/r/20250619134817.4075340-1-mark.rutland@arm.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/arm.c | 4 ---- arch/arm64/kvm/fpsimd.c | 26 -------------------------- 3 files changed, 31 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d27079968341..3e41a880b062 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1480,7 +1480,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range); /* Guest/host FPSIMD coordination helpers */ -int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b223d21c063c..23dd3f3fc3eb 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -825,10 +825,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (!kvm_arm_vcpu_is_finalized(vcpu)) return -EPERM; - ret = kvm_arch_vcpu_run_map_fp(vcpu); - if (ret) - return ret; - if (likely(vcpu_has_run_once(vcpu))) return 0; diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 8f6c8f57c6b9..15e17aca1dec 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -14,32 +14,6 @@ #include #include -/* - * Called on entry to KVM_RUN unless this vcpu previously ran at least - * once and the most recent prior KVM_RUN for this vcpu was called from - * the same task as current (highly likely). - * - * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu), - * such that on entering hyp the relevant parts of current are already - * mapped. - */ -int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) -{ - struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; - int ret; - - /* pKVM has its own tracking of the host fpsimd state. */ - if (is_protected_kvm_enabled()) - return 0; - - /* Make sure the host task fpsimd state is visible to hyp: */ - ret = kvm_share_hyp(fpsimd, fpsimd + 1); - if (ret) - return ret; - - return 0; -} - /* * Prepare vcpu for saving the host's FPSIMD state and loading the guest's. * The actual loading is done by the FPSIMD access trap taken to hyp. From 3b3312f28ee2d9c386602f8521e419cfc69f4823 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Thu, 3 Jul 2025 11:25:21 +0100 Subject: [PATCH 047/234] ASoC: cs35l56: probe() should fail if the device ID is not recognized Return an error from driver probe if the DEVID read from the chip is not one supported by this driver. In cs35l56_hw_init() there is a check for valid DEVID, but the invalid case was returning the value of ret. At this point in the code ret == 0 so the caller would think that cs35l56_hw_init() was successful. 
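The underlying anti-pattern is generic enough to be worth spelling out; a minimal sketch, with made-up names and device ID:

    #include <errno.h>

    /* "ret" is still 0 when the unknown-ID branch runs, so a
     * "return ret;" there would report success to the caller.
     */
    static int probe_chip(unsigned int devid)
    {
            int ret = 0;

            switch (devid) {
            case 0x1234:            /* a supported device */
                    break;
            default:
                    return -ENODEV; /* the fix: return a real error */
            }

            return ret;
    }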
Signed-off-by: Richard Fitzgerald Fixes: 84851aa055c8 ("ASoC: cs35l56: Move part of cs35l56_init() to shared library") Link: https://patch.msgid.link/20250703102521.54204-1-rf@opensource.cirrus.com Signed-off-by: Mark Brown --- sound/soc/codecs/cs35l56-shared.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index d0831d609584..ba653f6ccfae 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -980,7 +980,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base) break; default: dev_err(cs35l56_base->dev, "Unknown device %x\n", devid); - return ret; + return -ENODEV; } cs35l56_base->type = devid & 0xFF; From ef9675b0ef030d135413e8638989f3a7d1f3217a Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Fri, 27 Jun 2025 12:31:33 -0400 Subject: [PATCH 048/234] Bluetooth: hci_sync: Fix not disabling advertising instance As the code comments on hci_setup_ext_adv_instance_sync suggest, the advertising instance needs to be disabled in order to update its parameters, but it was wrongly checking that !adv->pending. Fixes: cba6b758711c ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 2") Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_sync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 77b3691f3423..0066627c05eb 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1345,7 +1345,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) * Command Disallowed error, so we must first disable the * instance if it is active. */ - if (adv && !adv->pending) { + if (adv) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; From 59710a26a289ad4e7ef227d22063e964930928b0 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Mon, 30 Jun 2025 15:37:46 -0400 Subject: [PATCH 049/234] Bluetooth: hci_core: Remove check of BDADDR_ANY in hci_conn_hash_lookup_big_state The check for destination to be BDADDR_ANY is no longer necessary with the introduction of BIS_LINK. Fixes: 23205562ffc8 ("Bluetooth: separate CIS_LINK and BIS_LINK link types") Signed-off-by: Luiz Augusto von Dentz --- include/net/bluetooth/hci_core.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 9fc8f544e20e..0da011fc8146 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1350,8 +1350,7 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != BIS_LINK || bacmp(&c->dst, BDADDR_ANY) || - c->state != state) + if (c->type != BIS_LINK || c->state != state) continue; if (handle == c->iso_qos.bcast.big) { From 314d30b1508682e27c8a324096262c66f23455d9 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Thu, 3 Jul 2025 09:45:08 -0400 Subject: [PATCH 050/234] Bluetooth: hci_sync: Fix attempting to send HCI_Disconnect to BIS handle BIS/PA connections do have their own cleanup procedure, which is performed by hci_conn_cleanup/bis_cleanup.
Fixes: 23205562ffc8 ("Bluetooth: separate CIS_LINK and BIS_LINK link types") Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_sync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 0066627c05eb..5f178db8d40d 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -5493,7 +5493,7 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, { struct hci_cp_disconnect cp; - if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { + if (conn->type == BIS_LINK) { /* This is a BIS connection, hci_conn_del will * do the necessary cleanup. */ From c7349772c268ec3c91d83cbfbbcf63f1bd7c256c Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Fri, 27 Jun 2025 11:19:02 -0400 Subject: [PATCH 051/234] Bluetooth: hci_event: Fix not marking Broadcast Sink BIS as connected Upon receiving HCI_EVT_LE_BIG_SYNC_ESTABLISHED with status 0x00 (success) the corresponding BIS hci_conn state shall be set to BT_CONNECTED otherwise they will be left with BT_OPEN which is invalid at that point, also create the debugfs and sysfs entries following the same logic as the likes of Broadcast Source BIS and CIS connections. Fixes: f777d8827817 ("Bluetooth: ISO: Notify user space about failed bis connections") Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_event.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4d5ace9d245d..992131f88a45 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -6966,7 +6966,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); if (!ev->status) { + bis->state = BT_CONNECTED; set_bit(HCI_CONN_BIG_SYNC, &bis->flags); + hci_debugfs_create_conn(bis); + hci_conn_add_sysfs(bis); hci_iso_setup_path(bis); } } From 68279380266a5fa70e664de754503338e2ec3f43 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 3 Jul 2025 10:23:16 -0700 Subject: [PATCH 052/234] crypto: s390/sha - Fix uninitialized variable in SHA-1 and SHA-2 Commit 88c02b3f79a6 ("s390/sha3: Support sha3 performance enhancements") added the field s390_sha_ctx::first_message_part and made it be used by s390_sha_update() (now s390_sha_update_blocks()). At the time, s390_sha_update() was used by all the s390 SHA-1, SHA-2, and SHA-3 algorithms. However, only the initialization functions for SHA-3 were updated, leaving SHA-1 and SHA-2 using first_message_part uninitialized. This could cause e.g. the function code CPACF_KIMD_SHA_512 | CPACF_KIMD_NIP to be used instead of just CPACF_KIMD_SHA_512. This apparently was harmless, as the SHA-1 and SHA-2 function codes ignore CPACF_KIMD_NIP; it is recognized only by the SHA-3 function codes (https://lore.kernel.org/r/73477fe9-a1dc-4e38-98a6-eba9921e8afa@linux.ibm.com/). Therefore, this bug was found only when first_message_part was later converted to a boolean and UBSAN detected its uninitialized use. Regardless, let's fix this by just initializing to zero. Note: in 6.16, we need to patch SHA-1, SHA-384, and SHA-512. In 6.15 and earlier, we'll also need to patch SHA-224 and SHA-256, as they hadn't yet been librarified (which incidentally fixed this bug). 
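A stripped-down illustration of how the missing initialization bites; the constants below are placeholders, not the real CPACF values:

    #define KIMD_SHA_512    0x03    /* placeholder function code */
    #define KIMD_NIP        0x80    /* placeholder "new message part" flag */

    struct sha_ctx {
            unsigned int func;
            unsigned int first_message_part;
    };

    static unsigned int kimd_func(const struct sha_ctx *ctx)
    {
            /* A stale first_message_part ORs in a flag the SHA-512
             * function code happens to ignore, but the read itself is
             * uninitialized, which UBSAN eventually caught.
             */
            return ctx->func | (ctx->first_message_part ? KIMD_NIP : 0);
    }

    static void sha512_init_ctx(struct sha_ctx *ctx)
    {
            ctx->func = KIMD_SHA_512;
            ctx->first_message_part = 0;    /* the fix */
    }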
Fixes: 88c02b3f79a6 ("s390/sha3: Support sha3 performance enhancements") Cc: stable@vger.kernel.org Reported-by: Ingo Franzki Closes: https://lore.kernel.org/r/12740696-595c-4604-873e-aefe8b405fbf@linux.ibm.com Acked-by: Heiko Carstens Link: https://lore.kernel.org/r/20250703172316.7914-1-ebiggers@kernel.org Signed-off-by: Eric Biggers --- arch/s390/crypto/sha1_s390.c | 2 ++ arch/s390/crypto/sha512_s390.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index d229cbd2ba22..9b0d55be1239 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -38,6 +38,7 @@ static int s390_sha1_init(struct shash_desc *desc) sctx->state[4] = SHA1_H4; sctx->count = 0; sctx->func = CPACF_KIMD_SHA_1; + sctx->first_message_part = 0; return 0; } @@ -60,6 +61,7 @@ static int s390_sha1_import(struct shash_desc *desc, const void *in) sctx->count = ictx->count; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); sctx->func = CPACF_KIMD_SHA_1; + sctx->first_message_part = 0; return 0; } diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c index 33711a29618c..6cbbf5e8555f 100644 --- a/arch/s390/crypto/sha512_s390.c +++ b/arch/s390/crypto/sha512_s390.c @@ -32,6 +32,7 @@ static int sha512_init(struct shash_desc *desc) ctx->count = 0; ctx->sha512.count_hi = 0; ctx->func = CPACF_KIMD_SHA_512; + ctx->first_message_part = 0; return 0; } @@ -57,6 +58,7 @@ static int sha512_import(struct shash_desc *desc, const void *in) memcpy(sctx->state, ictx->state, sizeof(ictx->state)); sctx->func = CPACF_KIMD_SHA_512; + sctx->first_message_part = 0; return 0; } @@ -97,6 +99,7 @@ static int sha384_init(struct shash_desc *desc) ctx->count = 0; ctx->sha512.count_hi = 0; ctx->func = CPACF_KIMD_SHA_512; + ctx->first_message_part = 0; return 0; } From 78f88067d5c56d9aed69f27e238742841461cf67 Mon Sep 17 00:00:00 2001 From: Aaron Thompson Date: Thu, 3 Jul 2025 21:19:49 +0000 Subject: [PATCH 053/234] drm/nouveau: Do not fail module init on debugfs errors If CONFIG_DEBUG_FS is enabled, nouveau_drm_init() returns an error if it fails to create the "nouveau" directory in debugfs. One case where that will happen is when debugfs access is restricted by CONFIG_DEBUG_FS_ALLOW_NONE or by the boot parameter debugfs=off, which cause the debugfs APIs to return -EPERM. So just ignore errors from debugfs. Note that nouveau_debugfs_root may be an error now, but that is a standard pattern for debugfs. From include/linux/debugfs.h: "NOTE: it's expected that most callers should _ignore_ the errors returned by this function. Other debugfs functions handle the fact that the "dentry" passed to them could be an error and they don't crash in that case. Drivers should generally work fine even if debugfs fails to init anyway." 
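A rough sketch of the resulting idiom (the nouveau_debugfs_add_file() helper and the "status" file are made up for the example): create the directory, keep whatever dentry comes back, even an ERR_PTR when debugfs is disabled, and let later debugfs calls cope with it.

    static struct dentry *nouveau_debugfs_root;

    static void nouveau_module_debugfs_init(void)
    {
            /* May return an ERR_PTR (e.g. -EPERM with debugfs=off);
             * that is deliberately not treated as fatal.
             */
            nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);
    }

    static void nouveau_debugfs_add_file(void *data,
                                         const struct file_operations *fops)
    {
            /* debugfs_create_file() copes with an error-valued parent. */
            debugfs_create_file("status", 0444, nouveau_debugfs_root,
                                data, fops);
    }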
Fixes: 97118a1816d2 ("drm/nouveau: create module debugfs root") Cc: stable@vger.kernel.org Signed-off-by: Aaron Thompson Acked-by: Timur Tabi Signed-off-by: Danilo Krummrich Link: https://lore.kernel.org/r/20250703211949.9916-1-dev@aaront.org --- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 6 +----- drivers/gpu/drm/nouveau/nouveau_debugfs.h | 5 ++--- drivers/gpu/drm/nouveau/nouveau_drm.c | 4 +--- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 200e65a7cefc..c7869a639bef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -314,14 +314,10 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) drm->debugfs = NULL; } -int +void nouveau_module_debugfs_init(void) { nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL); - if (IS_ERR(nouveau_debugfs_root)) - return PTR_ERR(nouveau_debugfs_root); - - return 0; } void diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h index b7617b344ee2..d05ed0e641c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h @@ -24,7 +24,7 @@ extern void nouveau_debugfs_fini(struct nouveau_drm *); extern struct dentry *nouveau_debugfs_root; -int nouveau_module_debugfs_init(void); +void nouveau_module_debugfs_init(void); void nouveau_module_debugfs_fini(void); #else static inline void @@ -42,10 +42,9 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) { } -static inline int +static inline void nouveau_module_debugfs_init(void) { - return 0; } static inline void diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0c82a63cd49d..1527b801f013 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1461,9 +1461,7 @@ nouveau_drm_init(void) if (!nouveau_modeset) return 0; - ret = nouveau_module_debugfs_init(); - if (ret) - return ret; + nouveau_module_debugfs_init(); #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER platform_driver_register(&nouveau_platform_driver); From e79d0ba605d54dd47f3d8a487d00f264b896966c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 3 Jul 2025 09:27:07 +1000 Subject: [PATCH 054/234] nouveau/gsp: add a 50ms delay between fbsr and driver unload rpcs This fixes a bunch of command hangs after runtime suspend/resume. This fixes a regression caused by code movement in the commit below, the commit seems to just change timings enough to cause this to happen now, and adding the sleep seems to avoid it. I've spent some time trying to root cause it to no great avail, it seems like a bug on the firmware side, but it could be a bug in our rpc handling that I can't find. Either way, we should land the workaround to fix the problem, while we continue to work out the root cause. 
Signed-off-by: Dave Airlie Cc: Ben Skeggs Cc: Danilo Krummrich Fixes: c21b039715ce ("drm/nouveau/gsp: add hals for fbsr.suspend/resume()") Signed-off-by: Danilo Krummrich Link: https://lore.kernel.org/r/20250702232707.175679-1-airlied@gmail.com --- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index baf42339f93e..23f80e167705 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -1744,6 +1744,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); return ret; } + + /* + * TODO: Debug the GSP firmware / RPC handling to find out why + * without this Turing (but none of the other architectures) + * ends up resetting all channels after resume. + */ + msleep(50); } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); From 8ff4fb276e2384a87ae7f65f3c28e1e139dbb3fe Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 27 Jun 2025 10:01:46 -0500 Subject: [PATCH 055/234] pinctrl: amd: Clear GPIO debounce for suspend soc-button-array hardcodes a debounce value by means of gpio_keys which uses pinctrl-amd as a backend to program debounce for a GPIO. This hardcoded value doesn't match what the firmware intended to be programmed in _AEI. The hardcoded debounce leads to problems waking from suspend. There isn't appetite to conditionalize the behavior in soc-button-array or gpio-keys so clear it when the system suspends to avoid problems with being able to resume. Cc: Dmitry Torokhov Cc: Hans de Goede Fixes: 5c4fa2a6da7fb ("Input: soc_button_array - debounce the buttons") Link: https://lore.kernel.org/linux-input/mkgtrb5gt7miyg6kvqdlbu4nj3elym6ijudobpdi26gp4xxay5@rsa6ytrjvj2q/ Link: https://lore.kernel.org/linux-input/20250625215813.3477840-1-superm1@kernel.org/ Signed-off-by: Mario Limonciello Reviewed-by: Hans de Goede Link: https://lore.kernel.org/20250627150155.3311574-1-superm1@kernel.org Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-amd.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 5cf3db6d78b7..b3f0d02aeeb3 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -979,6 +979,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend pin, is_suspend ? "suspend" : "hibernate"); } + /* + * debounce enabled over suspend has shown issues with a GPIO + * being unable to wake the system, as we're only interested in + * the actual wakeup event, clear it. + */ + if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) { + amd_gpio_set_debounce(gpio_dev, pin, 0); + pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n", + pin, is_suspend ? "suspend" : "hibernate"); + } + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } From 2e9fdbe5ec7a65b66da9c202cac621a3a366fde3 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Sun, 29 Jun 2025 17:37:42 +0200 Subject: [PATCH 056/234] rust: drm: device: drop_in_place() the drm::Device in release() In drm::Device::new() we allocate with __drm_dev_alloc() and return an ARef. When the reference count of the drm::Device falls to zero, the C code automatically calls drm_dev_release(), which eventually frees the memory allocated in drm::Device::new(). However, due to that, drm::Device::drop() is never called. 
As a result the destructor of the user's private data, i.e. drm::Device::data is never called. Hence, fix this by calling drop_in_place() from the DRM device's release callback. Fixes: 1e4b8896c0f3 ("rust: drm: add device abstraction") Reviewed-by: Alice Ryhl Signed-off-by: Danilo Krummrich Link: https://lore.kernel.org/r/20250629153747.72536-1-dakr@kernel.org --- rust/kernel/drm/device.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs index 624d7a4c83ea..14c1aa402951 100644 --- a/rust/kernel/drm/device.rs +++ b/rust/kernel/drm/device.rs @@ -66,7 +66,7 @@ impl Device { open: Some(drm::File::::open_callback), postclose: Some(drm::File::::postclose_callback), unload: None, - release: None, + release: Some(Self::release), master_set: None, master_drop: None, debugfs_init: None, @@ -162,6 +162,16 @@ pub unsafe fn as_ref<'a>(ptr: *const bindings::drm_device) -> &'a Self { // SAFETY: `ptr` is valid by the safety requirements of this function. unsafe { &*ptr.cast() } } + + extern "C" fn release(ptr: *mut bindings::drm_device) { + // SAFETY: `ptr` is a valid pointer to a `struct drm_device` and embedded in `Self`. + let this = unsafe { Self::from_drm_device(ptr) }; + + // SAFETY: + // - When `release` runs it is guaranteed that there is no further access to `this`. + // - `this` is valid for dropping. + unsafe { core::ptr::drop_in_place(this) }; + } } impl Deref for Device { From 043faef334a1f3d96ae88e1b7618bfa2b4946388 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Thu, 3 Jul 2025 22:06:13 +0200 Subject: [PATCH 057/234] ALSA: ad1816a: Fix potential NULL pointer deref in snd_card_ad1816a_pnp() Use pr_warn() instead of dev_warn() when 'pdev' is NULL to avoid a potential NULL pointer dereference. Cc: stable@vger.kernel.org Fixes: 20869176d7a7 ("ALSA: ad1816a: Use standard print API") Signed-off-by: Thorsten Blum Link: https://patch.msgid.link/20250703200616.304309-2-thorsten.blum@linux.dev Signed-off-by: Takashi Iwai --- sound/isa/ad1816a/ad1816a.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c index 99006dc4777e..5c9e2d41d900 100644 --- a/sound/isa/ad1816a/ad1816a.c +++ b/sound/isa/ad1816a/ad1816a.c @@ -98,7 +98,7 @@ static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card, pdev = pnp_request_card_device(card, id->devs[1].id, NULL); if (pdev == NULL) { mpu_port[dev] = -1; - dev_warn(&pdev->dev, "MPU401 device busy, skipping.\n"); + pr_warn("MPU401 device busy, skipping.\n"); return 0; } From 30e0fd3c0273dc106320081793793a424f1f1950 Mon Sep 17 00:00:00 2001 From: Hugo Villeneuve Date: Thu, 3 Jul 2025 15:18:29 -0400 Subject: [PATCH 058/234] gpiolib: fix performance regression when using gpio_chip_get_multiple() commit 74abd086d2ee ("gpiolib: sanitize the return value of gpio_chip::get_multiple()") altered the value returned by gc->get_multiple() in case it is positive (> 0), but failed to return for other cases (<= 0). This may result in the "if (gc->get)" block being executed and thus negates the performance gain that is normally obtained by using gc->get_multiple(). Fix by returning the result of gc->get_multiple() if it is <= 0. Also move the "ret" variable to the scope where it is used, which as an added bonus fixes an indentation error introduced by the aforementioned commit. 
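Reduced to a standalone sketch (the struct and names are made up), the shape of the regression is a missing early return on the fast path:

    #include <errno.h>

    struct chip {
            int (*get_multiple)(unsigned long *bits);   /* fast path */
            int (*get)(unsigned int line);              /* slow path */
            unsigned int ngpio;     /* assumed <= BITS_PER_LONG here */
    };

    static int chip_get_bits(struct chip *gc, unsigned long *bits)
    {
            if (gc->get_multiple) {
                    int ret = gc->get_multiple(bits);

                    if (ret > 0)    /* sanitize bogus positive returns */
                            return -EBADE;

                    return ret;     /* the early return the fix restores */
            }

            /* Without that return we would fall through to this much
             * slower per-line loop even though get_multiple() already
             * gave an answer.
             */
            for (unsigned int i = 0; i < gc->ngpio; i++) {
                    int v = gc->get(i);

                    if (v < 0)
                            return v;
                    if (v)
                            *bits |= 1UL << i;
            }

            return 0;
    }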
Fixes: 74abd086d2ee ("gpiolib: sanitize the return value of gpio_chip::get_multiple()") Cc: stable@vger.kernel.org Signed-off-by: Hugo Villeneuve Link: https://lore.kernel.org/r/20250703191829.2952986-1-hugo@hugovil.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpiolib.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index fdafa0df1b43..3a3eca5b4c40 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3297,14 +3297,15 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - int ret; - lockdep_assert_held(&gc->gpiodev->srcu); if (gc->get_multiple) { + int ret; + ret = gc->get_multiple(gc, mask, bits); if (ret > 0) return -EBADE; + return ret; } if (gc->get) { From 5285b5ed04ab6ad40f7b654eefbccd6ae8cbf415 Mon Sep 17 00:00:00 2001 From: Milan Krstic Date: Thu, 3 Jul 2025 14:30:39 +0000 Subject: [PATCH 059/234] pinctrl: aw9523: fix can_sleep flag for GPIO chip The GPIO expander is connected via I2C, thus the can_sleep flag has to be set to true. This fixes spurious "scheduling while atomic" bugs in the kernel ringbuffer. Signed-off-by: David Bauer Signed-off-by: Milan Krstic Link: https://lore.kernel.org/20250703143039.5809-1-milan.krstic@gmail.com Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-aw9523.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c index 9bf53de20be8..04afb344e9e5 100644 --- a/drivers/pinctrl/pinctrl-aw9523.c +++ b/drivers/pinctrl/pinctrl-aw9523.c @@ -784,7 +784,7 @@ static int aw9523_init_gpiochip(struct aw9523 *awi, unsigned int npins) gc->set_config = gpiochip_generic_config; gc->parent = dev; gc->owner = THIS_MODULE; - gc->can_sleep = false; + gc->can_sleep = true; return 0; } From 44306a684cd1699b8562a54945ddc43e2abc9eab Mon Sep 17 00:00:00 2001 From: Mikko Perttunen Date: Wed, 2 Jul 2025 11:08:07 +0900 Subject: [PATCH 060/234] drm/tegra: nvdec: Fix dma_alloc_coherent error check Check for NULL return value with dma_alloc_coherent, in line with Robin's fix for vic.c in 'drm/tegra: vic: Fix DMA API misuse'. Fixes: 46f226c93d35 ("drm/tegra: Add NVDEC driver") Signed-off-by: Mikko Perttunen Signed-off-by: Thierry Reding Link: https://lore.kernel.org/r/20250702-nvdec-dma-error-check-v1-1-c388b402c53a@nvidia.com --- drivers/gpu/drm/tegra/nvdec.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 2d9a0a3f6c38..7a38664e890e 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) if (!client->group) { virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(nvdec->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); if (IS_ERR(virt)) From b9fd9888a5654e59f6c6249337e36c53c1faa329 Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Wed, 2 Jul 2025 14:48:22 +0800 Subject: [PATCH 061/234] bnxt_en: eliminate the compile warning in bnxt_request_irq due to CONFIG_RFS_ACCEL I received a kernel-test-bot report[1] that shows the [-Wunused-but-set-variable] warning. 
Since the previous commit I made, as the 'Fixes' tag shows, gives users an option to turn on and off the CONFIG_RFS_ACCEL, the issue then can be discovered and reproduced with GCC specifically. Like Simon and Jakub suggested, use fewer #ifdefs which leads to fewer bugs. [1] All warnings (new ones prefixed by >>): drivers/net/ethernet/broadcom/bnxt/bnxt.c: In function 'bnxt_request_irq': >> drivers/net/ethernet/broadcom/bnxt/bnxt.c:10703:9: warning: variable 'j' set but not used [-Wunused-but-set-variable] 10703 | int i, j, rc = 0; | ^ Fixes: 9b6a30febddf ("net: allow rps/rfs related configs to be switched") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202506282102.x1tXt0qz-lkp@intel.com/ Signed-off-by: Jason Xing Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ae89a981e052..243cb13cb01c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11607,11 +11607,9 @@ static void bnxt_free_irq(struct bnxt *bp) static int bnxt_request_irq(struct bnxt *bp) { + struct cpu_rmap *rmap = NULL; int i, j, rc = 0; unsigned long flags = 0; -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap; -#endif rc = bnxt_setup_int_mode(bp); if (rc) { @@ -11632,15 +11630,15 @@ static int bnxt_request_irq(struct bnxt *bp) int map_idx = bnxt_cp_num_to_irq_num(bp, i); struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; -#ifdef CONFIG_RFS_ACCEL - if (rmap && bp->bnapi[i]->rx_ring) { + if (IS_ENABLED(CONFIG_RFS_ACCEL) && + rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); if (rc) netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", j); j++; } -#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, bp->bnapi[i]); if (rc) From ef8923e6c051a98164c2889db943df9695a39888 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 4 Jul 2025 05:47:07 -0700 Subject: [PATCH 062/234] arm64: efi: Fix KASAN false positive for EFI runtime stack KASAN reports invalid accesses during arch_stack_walk() for EFI runtime services due to vmalloc tagging[1]. The EFI runtime stack must be allocated with KASAN tags reset to avoid false positives. This patch uses arch_alloc_vmap_stack() instead of __vmalloc_node() for EFI stack allocation, which internally calls kasan_reset_tag() The changes ensure EFI runtime stacks are properly sanitized for KASAN while maintaining functional consistency. 
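For reference, the reason the swap helps: on arm64, arch_alloc_vmap_stack() is roughly a __vmalloc_node() call whose result is passed through kasan_reset_tag(), so the stack is handed out untagged and later accesses through untagged stack pointers no longer trip the tag checks. A simplified sketch of that helper (details such as the gfp flags may differ from the real header):

    static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
    {
            void *p;

            p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP,
                               node, __builtin_return_address(0));

            return kasan_reset_tag(p);      /* drop the KASAN tag */
    }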
Link: https://lore.kernel.org/all/aFVVEgD0236LdrL6@gmail.com/ [1] Suggested-by: Andrey Konovalov Suggested-by: Catalin Marinas Reviewed-by: Catalin Marinas Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20250704-arm_kasan-v2-1-32ebb4fd7607@debian.org Signed-off-by: Will Deacon --- arch/arm64/kernel/efi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 3857fd7ee8d4..62230d6dd919 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -15,6 +15,7 @@ #include #include +#include static bool region_is_misaligned(const efi_memory_desc_t *md) { @@ -214,9 +215,13 @@ static int __init arm64_efi_rt_init(void) if (!efi_enabled(EFI_RUNTIME_SERVICES)) return 0; - p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, - NUMA_NO_NODE, &&l); -l: if (!p) { + if (!IS_ENABLED(CONFIG_VMAP_STACK)) { + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return -ENOMEM; + } + + p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE); + if (!p) { pr_warn("Failed to allocate EFI runtime stack\n"); clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); return -ENOMEM; From 727c2a53cf959f599493c50a80fe2a356b8b1df6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 13 Jun 2025 15:19:36 +0100 Subject: [PATCH 063/234] arm64: Unconditionally select CONFIG_JUMP_LABEL Aneesh reports that his kernel fails to boot in nVHE mode with KVM's protected mode enabled. Further investigation by Mostafa reveals that this fails because CONFIG_JUMP_LABEL=n and that we have static keys shared between EL1 and EL2. While this can be worked around, it is obvious that we have long relied on having CONFIG_JUMP_LABEL enabled at all times, as all supported compilers now have 'asm goto' (which is the basic block for jump labels). Let's simplify our lives once and for all by mandating jump labels. It's not like anyone else is testing anything without them, and we already rely on them for other things (kfence, xfs, preempt). 
Link: https://lore.kernel.org/r/yq5ah60pkq03.fsf@kernel.org Reported-by: Aneesh Kumar K.V Reported-by: Mostafa Saleh Signed-off-by: Marc Zyngier Cc: Will Deacon Cc: Catalin marinas Cc: Mark Rutland Cc: Ard Biesheuvel Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20250613141936.2219895-1-maz@kernel.org Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + arch/arm64/kernel/Makefile | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 55fc331af337..393d71124f5d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -256,6 +256,7 @@ config ARM64 select HOTPLUG_SMT if HOTPLUG_CPU select IRQ_DOMAIN select IRQ_FORCED_THREADING + select JUMP_LABEL select KASAN_VMALLOC if KASAN select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 2920b0a51403..a2faf0049dab 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ syscall.o proton-pack.o idle.o patching.o pi/ \ - rsi.o + rsi.o jump_label.o obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ sys_compat.o @@ -47,7 +47,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_CPU_PM) += sleep.o suspend.o -obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o obj-$(CONFIG_PCI) += pci.o From 0d1c86b840966a278d9b25a9d7c18881980f306e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 19 Jun 2025 22:15:41 +0100 Subject: [PATCH 064/234] arm64/gcs: Don't try to access GCS registers if arm64.nogcs is enabled During EL2 setup if GCS is advertised in the ID registers we will reset the GCS control registers GCSCR_EL1 and GCSCRE0_EL1 to known values in order to ensure it is disabled. This is done without taking into account overrides supplied on the command line, meaning that if the user has configured arm64.nogcs we will still access these GCS specific registers. If this was done because EL3 does not enable GCS this results in traps to EL3 and a failed boot which is not what users would expect from having set that parameter. Move the writes to these registers to finalise_el2_state where we can pay attention to the command line overrides. For simplicity we leave the updates to the traps in HCRX_EL2 and the FGT registers in place since these should only be relevant for KVM guests and KVM will manage them itself for guests. This follows the existing practice for other similar traps for overridable features such as those for TPIDR2_EL0 and SMPRI_EL1. 
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20250619-arm64-fix-nogcs-v1-1-febf2973672e@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/el2_setup.h | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index ba5df0df02a4..9f38340d24c2 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -287,17 +287,6 @@ .Lskip_fgt2_\@: .endm -.macro __init_el2_gcs - mrs_s x1, SYS_ID_AA64PFR1_EL1 - ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4 - cbz x1, .Lskip_gcs_\@ - - /* Ensure GCS is not enabled when we start trying to do BLs */ - msr_s SYS_GCSCR_EL1, xzr - msr_s SYS_GCSCRE0_EL1, xzr -.Lskip_gcs_\@: -.endm - /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as @@ -319,7 +308,6 @@ __init_el2_cptr __init_el2_fgt __init_el2_fgt2 - __init_el2_gcs .endm #ifndef __KVM_NVHE_HYPERVISOR__ @@ -371,6 +359,13 @@ msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2 .Lskip_mpam_\@: + check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2 + +.Linit_gcs_\@: + msr_s SYS_GCSCR_EL1, xzr + msr_s SYS_GCSCRE0_EL1, xzr + +.Lskip_gcs_\@: check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2 .Linit_sve_\@: /* SVE register access */ From d38376b3ee48d073c64e75e150510d7e6b4b04f7 Mon Sep 17 00:00:00 2001 From: Alessio Belle Date: Tue, 24 Jun 2025 15:22:08 +0100 Subject: [PATCH 065/234] drm/imagination: Fix kernel crash when hard resetting the GPU The GPU hard reset sequence calls pm_runtime_force_suspend() and pm_runtime_force_resume(), which according to their documentation should only be used during system-wide PM transitions to sleep states. The main issue though is that depending on some internal runtime PM state as seen by pm_runtime_force_suspend() (whether the usage count is <= 1), pm_runtime_force_resume() might not resume the device unless needed. If that happens, the runtime PM resume callback pvr_power_device_resume() is not called, the GPU clocks are not re-enabled, and the kernel crashes on the next attempt to access GPU registers as part of the power-on sequence. Replace calls to pm_runtime_force_suspend() and pm_runtime_force_resume() with direct calls to the driver's runtime PM callbacks, pvr_power_device_suspend() and pvr_power_device_resume(), to ensure clocks are re-enabled and avoid the kernel crash. 
Fixes: cc1aeedb98ad ("drm/imagination: Implement firmware infrastructure and META FW support") Signed-off-by: Alessio Belle Reviewed-by: Matt Coster Link: https://lore.kernel.org/r/20250624-fix-kernel-crash-gpu-hard-reset-v1-1-6d24810d72a6@imgtec.com Cc: stable@vger.kernel.org Signed-off-by: Matt Coster --- drivers/gpu/drm/imagination/pvr_power.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index 41f5d89e78b8..3e349d039fc0 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -386,13 +386,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) if (!err) { if (hard_reset) { pvr_dev->fw_dev.booted = false; - WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev)); err = pvr_fw_hard_reset(pvr_dev); if (err) goto err_device_lost; - err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev); pvr_dev->fw_dev.booted = true; if (err) goto err_device_lost; From 6c66bb655ca3fd5e9304163cf70796d08de512ed Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 20 Jun 2025 13:10:41 +0200 Subject: [PATCH 066/234] arm64: move smp_send_stop() cpu mask off stack For really large values of CONFIG_NR_CPUS, a CPU mask value should not be put on the stack: arch/arm64/kernel/smp.c:1188:1: error: the frame size of 8544 bytes is larger than 1536 bytes [-Werror=frame-larger-than=] This could be achieved using alloc_cpumask_var(), which makes it depend on CONFIG_CPUMASK_OFFSTACK, but as this function is already serialized and can only run on one CPU, making the variable 'static' is easier. Signed-off-by: Arnd Bergmann Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20250620111045.3364827-1-arnd@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 3b3f6b56e733..21a795303568 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -1143,7 +1143,7 @@ static inline unsigned int num_other_online_cpus(void) void smp_send_stop(void) { static unsigned long stop_in_progress; - cpumask_t mask; + static cpumask_t mask; unsigned long timeout; /* From a75ad2fc76a2ab70817c7eed3163b66ea84ca6ac Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 20 Jun 2025 12:28:48 +0100 Subject: [PATCH 067/234] arm64: Filter out SME hwcaps when FEAT_SME isn't implemented We have a number of hwcaps for various SME subfeatures enumerated via ID_AA64SMFR0_EL1. Currently we advertise these without cross checking against the main SME feature, advertised in ID_AA64PFR1_EL1.SME which means that if the two are out of sync userspace can see a confusing situation where SME subfeatures are advertised without the base SME hwcap. This can be readily triggered by using the arm64.nosme override which only masks out ID_AA64PFR1_EL1.SME, and there have also been reports of VMMs which do the same thing. Fix this as we did previously for SVE in 064737920bdb ("arm64: Filter out SVE hwcaps when FEAT_SVE isn't implemented") by filtering out the SME subfeature hwcaps when FEAT_SME is not present. 
Fixes: 5e64b862c482 ("arm64/sme: Basic enumeration support") Reported-by: Yury Khrustalev Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20250620-arm64-sme-filter-hwcaps-v1-1-02b9d3c2d8ef@kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 57 +++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b34044e20128..e151585c6cca 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -3135,6 +3135,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope) } #endif +#ifdef CONFIG_ARM64_SME +static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope) +{ + return system_supports_sme() && has_user_cpuid_feature(cap, scope); +} +#endif + static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL), HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES), @@ -3223,31 +3230,31 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC), #ifdef CONFIG_ARM64_SME HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME), - HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), - HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), - HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), - HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), - HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), - HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), - HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), - HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), - HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM), - HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES), - HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA), - HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP), - HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2), + 
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP), + HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4), #endif /* CONFIG_ARM64_SME */ HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT), HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA), From 22f3a4f6085951eff28bd1e44d3f388c1d9a5f44 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Thu, 19 Jun 2025 17:00:41 +0100 Subject: [PATCH 068/234] arm64: poe: Handle spurious Overlay faults We do not currently issue an ISB after updating POR_EL0 when context-switching it, for instance. The rationale is that if the old value of POR_EL0 is more restrictive and causes a fault during uaccess, the access will be retried [1]. In other words, we are trading an ISB on every context-switching for the (unlikely) possibility of a spurious fault. We may also miss faults if the new value of POR_EL0 is more restrictive, but that's considered acceptable. However, as things stand, a spurious Overlay fault results in uaccess failing right away since it causes fault_from_pkey() to return true. If an Overlay fault is reported, we therefore need to double check POR_EL0 against vma_pkey(vma) - this is what arch_vma_access_permitted() already does. 
As it turns out, we already perform that explicit check if no Overlay fault is reported, and we need to keep that check (see comment added in fault_from_pkey()). Net result: the Overlay ISS2 bit isn't of much help to decide whether a pkey fault occurred. Remove the check for the Overlay bit from fault_from_pkey() and add a comment to try and explain the situation. While at it, also add a comment to permission_overlay_switch() in case anyone gets surprised by the lack of ISB. [1] https://lore.kernel.org/linux-arm-kernel/ZtYNGBrcE-j35fpw@arm.com/ Fixes: 160a8e13de6c ("arm64: context switch POR_EL0 register") Signed-off-by: Kevin Brodsky Link: https://lore.kernel.org/r/20250619160042.2499290-2-kevin.brodsky@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 5 +++++ arch/arm64/mm/fault.c | 30 +++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 5954cec19660..08b7042a2e2d 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -673,6 +673,11 @@ static void permission_overlay_switch(struct task_struct *next) current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); if (current->thread.por_el0 != next->thread.por_el0) { write_sysreg_s(next->thread.por_el0, SYS_POR_EL0); + /* + * No ISB required as we can tolerate spurious Overlay faults - + * the fault handler will check again based on the new value + * of POR_EL0. + */ } } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index ec0a337891dd..11eb8d1adc84 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma, - unsigned int mm_flags) +static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags) { - unsigned long iss2 = ESR_ELx_ISS2(esr); - if (!system_supports_poe()) return false; - if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay)) - return true; - + /* + * We do not check whether an Overlay fault has occurred because we + * cannot make a decision based solely on its value: + * + * - If Overlay is set, a fault did occur due to POE, but it may be + * spurious in those cases where we update POR_EL0 without ISB (e.g. + * on context-switch). We would then need to manually check POR_EL0 + * against vma_pkey(vma), which is exactly what + * arch_vma_access_permitted() does. + * + * - If Overlay is not set, we may still need to report a pkey fault. + * This is the case if an access was made within a mapping but with no + * page mapped, and POR_EL0 forbids the access (according to + * vma_pkey()). Such access will result in a SIGSEGV regardless + * because core code checks arch_vma_access_permitted(), but in order + * to report the correct error code - SEGV_PKUERR - we must handle + * that case here. 
+ */ return !arch_vma_access_permitted(vma, mm_flags & FAULT_FLAG_WRITE, mm_flags & FAULT_FLAG_INSTRUCTION, @@ -635,7 +647,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto bad_area; } - if (fault_from_pkey(esr, vma, mm_flags)) { + if (fault_from_pkey(vma, mm_flags)) { pkey = vma_pkey(vma); vma_end_read(vma); fault = 0; @@ -679,7 +691,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto bad_area; } - if (fault_from_pkey(esr, vma, mm_flags)) { + if (fault_from_pkey(vma, mm_flags)) { pkey = vma_pkey(vma); mmap_read_unlock(mm); fault = 0; From 9dd1757493416310a5e71146a08bc228869f8dae Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Fri, 4 Jul 2025 12:08:12 +0530 Subject: [PATCH 069/234] arm64/mm: Drop wrong writes into TCR2_EL1 Register X0 contains PIE_E1_ASM and should not be written into REG_TCR2_EL1 which could have an adverse impact otherwise. This has remained undetected till now probably because current value for PIE_E1_ASM (0xcc880e0ac0800000) clears TCR2_EL1 which again gets set subsequently with 'tcr2' after testing for FEAT_TCR2. Drop this unwarranted 'msr' which is a stray change from an earlier commit. This line got re-introduced when rebasing on top of the commit 926b66e2ebc8 ("arm64: setup: name 'tcr2' register"). Cc: Catalin Marinas Cc: Will Deacon Cc: Ryan Roberts Cc: Marc Zyngier Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Fixes: 7052e808c446 ("arm64/sysreg: Get rid of the TCR2_EL1x SysregFields") Acked-by: Marc Zyngier Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20250704063812.298914-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/proc.S | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 80d470aa469d..54dccfd6aa11 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -518,7 +518,6 @@ alternative_else_nop_endif msr REG_PIR_EL1, x0 orr tcr2, tcr2, TCR2_EL1_PIE - msr REG_TCR2_EL1, x0 .Lskip_indirection: From 63d6e9311999a3dd125ad3e0560a769e047fd7b1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 2 Jul 2025 13:28:55 -0400 Subject: [PATCH 070/234] bcachefs: bch2_fpunch_snapshot() Add a new version of fpunch for operating on a snapshot ID, not a subvolume - and use it for "extent past end of inode" repair. Previously, repair would try to delete everything at once, but deleting too many extents at once can overflow the btree_trans bump allocator, as well as causing other problems - the new helper properly uses bch2_extent_trim_atomic(). 
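Sketched in isolation, the property the new helper provides is that a large key range is deleted in bounded, transaction-sized steps instead of all at once. A plain C illustration of that chunking idea, not bcachefs code:

  #include <stdint.h>
  #include <stdio.h>

  /* Never delete more than CHUNK units per "transaction" so each step
   * stays within a bounded allocation, mirroring the atomic-trim idea. */
  #define CHUNK 64

  static void delete_range(uint64_t start, uint64_t end)
  {
      while (start < end) {
          uint64_t step = (end - start > CHUNK) ? CHUNK : end - start;

          /* one bounded step: delete [start, start + step) */
          printf("delete [%llu, %llu)\n",
                 (unsigned long long)start,
                 (unsigned long long)(start + step));
          start += step;
      }
  }

  int main(void)
  {
      delete_range(0, 200);
      return 0;
  }
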
Reported-and-tested-by: Edoardo Codeglia Signed-off-by: Kent Overstreet --- fs/bcachefs/fsck.c | 33 ++++++--------------------------- fs/bcachefs/io_misc.c | 27 +++++++++++++++++++++++++++ fs/bcachefs/io_misc.h | 2 ++ 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index dbf161e4311a..856eb2b41896 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -12,6 +12,7 @@ #include "fs.h" #include "fsck.h" #include "inode.h" +#include "io_misc.h" #include "keylist.h" #include "namei.h" #include "recovery_passes.h" @@ -1919,33 +1920,11 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, "extent type past end of inode %llu:%u, i_size %llu\n%s", i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - struct bkey_i *whiteout = bch2_trans_kmalloc(trans, sizeof(*whiteout)); - ret = PTR_ERR_OR_ZERO(whiteout); - if (ret) - goto err; - - bkey_init(&whiteout->k); - whiteout->k.p = SPOS(k.k->p.inode, - last_block, - i->inode.bi_snapshot); - bch2_key_resize(&whiteout->k, - min(KEY_SIZE_MAX & (~0 << c->block_bits), - U64_MAX - whiteout->k.p.offset)); - - - /* - * Need a normal (not BTREE_ITER_all_snapshots) - * iterator, if we're deleting in a different - * snapshot and need to emit a whiteout - */ - struct btree_iter iter2; - bch2_trans_iter_init(trans, &iter2, BTREE_ID_extents, - bkey_start_pos(&whiteout->k), - BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(trans, &iter2) ?: - bch2_trans_update(trans, &iter2, whiteout, - BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(trans, &iter2); + ret = bch2_fpunch_snapshot(trans, + SPOS(i->inode.bi_inum, + last_block, + i->inode.bi_snapshot), + POS(i->inode.bi_inum, U64_MAX)); if (ret) goto err; diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index bf72b1d2e2cb..07023667a475 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -135,6 +135,33 @@ int bch2_extent_fallocate(struct btree_trans *trans, return ret; } +/* For fsck */ +int bch2_fpunch_snapshot(struct btree_trans *trans, struct bpos start, struct bpos end) +{ + u32 restart_count = trans->restart_count; + struct bch_fs *c = trans->c; + struct disk_reservation disk_res = bch2_disk_reservation_init(c, 0); + unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits); + struct bkey_i delete; + + int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents, + start, end, 0, k, + &disk_res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + bkey_init(&delete.k); + delete.k.p = iter.pos; + + /* create the biggest key we can */ + bch2_key_resize(&delete.k, max_sectors); + bch2_cut_back(end, &delete); + + bch2_extent_trim_atomic(trans, &iter, &delete) ?: + bch2_trans_update(trans, &iter, &delete, 0); + })); + + bch2_disk_reservation_put(c, &disk_res); + return ret ?: trans_was_restarted(trans, restart_count); +} + /* * Returns -BCH_ERR_transacton_restart if we had to drop locks: */ diff --git a/fs/bcachefs/io_misc.h b/fs/bcachefs/io_misc.h index 9cb44a7c43c1..b93e4d4b3c0c 100644 --- a/fs/bcachefs/io_misc.h +++ b/fs/bcachefs/io_misc.h @@ -5,6 +5,8 @@ int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *, u64, struct bch_io_opts, s64 *, struct write_point_specifier); + +int bch2_fpunch_snapshot(struct btree_trans *, struct bpos, struct bpos); int bch2_fpunch_at(struct btree_trans *, struct btree_iter *, subvol_inum, u64, s64 *); int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *); From 
ddb9680a7226d6081b42869d2875715a17ecb51d Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 3 Jul 2025 15:08:19 -0400
Subject: [PATCH 071/234] bcachefs: Fix bch2_io_failures_to_text()

This wasn't updated when we added tracking for btree validate errors.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/extents.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 036e4ad95987..83cbd77dcb9c 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -50,19 +50,17 @@ void bch2_io_failures_to_text(struct printbuf *out,
 			      struct bch_io_failures *failed)
 {
 	static const char * const error_types[] = {
-		"io", "checksum", "ec reconstruct", NULL
+		"btree validate", "io", "checksum", "ec reconstruct", NULL
 	};
 
 	for (struct bch_dev_io_failures *f = failed->devs;
 	     f < failed->devs + failed->nr;
 	     f++) {
 		unsigned errflags =
-			((!!f->failed_io) << 0) |
-			((!!f->failed_csum_nr) << 1) |
-			((!!f->failed_ec) << 2);
-
-		if (!errflags)
-			continue;
+			((!!f->failed_btree_validate) << 0) |
+			((!!f->failed_io) << 1) |
+			((!!f->failed_csum_nr) << 2) |
+			((!!f->failed_ec) << 3);
 
 		bch2_printbuf_make_room(out, 1024);
 		out->atomic++;
@@ -77,7 +75,9 @@ void bch2_io_failures_to_text(struct printbuf *out,
 
 		prt_char(out, ' ');
 
-		if (is_power_of_2(errflags)) {
+		if (!errflags) {
+			prt_str(out, "no error - confused");
+		} else if (is_power_of_2(errflags)) {
 			prt_bitflags(out, error_types, errflags);
 			prt_str(out, " error");
 		} else {

From c72d628469b8f46251b3afc361269cb15de0c988 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 3 Jul 2025 15:53:51 -0400
Subject: [PATCH 072/234] bcachefs: Fix btree for nonexistent tree depth

The fix for when we should increase tree depth in journal replay was
entirely bogus. We should only increase the tree depth in journal replay
when recovering from btree node scan, and then only for keys found by
btree node scan.

This needs additional work - we should be shooting down existing
interior node pointers when recovering from scan; they shouldn't be
showing up here.
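A rough stand-alone model of the new decision, using made-up types rather than bcachefs ones: a missing depth is fatal unless the scan passes ran, an ordinary journal key is dropped, and only a scan-found key goes on to increase the depth.

  #include <stdbool.h>
  #include <stdio.h>

  struct replay_key {
      bool allocated;  /* true if the key was added by btree node scan */
  };

  /* Returns 0 if replay may continue, negative on a fatal inconsistency. */
  static int handle_missing_depth(struct replay_key *k, bool scan_passes_ran)
  {
      if (!scan_passes_ran) {
          fprintf(stderr, "key for a btree depth that does not exist, confused\n");
          return -1;
      }

      if (!k->allocated) {
          /* ordinary journal key: drop it rather than replaying it into a
           * depth that never existed */
          fprintf(stderr, "dropping key for nonexistent depth\n");
          return 0;
      }

      /* scan-found key: this is where the tree depth would be increased */
      return 0;
  }

  int main(void)
  {
      struct replay_key k = { .allocated = false };

      return handle_missing_depth(&k, true) ? 1 : 0;
  }
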
Fixes: b47a82ff4772 ("bcachefs: Only run 'increase_depth' for keys from btree node csan") Cc: Alan Huang Reported-by: syzbot+8deb6ff4415db67a9f18@syzkaller.appspotmail.com Signed-off-by: Kent Overstreet --- fs/bcachefs/recovery.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index d0b7e3a36a54..c94debb12d2f 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -273,24 +273,35 @@ static int bch2_journal_replay_key(struct btree_trans *trans, goto out; struct btree_path *path = btree_iter_path(trans, &iter); - if (unlikely(!btree_path_node(path, k->level) && - !k->allocated)) { + if (unlikely(!btree_path_node(path, k->level))) { struct bch_fs *c = trans->c; + CLASS(printbuf, buf)(); + prt_str(&buf, "btree="); + bch2_btree_id_to_text(&buf, k->btree_id); + prt_printf(&buf, " level=%u ", k->level); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k->k)); + if (!(c->recovery.passes_complete & (BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes)| BIT_ULL(BCH_RECOVERY_PASS_check_topology)))) { - bch_err(c, "have key in journal replay for btree depth that does not exist, confused"); + bch_err(c, "have key in journal replay for btree depth that does not exist, confused\n%s", + buf.buf); ret = -EINVAL; } -#if 0 + + if (!k->allocated) { + bch_notice(c, "dropping key in journal replay for depth that does not exist because we're recovering from scan\n%s", + buf.buf); + k->overwritten = true; + goto out; + } + bch2_trans_iter_exit(trans, &iter); bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p, BTREE_MAX_DEPTH, 0, iter_flags); ret = bch2_btree_iter_traverse(trans, &iter) ?: bch2_btree_increase_depth(trans, iter.path, 0) ?: -BCH_ERR_transaction_restart_nested; -#endif - k->overwritten = true; goto out; } From c2b2c7d1da8f26c6db40de2bed142d66e238bd8a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 3 Jul 2025 18:50:18 -0400 Subject: [PATCH 073/234] bcachefs: Tweak btree cache helpers for use by btree node scan btree node scan needs to not use the btree node cache: that causes interference from prior failed reads and parallel workers. Instead we need to allocate btree nodes that don't live in the btree cache, so that we can call bch2_btree_node_read_done() directly. This patch tweaks the low level helpers so they don't touch the btree cache lists. 
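The underlying split is generic: construct the object without touching any shared cache list, so a caller can keep it private and free it directly. A small plain C sketch of that separation, not the bcachefs helpers:

  #include <stdlib.h>

  struct node {
      void *data;
  };

  /* Construction only: no shared list is touched, so the caller can keep
   * the node entirely private (as btree node scan now does). */
  static struct node *node_alloc(size_t bufsize)
  {
      struct node *n = malloc(sizeof(*n));

      if (!n)
          return NULL;
      n->data = malloc(bufsize);
      if (!n->data) {
          free(n);
          return NULL;
      }
      return n;
  }

  /* Teardown counterpart: free the buffers directly instead of returning
   * the node to a cache freelist. */
  static void node_free(struct node *n)
  {
      if (!n)
          return;
      free(n->data);
      free(n);
  }

  int main(void)
  {
      node_free(node_alloc(4096));
      return 0;
  }
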
Cc: Nikita Ofitserov Reviewed-by: Nikita Ofitserov Reported-and-tested-by: Edoardo Codeglia Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_cache.c | 26 +++++++++++++------------- fs/bcachefs/btree_cache.h | 1 + fs/bcachefs/debug.c | 2 -- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 91e0aa796e6b..83c9860e6b82 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -85,7 +85,7 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) six_unlock_intent(&b->c.lock); } -static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) +void __btree_node_data_free(struct btree *b) { BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); @@ -112,16 +112,17 @@ static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) munmap(b->aux_data, btree_aux_data_bytes(b)); #endif b->aux_data = NULL; - - btree_node_to_freedlist(bc, b); } static void btree_node_data_free(struct btree_cache *bc, struct btree *b) { BUG_ON(list_empty(&b->list)); list_del_init(&b->list); + + __btree_node_data_free(b); + --bc->nr_freeable; - __btree_node_data_free(bc, b); + btree_node_to_freedlist(bc, b); } static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, @@ -185,10 +186,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp) struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) { - struct btree_cache *bc = &c->btree_cache; - struct btree *b; - - b = __btree_node_mem_alloc(c, GFP_KERNEL); + struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL); if (!b) return NULL; @@ -198,8 +196,6 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) } bch2_btree_lock_init(&b->c, 0, GFP_KERNEL); - - __bch2_btree_node_to_freelist(bc, b); return b; } @@ -524,7 +520,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, --touched;; } else if (!btree_node_reclaim(c, b)) { __bch2_btree_node_hash_remove(bc, b); - __btree_node_data_free(bc, b); + __btree_node_data_free(b); + btree_node_to_freedlist(bc, b); freed++; bc->nr_freed++; @@ -652,9 +649,12 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) bch2_recalc_btree_reserve(c); - for (i = 0; i < bc->nr_reserve; i++) - if (!__bch2_btree_node_mem_alloc(c)) + for (i = 0; i < bc->nr_reserve; i++) { + struct btree *b = __bch2_btree_node_mem_alloc(c); + if (!b) goto err; + __bch2_btree_node_to_freelist(bc, b); + } list_splice_init(&bc->live[0].list, &bc->freeable); diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h index ca3c1b145330..be275f87a60e 100644 --- a/fs/bcachefs/btree_cache.h +++ b/fs/bcachefs/btree_cache.h @@ -30,6 +30,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsig void bch2_btree_cache_cannibalize_unlock(struct btree_trans *); int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *); +void __btree_node_data_free(struct btree *); struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *); struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool); diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index 901f643ead83..79d64052215c 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -153,8 +153,6 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) c->verify_data = __bch2_btree_node_mem_alloc(c); if (!c->verify_data) goto out; - - list_del_init(&c->verify_data->list); } BUG_ON(b->nsets != 1); From 162c90154422e67c3b2dc209a4304a95e293cd58 Mon Sep 17 00:00:00 2001 From: 
Ofir Bitton Date: Mon, 29 Jul 2024 15:17:18 +0300 Subject: [PATCH 074/234] MAINTAINERS: Change habanalabs maintainer I will be leaving Intel soon, Yaron Avizrat will take the role of habanalabs driver maintainer. Signed-off-by: Ofir Bitton Signed-off-by: Lukas Wunner Acked-by: Yaron Avizrat Acked-by: Jani Nikula Acked-by: Oded Gabbay Link: https://lore.kernel.org/r/20240729121718.540489-2-obitton@habana.ai --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index a92290fffa16..362e3ae900ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10504,7 +10504,7 @@ S: Maintained F: block/partitions/efi.* HABANALABS PCI DRIVER -M: Ofir Bitton +M: Yaron Avizrat L: dri-devel@lists.freedesktop.org S: Supported C: irc://irc.oftc.net/dri-devel From 3d44147494385b245f021a3a3a5c1408be1d50d1 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Fri, 4 Jul 2025 15:50:27 -0400 Subject: [PATCH 075/234] rust: drm: remove unnecessary imports `kernel::str::CStr` is included in the prelude. Signed-off-by: Tamir Duberstein Signed-off-by: Danilo Krummrich Link: https://lore.kernel.org/r/20250704-cstr-include-drm-v1-1-a279dfc4d753@gmail.com --- drivers/gpu/drm/drm_panic_qr.rs | 2 +- rust/kernel/drm/driver.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index dd55b1cb764d..18492daae4b3 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -27,7 +27,7 @@ //! * //! * -use kernel::{prelude::*, str::CStr}; +use kernel::prelude::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] struct Version(usize); diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs index acb638086131..af93d46d03d3 100644 --- a/rust/kernel/drm/driver.rs +++ b/rust/kernel/drm/driver.rs @@ -10,7 +10,6 @@ drm, error::{to_result, Result}, prelude::*, - str::CStr, types::ARef, }; use macros::vtable; From d67ed2ccd2d1dcfda9292c0ea8697a9d0f2f0d98 Mon Sep 17 00:00:00 2001 From: Wang Jinchao Date: Thu, 12 Jun 2025 19:28:40 +0800 Subject: [PATCH 076/234] md/raid1: Fix stack memory use after return in raid1_reshape In the raid1_reshape function, newpool is allocated on the stack and assigned to conf->r1bio_pool. This results in conf->r1bio_pool.wait.head pointing to a stack address. Accessing this address later can lead to a kernel panic. Example access path: raid1_reshape() { // newpool is on the stack mempool_t newpool, oldpool; // initialize newpool.wait.head to stack address mempool_init(&newpool, ...); conf->r1bio_pool = newpool; } raid1_read_request() or raid1_write_request() { alloc_r1bio() { mempool_alloc() { // if pool->alloc fails remove_element() { --pool->curr_nr; } } } } mempool_free() { if (pool->curr_nr < pool->min_nr) { // pool->wait.head is a stack address // wake_up() will try to access this invalid address // which leads to a kernel panic return; wake_up(&pool->wait); } } Fix: reinit conf->r1bio_pool.wait after assigning newpool. 
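The failure mode generalises to any structure whose embedded head points back into itself: copying the structure by value leaves that pointer aimed at the old stack location until it is re-initialised. A minimal stand-alone model, not the real mempool_t layout:

  #include <stdio.h>

  struct waiter_head {
      struct waiter_head *next;  /* self-referential, like a waitqueue head */
  };

  struct pool {
      struct waiter_head wait;
  };

  static void pool_init(struct pool *p)
  {
      p->wait.next = &p->wait;
  }

  int main(void)
  {
      struct pool on_stack;
      static struct pool target;

      pool_init(&on_stack);
      target = on_stack;  /* struct copy, like conf->r1bio_pool = newpool */

      /* target.wait.next still points at on_stack.wait, which dangles once
       * the stack frame is gone; re-initialising fixes the pointer. */
      printf("before reinit: %p (stack head at %p)\n",
             (void *)target.wait.next, (void *)&on_stack.wait);
      pool_init(&target);
      printf("after  reinit: %p (own head at   %p)\n",
             (void *)target.wait.next, (void *)&target.wait);
      return 0;
  }
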
Fixes: afeee514ce7f ("md: convert to bioset_init()/mempool_init()")
Signed-off-by: Wang Jinchao
Reviewed-by: Yu Kuai
Link: https://lore.kernel.org/linux-raid/20250612112901.3023950-1-wangjinchao600@gmail.com
Signed-off-by: Yu Kuai
---
 drivers/md/raid1.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 19c5a0ce5a40..fd4ce2a4136f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3428,6 +3428,7 @@ static int raid1_reshape(struct mddev *mddev)
 	/* ok, everything is stopped */
 	oldpool = conf->r1bio_pool;
 	conf->r1bio_pool = newpool;
+	init_waitqueue_head(&conf->r1bio_pool.wait);
 
 	for (d = d2 = 0; d < conf->raid_disks; d++) {
 		struct md_rdev *rdev = conf->mirrors[d].rdev;

From 43806c3d5b9bb7d74ba4e33a6a8a41ac988bde24 Mon Sep 17 00:00:00 2001
From: Nigel Croxon
Date: Thu, 3 Jul 2025 11:23:04 -0400
Subject: [PATCH 077/234] raid10: cleanup memleak at raid10_make_request

If raid10_read_request or raid10_write_request registers a new request
and the REQ_NOWAIT flag is set, the code does not free the memory
allocated from the mempool.

unreferenced object 0xffff8884802c3200 (size 192):
  comm "fio", pid 9197, jiffies 4298078271
  hex dump (first 32 bytes):
    00 00 00 00 00 00 00 00 88 41 02 00 00 00 00 00  .........A......
    08 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace (crc c1a049a2):
    __kmalloc+0x2bb/0x450
    mempool_alloc+0x11b/0x320
    raid10_make_request+0x19e/0x650 [raid10]
    md_handle_request+0x3b3/0x9e0
    __submit_bio+0x394/0x560
    __submit_bio_noacct+0x145/0x530
    submit_bio_noacct_nocheck+0x682/0x830
    __blkdev_direct_IO_async+0x4dc/0x6b0
    blkdev_read_iter+0x1e5/0x3b0
    __io_read+0x230/0x1110
    io_read+0x13/0x30
    io_issue_sqe+0x134/0x1180
    io_submit_sqes+0x48c/0xe90
    __do_sys_io_uring_enter+0x574/0x8b0
    do_syscall_64+0x5c/0xe0
    entry_SYSCALL_64_after_hwframe+0x76/0x7e

V4: changing backing tree to see if CKI tests will pass.
The patch code has not changed between any versions.

Fixes: c9aa889b035f ("md: raid10 add nowait support")
Signed-off-by: Nigel Croxon
Link: https://lore.kernel.org/linux-raid/c0787379-9caa-42f3-b5fc-369aed784400@redhat.com
Signed-off-by: Yu Kuai
---
 drivers/md/raid10.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b74780af4c22..917055dc01ed 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,8 +1182,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		}
 	}
 
-	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
+	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
+		raid_end_bio_io(r10_bio);
 		return;
+	}
+
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (!rdev) {
 		if (err_rdev) {
@@ -1370,8 +1373,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	}
 
 	sectors = r10_bio->sectors;
-	if (!regular_request_wait(mddev, conf, bio, sectors))
+	if (!regular_request_wait(mddev, conf, bio, sectors)) {
+		raid_end_bio_io(r10_bio);
 		return;
+	}
+
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    (mddev->reshape_backwards
 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&

From 5fa31c49928139fa948f078b094d80f12ed83f5f Mon Sep 17 00:00:00 2001
From: Zheng Qixing
Date: Wed, 2 Jul 2025 18:23:41 +0800
Subject: [PATCH 078/234] md/raid1,raid10: strip REQ_NOWAIT from member bios

RAID layers don't implement proper non-blocking semantics for
REQ_NOWAIT, making the flag potentially misleading when propagated to
member disks.

This patch clears REQ_NOWAIT from cloned bios in raid1/raid10.
Retain original bio's REQ_NOWAIT flag for upper layer error handling. Maybe we can implement non-blocking I/O handling mechanisms within RAID in future work. Fixes: 9f346f7d4ea7 ("md/raid1,raid10: don't handle IO error for REQ_RAHEAD and REQ_NOWAIT") Signed-off-by: Zheng Qixing Link: https://lore.kernel.org/linux-raid/20250702102341.1969154-1-zhengqixing@huaweicloud.com Signed-off-by: Yu Kuai --- drivers/md/raid1.c | 3 ++- drivers/md/raid10.c | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fd4ce2a4136f..64b8176907a9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1399,7 +1399,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio, } read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, &mddev->bio_set); - + read_bio->bi_opf &= ~REQ_NOWAIT; r1_bio->bios[rdisk] = read_bio; read_bio->bi_iter.bi_sector = r1_bio->sector + @@ -1649,6 +1649,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, wait_for_serialization(rdev, r1_bio); } + mbio->bi_opf &= ~REQ_NOWAIT; r1_bio->bios[i] = mbio; mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 917055dc01ed..c9bd2005bfd0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1224,6 +1224,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, r10_bio->master_bio = bio; } read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set); + read_bio->bi_opf &= ~REQ_NOWAIT; r10_bio->devs[slot].bio = read_bio; r10_bio->devs[slot].rdev = rdev; @@ -1259,6 +1260,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, conf->mirrors[devnum].rdev; mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set); + mbio->bi_opf &= ~REQ_NOWAIT; if (replacement) r10_bio->devs[n_copy].repl_bio = mbio; else From c17fb542dbd1db745c9feac15617056506dd7195 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5kon=20Bugge?= Date: Wed, 2 Jul 2025 11:10:34 +0200 Subject: [PATCH 079/234] md/md-bitmap: fix GPF in bitmap_get_stats() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit message of commit 6ec1f0239485 ("md/md-bitmap: fix stats collection for external bitmaps") states: Remove the external bitmap check as the statistics should be available regardless of bitmap storage location. Return -EINVAL only for invalid bitmap with no storage (neither in superblock nor in external file). But, the code does not adhere to the above, as it does only check for a valid super-block for "internal" bitmaps. Hence, we observe: Oops: GPF, probably for non-canonical address 0x1cd66f1f40000028 RIP: 0010:bitmap_get_stats+0x45/0xd0 Call Trace: seq_read_iter+0x2b9/0x46a seq_read+0x12f/0x180 proc_reg_read+0x57/0xb0 vfs_read+0xf6/0x380 ksys_read+0x6d/0xf0 do_syscall_64+0x8c/0x1b0 entry_SYSCALL_64_after_hwframe+0x76/0x7e We fix this by checking the existence of a super-block for both the internal and external case. 
Fixes: 6ec1f0239485 ("md/md-bitmap: fix stats collection for external bitmaps") Cc: stable@vger.kernel.org Reported-by: Gerald Gibson Signed-off-by: HÃ¥kon Bugge Link: https://lore.kernel.org/linux-raid/20250702091035.2061312-1-haakon.bugge@oracle.com Signed-off-by: Yu Kuai --- drivers/md/md-bitmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index bd694910b01b..7f524a26cebc 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2366,8 +2366,7 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (!bitmap->mddev->bitmap_info.external && - !bitmap->storage.sb_page) + if (!bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); From a77ffbe34d450dd685e55ba942b025ded2517aff Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 3 Jul 2025 18:03:10 -0400 Subject: [PATCH 080/234] bcachefs: btree node scan no longer uses btree cache Previously, btree node scan used the btree node cache to check if btree nodes were readable, but this is subject to interference from threads scanning different devices trying to read the same node - and more critically, nodes that we already attempted and failed to read before kicking off scan. Instead, we now allocate a 'struct btree' that does not live in the btree node cache, and call bch2_btree_node_read_done() directly. Cc: Nikita Ofitserov Reviewed-by: Nikita Ofitserov Reported-and-tested-by: Edoardo Codeglia Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_node_scan.c | 84 +++++++++++++++++------------------ 1 file changed, 41 insertions(+), 43 deletions(-) diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index 23d8c62ea4b6..42c9eb2c786e 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -75,39 +75,6 @@ static inline u64 bkey_journal_seq(struct bkey_s_c k) } } -static bool found_btree_node_is_readable(struct btree_trans *trans, - struct found_btree_node *f) -{ - struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } tmp; - - found_btree_node_to_key(&tmp.k, f); - - struct btree *b = bch2_btree_node_get_noiter(trans, &tmp.k, f->btree_id, f->level, false); - bool ret = !IS_ERR_OR_NULL(b); - if (!ret) - return ret; - - f->sectors_written = b->written; - f->journal_seq = le64_to_cpu(b->data->keys.journal_seq); - - struct bkey_s_c k; - struct bkey unpacked; - struct btree_node_iter iter; - for_each_btree_node_key_unpack(b, k, &iter, &unpacked) - f->journal_seq = max(f->journal_seq, bkey_journal_seq(k)); - - six_unlock_read(&b->c.lock); - - /* - * We might update this node's range; if that happens, we need the node - * to be re-read so the read path can trim keys that are no longer in - * this node - */ - if (b != btree_node_root(trans->c, b)) - bch2_btree_node_evict(trans, &tmp.k); - return ret; -} - static int found_btree_node_cmp_cookie(const void *_l, const void *_r) { const struct found_btree_node *l = _l; @@ -159,17 +126,17 @@ static const struct min_heap_callbacks found_btree_node_heap_cbs = { }; static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, - struct bio *bio, struct btree_node *bn, u64 offset) + struct btree *b, struct bio *bio, u64 offset) { struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes); + struct btree_node *bn = b->data; bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ); bio->bi_iter.bi_sector = offset; - bch2_bio_map(bio, 
bn, PAGE_SIZE); + bch2_bio_map(bio, b->data, c->opts.block_size); u64 submit_time = local_clock(); submit_bio_wait(bio); - bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status); if (bio->bi_status) { @@ -201,6 +168,14 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX) return; + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ); + bio->bi_iter.bi_sector = offset; + bch2_bio_map(bio, b->data, c->opts.btree_node_size); + + submit_time = local_clock(); + submit_bio_wait(bio); + bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status); + rcu_read_lock(); struct found_btree_node n = { .btree_id = BTREE_NODE_ID(bn), @@ -217,7 +192,20 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, }; rcu_read_unlock(); - if (bch2_trans_run(c, found_btree_node_is_readable(trans, &n))) { + found_btree_node_to_key(&b->key, &n); + + CLASS(printbuf, buf)(); + if (!bch2_btree_node_read_done(c, ca, b, NULL, &buf)) { + /* read_done will swap out b->data for another buffer */ + bn = b->data; + /* + * Grab journal_seq here because we want the max journal_seq of + * any bset; read_done sorts down to a single set and picks the + * max journal_seq + */ + n.journal_seq = le64_to_cpu(bn->keys.journal_seq), + n.sectors_written = b->written; + mutex_lock(&f->lock); if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) { bch_err(c, "try_read_btree_node() can't handle endian conversion"); @@ -237,12 +225,20 @@ static int read_btree_nodes_worker(void *p) struct find_btree_nodes_worker *w = p; struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes); struct bch_dev *ca = w->ca; - void *buf = (void *) __get_free_page(GFP_KERNEL); - struct bio *bio = bio_alloc(NULL, 1, 0, GFP_KERNEL); unsigned long last_print = jiffies; + struct btree *b = NULL; + struct bio *bio = NULL; - if (!buf || !bio) { - bch_err(c, "read_btree_nodes_worker: error allocating bio/buf"); + b = __bch2_btree_node_mem_alloc(c); + if (!b) { + bch_err(c, "read_btree_nodes_worker: error allocating buf"); + w->f->ret = -ENOMEM; + goto err; + } + + bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL); + if (!bio) { + bch_err(c, "read_btree_nodes_worker: error allocating bio"); w->f->ret = -ENOMEM; goto err; } @@ -266,11 +262,13 @@ static int read_btree_nodes_worker(void *p) !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c))) continue; - try_read_btree_node(w->f, ca, bio, buf, sector); + try_read_btree_node(w->f, ca, b, bio, sector); } err: + if (b) + __btree_node_data_free(b); + kfree(b); bio_put(bio); - free_page((unsigned long) buf); enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan); closure_put(w->cl); kfree(w); From 14dd95647ea533c7d7c528b9bb7e163e433b47b9 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 3 Jul 2025 19:19:21 -0400 Subject: [PATCH 081/234] bcachefs: btree read retry fixes Fix btree node read retries after validate errors: __btree_err() is the wrong place to flag a topology error: that is done by btree_lost_data(). Additionally, some calls to bch2_bkey_pick_read_device() were not updated in the 6.16 rework for improved log messages; we were failing to signal that we still had a retry. 
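The call-site half of this is a plain return-convention mismatch: once a helper returns 1 for 'found a device to retry from', 0 for 'none left' and negative for errors, boolean-style checks written for the old convention quietly invert the result. A stand-alone illustration with a hypothetical helper, not the real signature:

  #include <stdbool.h>

  /* Hypothetical helper with the new convention:
   * 1 = retry target found, 0 = none left, <0 = error. */
  static int pick_retry_target(void)
  {
      return 1;
  }

  static bool have_retry_old_style(void)
  {
      /* wrong under the new convention: treats "found" (1) as failure */
      return !pick_retry_target();
  }

  static bool have_retry_new_style(void)
  {
      /* correct: only a strictly positive return means we can retry */
      return pick_retry_target() > 0;
  }

  int main(void)
  {
      return (have_retry_old_style() != have_retry_new_style()) ? 0 : 1;
  }
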
Cc: Nikita Ofitserov Cc: Alan Huang Reported-and-tested-by: Edoardo Codeglia Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_io.c | 8 +++----- fs/bcachefs/errcode.h | 1 - fs/bcachefs/error.c | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index e874a4357f64..a4cc72986e36 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -568,9 +568,9 @@ static int __btree_err(int ret, bch2_mark_btree_validate_failure(failed, ca->dev_idx); struct extent_ptr_decoded pick; - have_retry = !bch2_bkey_pick_read_device(c, + have_retry = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), - failed, &pick, -1); + failed, &pick, -1) == 1; } if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry) @@ -615,7 +615,6 @@ static int __btree_err(int ret, goto out; case -BCH_ERR_btree_node_read_err_bad_node: prt_str(&out, ", "); - ret = __bch2_topology_error(c, &out); break; } @@ -644,7 +643,6 @@ static int __btree_err(int ret, goto out; case -BCH_ERR_btree_node_read_err_bad_node: prt_str(&out, ", "); - ret = __bch2_topology_error(c, &out); break; } print: @@ -1408,7 +1406,7 @@ static void btree_node_read_work(struct work_struct *work) ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), &failed, &rb->pick, -1); - if (ret) { + if (ret <= 0) { set_btree_node_read_error(b); break; } diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index 86a842f1e88e..acc3b7b67704 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -282,7 +282,6 @@ x(EIO, sb_not_downgraded) \ x(EIO, btree_node_write_all_failed) \ x(EIO, btree_node_read_error) \ - x(EIO, btree_node_read_validate_error) \ x(EIO, btree_need_topology_repair) \ x(EIO, bucket_ref_update) \ x(EIO, trigger_alloc) \ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index b2a6c041e165..ea37f5af1800 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -103,7 +103,7 @@ int __bch2_topology_error(struct bch_fs *c, struct printbuf *out) return bch_err_throw(c, btree_need_topology_repair); } else { return bch2_run_explicit_recovery_pass(c, out, BCH_RECOVERY_PASS_check_topology, 0) ?: - bch_err_throw(c, btree_node_read_validate_error); + bch_err_throw(c, btree_need_topology_repair); } } From 63a83463d278ea9f9eff7f58708322856e106d87 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 4 Jul 2025 12:18:02 -0400 Subject: [PATCH 082/234] bcachefs: Fix bch2_btree_transactions_read() synchronization Since we're accessing btree_trans objects owned by another thread, we need to guard against using pointers to freed key cache entries: we need our own srcu read lock, and we should skip a btree_trans if it didn't hold the srcu lock (and thus it might have pointers to freed key cache entries). 
00693 Mem abort info: 00693 ESR = 0x0000000096000005 00693 EC = 0x25: DABT (current EL), IL = 32 bits 00693 SET = 0, FnV = 0 00693 EA = 0, S1PTW = 0 00693 FSC = 0x05: level 1 translation fault 00693 Data abort info: 00693 ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000 00693 CM = 0, WnR = 0, TnD = 0, TagAccess = 0 00693 GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 00693 user pgtable: 4k pages, 39-bit VAs, pgdp=000000012e650000 00693 [000000008fb96218] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000 00693 Internal error: Oops: 0000000096000005 [#1] SMP 00693 Modules linked in: 00693 CPU: 0 UID: 0 PID: 4307 Comm: cat Not tainted 6.16.0-rc2-ktest-g9e15af94fd86 #27578 NONE 00693 Hardware name: linux,dummy-virt (DT) 00693 pstate: 60001005 (nZCv daif -PAN -UAO -TCO -DIT +SSBS BTYPE=--) 00693 pc : six_lock_counts+0x20/0xe8 00693 lr : bch2_btree_bkey_cached_common_to_text+0x38/0x130 00693 sp : ffffff80ca98bb60 00693 x29: ffffff80ca98bb60 x28: 000000008fb96200 x27: 0000000000000007 00693 x26: ffffff80eafd06b8 x25: 0000000000000000 x24: ffffffc080d75a60 00693 x23: ffffff80eafd0000 x22: ffffffc080bdfcc0 x21: ffffff80eafd0210 00693 x20: ffffff80c192ff08 x19: 000000008fb96200 x18: 00000000ffffffff 00693 x17: 0000000000000000 x16: 0000000000000000 x15: 00000000ffffffff 00693 x14: 0000000000000000 x13: ffffff80ceb5a29a x12: 20796220646c6568 00693 x11: 72205d3e303c5b20 x10: 0000000000000020 x9 : ffffffc0805fb6b0 00693 x8 : 0000000000000020 x7 : 0000000000000000 x6 : 0000000000000020 00693 x5 : ffffff80ceb5a29c x4 : 0000000000000001 x3 : 000000000000029c 00693 x2 : 0000000000000000 x1 : ffffff80ef66c000 x0 : 000000008fb96200 00693 Call trace: 00693 six_lock_counts+0x20/0xe8 (P) 00693 bch2_btree_bkey_cached_common_to_text+0x38/0x130 00693 bch2_btree_trans_to_text+0x260/0x2a8 00693 bch2_btree_transactions_read+0xac/0x1e8 00693 full_proxy_read+0x74/0xd8 00693 vfs_read+0x90/0x300 00693 ksys_read+0x6c/0x108 00693 __arm64_sys_read+0x20/0x30 00693 invoke_syscall.constprop.0+0x54/0xe8 00693 do_el0_svc+0x44/0xc8 00693 el0_svc+0x18/0x58 00693 el0t_64_sync_handler+0x104/0x130 00693 el0t_64_sync+0x154/0x158 00693 Code: 910003fd f9423c22 f90017e2 d2800002 (f9400c01) 00693 ---[ end trace 0000000000000000 ]--- Signed-off-by: Kent Overstreet --- fs/bcachefs/debug.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index 79d64052215c..07c2a0f73cc2 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -584,6 +584,8 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf, i->ubuf = buf; i->size = size; i->ret = 0; + + int srcu_idx = srcu_read_lock(&c->btree_trans_barrier); restart: seqmutex_lock(&c->btree_trans_lock); list_sort(&c->btree_trans_list, list_ptr_order_cmp); @@ -597,6 +599,11 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf, if (!closure_get_not_zero(&trans->ref)) continue; + if (!trans->srcu_held) { + closure_put(&trans->ref); + continue; + } + u32 seq = seqmutex_unlock(&c->btree_trans_lock); bch2_btree_trans_to_text(&i->buf, trans); @@ -618,6 +625,8 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf, } seqmutex_unlock(&c->btree_trans_lock); unlocked: + srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); + if (i->buf.allocation_failure) ret = -ENOMEM; From 9ee124caae1b0defd0e02c65686f539845a3ac9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 4 Jul 2025 19:24:17 +0200 Subject: [PATCH 083/234] pwm: Fix invalid state 
detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 9dd42d019e63 ("pwm: Allow pwm state transitions from an invalid state") intended to allow some state transitions that were not allowed before. The idea is sane and back then I also got the code comment right, but the check for enabled is bogus. This resulted in state transitions for enabled states to be allowed to have invalid duty/period settings and thus it can happen that low-level drivers get requests for invalid states🙄. Invert the check to allow state transitions for disabled states only. Fixes: 9dd42d019e63 ("pwm: Allow pwm state transitions from an invalid state") Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20250704172416.626433-2-u.kleine-koenig@baylibre.com Cc: stable@vger.kernel.org Signed-off-by: Uwe Kleine-König --- drivers/pwm/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 4d842c692194..edf776b8ad53 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -596,7 +596,7 @@ static bool pwm_state_valid(const struct pwm_state *state) * and supposed to be ignored. So also ignore any strange values and * consider the state ok. */ - if (state->enabled) + if (!state->enabled) return true; if (!state->period) From 505b730ede7f5c4083ff212aa955155b5b92e574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 4 Jul 2025 19:27:27 +0200 Subject: [PATCH 084/234] pwm: mediatek: Ensure to disable clocks in error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After enabling the clocks each error path must disable the clocks again. One of them failed to do so. Unify the error paths to use goto to make it harder for future changes to add a similar bug. 
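The restructuring is the usual single-exit cleanup idiom: once the resource is acquired, every later failure jumps to one label that releases it. A generic stand-alone sketch with stub functions, not the mediatek driver's API:

  #include <stdbool.h>

  static bool acquire(void)  { return true; }   /* e.g. enable the clocks */
  static void release(void)  { }                /* e.g. disable the clocks */
  static bool step_ok(int n) { return n > 0; }  /* stand-in for real checks */

  static int configure(void)
  {
      int ret = 0;

      if (!acquire())
          return -1;

      if (!step_ok(1)) {
          ret = -1;
          goto out;
      }
      if (!step_ok(2)) {
          ret = -1;
          goto out;
      }
  out:
      release();  /* single exit point: the resource is always released */
      return ret;
  }

  int main(void)
  {
      return configure() ? 1 : 0;
  }
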
Fixes: 7ca59947b5fc ("pwm: mediatek: Prevent divide-by-zero in pwm_mediatek_config()") Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20250704172728.626815-2-u.kleine-koenig@baylibre.com Cc: stable@vger.kernel.org Signed-off-by: Uwe Kleine-König --- drivers/pwm/pwm-mediatek.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 7eaab5831499..33d3554b9197 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -130,8 +130,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, return ret; clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); - if (!clk_rate) - return -EINVAL; + if (!clk_rate) { + ret = -EINVAL; + goto out; + } /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) @@ -150,9 +152,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, } if (clkdiv > PWM_CLK_DIV_MAX) { - pwm_mediatek_clk_disable(chip, pwm); dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns); - return -EINVAL; + ret = -EINVAL; + goto out; } if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) { @@ -169,9 +171,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); +out: pwm_mediatek_clk_disable(chip, pwm); - return 0; + return ret; } static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) From c5fd399a24c8e2865524361f7dc4d4a6899be4f4 Mon Sep 17 00:00:00 2001 From: Lachlan Hodges Date: Tue, 1 Jul 2025 17:55:41 +1000 Subject: [PATCH 085/234] wifi: mac80211: correctly identify S1G short beacon mac80211 identifies a short beacon by the presence of the next TBTT field, however the standard actually doesn't explicitly state that the next TBTT can't be in a long beacon or even that it is required in a short beacon - and as a result this validation does not work for all vendor implementations. The standard explicitly states that an S1G long beacon shall contain the S1G beacon compatibility element as the first element in a beacon transmitted at a TBTT that is not a TSBTT (Target Short Beacon Transmission Time) as per IEEE80211-2024 11.1.3.10.1. This is validated by 9.3.4.3 Table 9-76 which states that the S1G beacon compatibility element is only allowed in the full set and is not allowed in the minimum set of elements permitted for use within short beacons. Correctly identify short beacons by the lack of an S1G beacon compatibility element as the first element in an S1G beacon frame. 
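The classification itself reduces to looking at the first TLV element in the beacon's element buffer. A stand-alone sketch; the element ID value below is a placeholder, not the real WLAN_EID_S1G_BCN_COMPAT number:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  #define EID_S1G_BCN_COMPAT 0xd5  /* placeholder value for illustration */

  /* Elements are TLVs: one ID byte, one length byte, then the payload.
   * A short beacon either carries no elements at all or does not start
   * with the beacon compatibility element. */
  static bool is_short_beacon(const uint8_t *elems, size_t len)
  {
      if (len < 2)  /* not even a full ID/length pair */
          return true;
      return elems[0] != EID_S1G_BCN_COMPAT;
  }

  int main(void)
  {
      const uint8_t long_bcn[]  = { EID_S1G_BCN_COMPAT, 0x00 };
      const uint8_t short_bcn[] = { 0x01, 0x00 };

      return (!is_short_beacon(long_bcn, sizeof(long_bcn)) &&
              is_short_beacon(short_bcn, sizeof(short_bcn))) ? 0 : 1;
  }
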
Fixes: 9eaffe5078ca ("cfg80211: convert S1G beacon to scan results") Signed-off-by: Simon Wadsworth Signed-off-by: Lachlan Hodges Link: https://patch.msgid.link/20250701075541.162619-1-lachlan.hodges@morsemicro.com Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 45 ++++++++++++++++++++++++++++----------- net/mac80211/mlme.c | 7 ++++-- 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 22f39e5e2ff1..996be3c2cff0 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -662,18 +662,6 @@ static inline bool ieee80211_s1g_has_cssid(__le16 fc) (fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID)); } -/** - * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon - * @fc: frame control bytes in little-endian byteorder - * Return: whether or not the frame is an S1G short beacon, - * i.e. it is an S1G beacon with 'next TBTT' flag set - */ -static inline bool ieee80211_is_s1g_short_beacon(__le16 fc) -{ - return ieee80211_is_s1g_beacon(fc) && - (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT)); -} - /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder @@ -4901,6 +4889,39 @@ static inline bool ieee80211_is_ftm(struct sk_buff *skb) return false; } +/** + * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon + * @fc: frame control bytes in little-endian byteorder + * @variable: pointer to the beacon frame elements + * @variable_len: length of the frame elements + * Return: whether or not the frame is an S1G short beacon. As per + * IEEE80211-2024 11.1.3.10.1, The S1G beacon compatibility element shall + * always be present as the first element in beacon frames generated at a + * TBTT (Target Beacon Transmission Time), so any frame not containing + * this element must have been generated at a TSBTT (Target Short Beacon + * Transmission Time) that is not a TBTT. Additionally, short beacons are + * prohibited from containing the S1G beacon compatibility element as per + * IEEE80211-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with + * either no elements or the first element is not the beacon compatibility + * element, we have a short beacon. + */ +static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable, + size_t variable_len) +{ + if (!ieee80211_is_s1g_beacon(fc)) + return false; + + /* + * If the frame does not contain at least 1 element (this is perfectly + * valid in a short beacon) and is an S1G beacon, we have a short + * beacon. 
+ */ + if (variable_len < 2) + return true; + + return variable[0] != WLAN_EID_S1G_BCN_COMPAT; +} + struct element { u8 id; u8 datalen; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2d46d4af60d7..7ddb8e77b4c7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -7195,6 +7195,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_mgmt *mgmt = (void *) hdr; + struct ieee80211_ext *ext = NULL; size_t baselen; struct ieee802_11_elems *elems; struct ieee80211_local *local = sdata->local; @@ -7220,7 +7221,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, /* Process beacon from the current BSS */ bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { - struct ieee80211_ext *ext = (void *) mgmt; + ext = (void *)mgmt; variable = ext->u.s1g_beacon.variable + ieee80211_s1g_optional_len(ext->frame_control); } @@ -7407,7 +7408,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, } if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || - ieee80211_is_s1g_short_beacon(mgmt->frame_control)) + (ext && ieee80211_is_s1g_short_beacon(ext->frame_control, + parse_params.start, + parse_params.len))) goto free; link->u.mgd.beacon_crc = ncrc; link->u.mgd.beacon_crc_valid = true; From 8af596e8ae44c3bcf36d1aea09fc9a6f17c555e5 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 1 Jul 2025 09:22:13 +0200 Subject: [PATCH 086/234] wifi: mac80211: clear frame buffer to never leak stack In disconnect paths paths, local frame buffers are used to build deauthentication frames to send them over the air and as notifications to userspace. Some internal error paths (that, given no other bugs, cannot happen) don't always initialize the buffers before sending them to userspace, so in the presence of other bugs they can leak stack content. Initialize the buffers to avoid the possibility of this happening. Suggested-by: Zhongqiu Han Link: https://patch.msgid.link/20250701072213.13004-2-johannes@sipsolutions.net Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7ddb8e77b4c7..d26dcee5683a 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3934,6 +3934,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, lockdep_assert_wiphy(local->hw.wiphy); + if (frame_buf) + memset(frame_buf, 0, IEEE80211_DEAUTH_FRAME_LEN); + if (WARN_ON(!ap_sta)) return; From e1e6ebf490e55fee1ae573aa443c1d4aea5e4a40 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 30 Jun 2025 15:45:01 +0200 Subject: [PATCH 087/234] wifi: mac80211: fix non-transmitted BSSID profile search When the non-transmitted BSSID profile is found, immediately return from the search to not return the wrong profile_len when the profile is found in a multiple BSSID element that isn't the last one in the frame. 
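The bug pattern is worth spelling out: a break only leaves the inner per-subelement loop, so the outer loop moves on to later multiple-BSSID elements and clobbers the length that was just computed. Returning at the match point avoids that. A minimal stand-alone model, not the mac80211 parser:

  #include <stddef.h>

  /* Scan groups of items for a match, recomputing a per-group length.
   * Returning from inside the match keeps the result; a break here would
   * let the outer loop continue and overwrite profile_len with data from
   * later, non-matching groups. */
  static size_t find_profile(const int groups[][3], size_t ngroups, int wanted)
  {
      size_t profile_len = 0;

      for (size_t g = 0; g < ngroups; g++) {
          profile_len = 0;
          for (size_t i = 0; i < 3; i++) {
              profile_len += 1;
              if (groups[g][i] == wanted)
                  return profile_len;
          }
      }
      return 0;
  }

  int main(void)
  {
      const int groups[2][3] = { { 1, 7, 3 }, { 4, 5, 6 } };

      return find_profile(groups, 2, 7) == 2 ? 0 : 1;
  }
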
Fixes: 5023b14cf4df ("mac80211: support profile split between elements") Reported-by: Michael-CY Lee Link: https://patch.msgid.link/20250630154501.f26cd45a0ecd.I28e0525d06e8a99e555707301bca29265cf20dc8@changeid Signed-off-by: Johannes Berg --- net/mac80211/parse.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index 96584b39215e..c5e0f7f46004 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -758,7 +758,6 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, { const struct element *elem, *sub; size_t profile_len = 0; - bool found = false; if (!bss || !bss->transmitted_bss) return profile_len; @@ -809,15 +808,14 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, index[2], new_bssid); if (ether_addr_equal(new_bssid, bss->bssid)) { - found = true; elems->bssid_index_len = index[1]; elems->bssid_index = (void *)&index[2]; - break; + return profile_len; } } } - return found ? profile_len : 0; + return 0; } static void From 3b602ddc0df723992721b0d286c90c9bdd755b34 Mon Sep 17 00:00:00 2001 From: Vitor Soares Date: Tue, 1 Jul 2025 15:26:43 +0100 Subject: [PATCH 088/234] wifi: mwifiex: discard erroneous disassoc frames on STA interface When operating in concurrent STA/AP mode with host MLME enabled, the firmware incorrectly sends disassociation frames to the STA interface when clients disconnect from the AP interface. This causes kernel warnings as the STA interface processes disconnect events that don't apply to it: [ 1303.240540] WARNING: CPU: 0 PID: 513 at net/wireless/mlme.c:141 cfg80211_process_disassoc+0x78/0xec [cfg80211] [ 1303.250861] Modules linked in: 8021q garp stp mrp llc rfcomm bnep btnxpuart nls_iso8859_1 nls_cp437 onboard_us [ 1303.327651] CPU: 0 UID: 0 PID: 513 Comm: kworker/u9:2 Not tainted 6.16.0-rc1+ #3 PREEMPT [ 1303.335937] Hardware name: Toradex Verdin AM62 WB on Verdin Development Board (DT) [ 1303.343588] Workqueue: MWIFIEX_RX_WORK_QUEUE mwifiex_rx_work_queue [mwifiex] [ 1303.350856] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 1303.357904] pc : cfg80211_process_disassoc+0x78/0xec [cfg80211] [ 1303.364065] lr : cfg80211_process_disassoc+0x70/0xec [cfg80211] [ 1303.370221] sp : ffff800083053be0 [ 1303.373590] x29: ffff800083053be0 x28: 0000000000000000 x27: 0000000000000000 [ 1303.380855] x26: 0000000000000000 x25: 00000000ffffffff x24: ffff000002c5b8ae [ 1303.388120] x23: ffff000002c5b884 x22: 0000000000000001 x21: 0000000000000008 [ 1303.395382] x20: ffff000002c5b8ae x19: ffff0000064dd408 x18: 0000000000000006 [ 1303.402646] x17: 3a36333a61623a30 x16: 32206d6f72662063 x15: ffff800080bfe048 [ 1303.409910] x14: ffff000003625300 x13: 0000000000000001 x12: 0000000000000000 [ 1303.417173] x11: 0000000000000002 x10: ffff000003958600 x9 : ffff000003625300 [ 1303.424434] x8 : ffff00003fd9ef40 x7 : ffff0000039fc280 x6 : 0000000000000002 [ 1303.431695] x5 : ffff0000038976d4 x4 : 0000000000000000 x3 : 0000000000003186 [ 1303.438956] x2 : 000000004836ba20 x1 : 0000000000006986 x0 : 00000000d00479de [ 1303.446221] Call trace: [ 1303.448722] cfg80211_process_disassoc+0x78/0xec [cfg80211] (P) [ 1303.454894] cfg80211_rx_mlme_mgmt+0x64/0xf8 [cfg80211] [ 1303.460362] mwifiex_process_mgmt_packet+0x1ec/0x460 [mwifiex] [ 1303.466380] mwifiex_process_sta_rx_packet+0x1bc/0x2a0 [mwifiex] [ 1303.472573] mwifiex_handle_rx_packet+0xb4/0x13c [mwifiex] [ 1303.478243] mwifiex_rx_work_queue+0x158/0x198 [mwifiex] [ 1303.483734] process_one_work+0x14c/0x28c 
[ 1303.487845] worker_thread+0x2cc/0x3d4 [ 1303.491680] kthread+0x12c/0x208 [ 1303.495014] ret_from_fork+0x10/0x20 Add validation in the STA receive path to verify that disassoc/deauth frames originate from the connected AP. Frames that fail this check are discarded early, preventing them from reaching the MLME layer and triggering WARN_ON(). This filtering logic is similar to that used in the ieee80211_rx_mgmt_disassoc() function in mac80211, which drops disassoc frames that don't match the current BSSID (!ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)), ensuring only relevant frames are processed. Tested on: - 8997 with FW 16.68.1.p197 Fixes: 36995892c271 ("wifi: mwifiex: add host mlme for client mode") Cc: stable@vger.kernel.org Signed-off-by: Vitor Soares Reviewed-by: Jeff Chen Reviewed-by: Francesco Dolcini Link: https://patch.msgid.link/20250701142643.658990-1-ivitro@gmail.com Signed-off-by: Johannes Berg --- drivers/net/wireless/marvell/mwifiex/util.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 4c5b1de0e936..6882e90e90b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -459,7 +459,9 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, "auth: receive authentication from %pM\n", ieee_hdr->addr3); } else { - if (!priv->wdev.connected) + if (!priv->wdev.connected || + !ether_addr_equal(ieee_hdr->addr3, + priv->curr_bss_params.bss_descriptor.mac_address)) return 0; if (ieee80211_is_deauth(ieee_hdr->frame_control)) { From 58fcb1b4287ce38850402bb2bb16d09bf77b91d9 Mon Sep 17 00:00:00 2001 From: Moon Hee Lee Date: Thu, 3 Jul 2025 12:37:57 -0700 Subject: [PATCH 089/234] wifi: mac80211: reject VHT opmode for unsupported channel widths VHT operating mode notifications are not defined for channel widths below 20 MHz. In particular, 5 MHz and 10 MHz are not valid under the VHT specification and must be rejected. Without this check, malformed notifications using these widths may reach ieee80211_chan_width_to_rx_bw(), leading to a WARN_ON due to invalid input. This issue was reported by syzbot. Reject these unsupported widths early in sta_link_apply_parameters() when opmode_notif is used. The accepted set includes 20, 40, 80, 160, and 80+80 MHz, which are valid for VHT. While 320 MHz is not defined for VHT, it is allowed to avoid rejecting HE or EHT clients that may still send a VHT opmode notification.
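For reference, the check being added amounts to an allow-list on the link's operating channel width; a simplified standalone sketch (the helper name here is illustrative, the real change is the switch in the hunk below):

    /* accept a VHT opmode notification only for widths the spec (or HE/EHT) covers */
    static bool vht_opmode_width_ok(enum nl80211_chan_width width)
    {
            switch (width) {
            case NL80211_CHAN_WIDTH_20:
            case NL80211_CHAN_WIDTH_40:
            case NL80211_CHAN_WIDTH_80:
            case NL80211_CHAN_WIDTH_160:
            case NL80211_CHAN_WIDTH_80P80:
            case NL80211_CHAN_WIDTH_320:    /* not VHT, kept for HE/EHT clients */
                    return true;
            default:                        /* 5/10 MHz and anything else */
                    return false;
            }
    }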
Reported-by: syzbot+ededba317ddeca8b3f08@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=ededba317ddeca8b3f08 Fixes: 751e7489c1d7 ("wifi: mac80211: expose ieee80211_chan_width_to_rx_bw() to drivers") Tested-by: syzbot+ededba317ddeca8b3f08@syzkaller.appspotmail.com Signed-off-by: Moon Hee Lee Link: https://patch.msgid.link/20250703193756.46622-2-moonhee.lee.ca@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d9d88f2f2831..954795b0fe48 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1959,6 +1959,20 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, ieee80211_sta_init_nss(link_sta); if (params->opmode_notif_used) { + enum nl80211_chan_width width = link->conf->chanreq.oper.width; + + switch (width) { + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_320: /* not VHT, allowed for HE/EHT */ + break; + default: + return -EINVAL; + } + /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it */ From 2ce6ad9262256dd345cb104ba0ac6cf4aeed25a3 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 6 Jul 2025 11:20:53 +0200 Subject: [PATCH 090/234] wifi: rt2x00: fix remove callback type mismatch The function is used as remove callback for a platform driver. It was missed during the conversion from int to void Fixes: 0edb555a65d1 ("platform: Make platform_driver::remove() return void") Signed-off-by: Felix Fietkau Link: https://patch.msgid.link/20250706092053.97724-1-nbd@nbd.name Signed-off-by: Johannes Berg --- drivers/net/wireless/ralink/rt2x00/rt2x00soc.c | 4 +--- drivers/net/wireless/ralink/rt2x00/rt2x00soc.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index eface610178d..f7f3a2340c39 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -108,7 +108,7 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) } EXPORT_SYMBOL_GPL(rt2x00soc_probe); -int rt2x00soc_remove(struct platform_device *pdev) +void rt2x00soc_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; @@ -119,8 +119,6 @@ int rt2x00soc_remove(struct platform_device *pdev) rt2x00lib_remove_dev(rt2x00dev); rt2x00soc_free_reg(rt2x00dev); ieee80211_free_hw(hw); - - return 0; } EXPORT_SYMBOL_GPL(rt2x00soc_remove); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h index 021fd06b3627..d6226b8a10e0 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h @@ -17,7 +17,7 @@ * SoC driver handlers. 
*/ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops); -int rt2x00soc_remove(struct platform_device *pdev); +void rt2x00soc_remove(struct platform_device *pdev); #ifdef CONFIG_PM int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state); int rt2x00soc_resume(struct platform_device *pdev); From 737bb912ebbe4571195c56eba557c4d7315b26fb Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Mon, 16 Jun 2025 02:46:35 +0200 Subject: [PATCH 091/234] wifi: prevent A-MSDU attacks in mesh networks This patch is a mitigation to prevent the A-MSDU spoofing vulnerability for mesh networks. The initial update to the IEEE 802.11 standard, in response to the FragAttacks, missed this case (CVE-2025-27558). It can be considered a variant of CVE-2020-24588 but for mesh networks. This patch tries to detect if a standard MSDU was turned into an A-MSDU by an adversary. This is done by parsing a received A-MSDU as a standard MSDU, calculating the length of the Mesh Control header, and seeing if the 6 bytes after this header equal the start of an rfc1042 header. If equal, this is a strong indication of an ongoing attack attempt. This defense was tested with mac80211_hwsim against a mesh network that uses an empty Mesh Address Extension field, i.e., when four addresses are used, and when using a 12-byte Mesh Address Extension field, i.e., when six addresses are used. Functionality of normal MSDUs and A-MSDUs was also tested, and confirmed working, when using both an empty and 12-byte Mesh Address Extension field. It was also tested with mac80211_hwsim that A-MSDU attacks in non-mesh networks keep being detected and prevented. Note that the vulnerability being patched, and the defense being implemented, was also discussed in the following paper and in the following IEEE 802.11 presentation: https://papers.mathyvanhoef.com/wisec2025.pdf https://mentor.ieee.org/802.11/dcn/25/11-25-0949-00-000m-a-msdu-mesh-spoof-protection.docx Cc: stable@vger.kernel.org Signed-off-by: Mathy Vanhoef Link: https://patch.msgid.link/20250616004635.224344-1-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- net/wireless/util.c | 52 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index ed868c0f7ca8..1ad5a6bdfd75 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -820,6 +820,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr) } EXPORT_SYMBOL(ieee80211_is_valid_amsdu); + +/* + * Detects if an MSDU frame was maliciously converted into an A-MSDU + * frame by an adversary. This is done by parsing the received frame + * as if it were a regular MSDU, even though the A-MSDU flag is set. + * + * For non-mesh interfaces, detection involves checking whether the + * payload, when interpreted as an MSDU, begins with a valid RFC1042 + * header. This is done by comparing the A-MSDU subheader's destination + * address to the start of the RFC1042 header. + * + * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field + * and an optional variable-length Mesh Address Extension field before + * the RFC1042 header. The position of the RFC1042 header must therefore + * be calculated based on the mesh header length. 
+ * + * Since this function intentionally parses an A-MSDU frame as an MSDU, + * it only assumes that the A-MSDU subframe header is present, and + * beyond this it performs its own bounds checks under the assumption + * that the frame is instead parsed as a non-aggregated MSDU. + */ +static bool +is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb, + enum nl80211_iftype iftype) +{ + int offset; + + /* Non-mesh case can be directly compared */ + if (iftype != NL80211_IFTYPE_MESH_POINT) + return ether_addr_equal(eth->h_dest, rfc1042_header); + + offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]); + if (offset == 6) { + /* Mesh case with empty address extension field */ + return ether_addr_equal(eth->h_source, rfc1042_header); + } else if (offset + ETH_ALEN <= skb->len) { + /* Mesh case with non-empty address extension field */ + u8 temp[ETH_ALEN]; + + skb_copy_bits(skb, offset, temp, ETH_ALEN); + return ether_addr_equal(temp, rfc1042_header); + } + + return false; +} + void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, const u8 *addr, enum nl80211_iftype iftype, const unsigned int extra_headroom, @@ -861,8 +907,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, /* the last MSDU has no padding */ if (subframe_len > remaining) goto purge; - /* mitigate A-MSDU aggregation injection attacks */ - if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header)) + /* mitigate A-MSDU aggregation injection attacks, to be + * checked when processing first subframe (offset == 0). + */ + if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype)) goto purge; offset += sizeof(struct ethhdr); From 85e323bdbe28d4638aaefd8d9192763874efe9b0 Mon Sep 17 00:00:00 2001 From: Baojun Xu Date: Mon, 7 Jul 2025 17:05:13 +0800 Subject: [PATCH 092/234] ALSA: hda/tas2781: Fix calibration data parser issue Calibration data is copied from a position further back in the buffer towards the front. A variable (tmp_val) points at the top of the calibration data buffer, and tmp_val[1] holds the number of nodes in the original calibration data structure; it is overwritten by the first copy, so it can no longer be used as the node-count bound in the for loop. Save the node count (tmp_val[1]) in a new variable instead and use it in the for loop to check whether the last node has been reached. The offset also has to be incremented so that it points at the calibration data inside the node, and the position the data is saved to has to be advanced by one byte accordingly.
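The bug is an instance of a generic pattern: when a loop bound lives inside the very buffer the loop overwrites, the bound has to be latched before the first copy. A minimal sketch of the idea (illustrative only, names do not match the driver):

    /* buf[1] holds the node count, but the copy below clobbers buf[] */
    unsigned int node_num = buf[1];         /* latch the bound first */
    unsigned int j;

    for (j = 0; j < node_num; j++) {
            /* ... copy node j's calibration data towards the front of buf ... */
    }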
Fixes: 4fe238513407 ("ALSA: hda/tas2781: Move and unified the calibrated-data getting function for SPI and I2C into the tas2781_hda lib") Signed-off-by: Baojun Xu Link: https://patch.msgid.link/20250707090513.1462-1-baojun.xu@ti.com Signed-off-by: Takashi Iwai --- sound/pci/hda/tas2781_hda.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/sound/pci/hda/tas2781_hda.c b/sound/pci/hda/tas2781_hda.c index 5f1d4b3e9688..34217ce9f28e 100644 --- a/sound/pci/hda/tas2781_hda.c +++ b/sound/pci/hda/tas2781_hda.c @@ -44,7 +44,7 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) TASDEVICE_REG(0, 0x13, 0x70), TASDEVICE_REG(0, 0x18, 0x7c), }; - unsigned int crc, oft; + unsigned int crc, oft, node_num; unsigned char *buf; int i, j, k, l; @@ -80,8 +80,9 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) dev_err(p->dev, "%s: CRC error\n", __func__); return; } + node_num = tmp_val[1]; - for (j = 0, k = 0; j < tmp_val[1]; j++) { + for (j = 0, k = 0; j < node_num; j++) { oft = j * 6 + 3; if (tmp_val[oft] == TASDEV_UEFI_CALI_REG_ADDR_FLG) { for (i = 0; i < TASDEV_CALIB_N; i++) { @@ -99,8 +100,9 @@ static void tas2781_apply_calib(struct tasdevice_priv *p) } data[l] = k; + oft++; for (i = 0; i < TASDEV_CALIB_N * 4; i++) - data[l + i] = data[4 * oft + i]; + data[l + i + 1] = data[4 * oft + i]; k++; } } From d78f76457d70d30e80b5d2e067d45de7a0505fc0 Mon Sep 17 00:00:00 2001 From: Edson Juliano Drosdeck Date: Mon, 7 Jul 2025 08:45:37 -0300 Subject: [PATCH 093/234] ALSA: hda/realtek: Enable headset Mic on Positivo K116J Positivo K116J is equipped with ALC269VC and needs a fix to make the headset mic work. The internal microphone boost also has to be limited. Signed-off-by: Edson Juliano Drosdeck Link: https://patch.msgid.link/20250707114537.8291-1-edson.drosdeck@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c7720a2f47c3..060db37eab83 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -11426,6 +11426,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), SND_PCI_QUIRK(0x2782, 0x1407, "Positivo P15X", ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC), + SND_PCI_QUIRK(0x2782, 0x1409, "Positivo K116J", ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC), SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX), SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX), SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME), From 203817de269539c062724d97dfa5af3cdf77a3ec Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 7 Jul 2025 09:52:33 +0100 Subject: [PATCH 094/234] io_uring/zcrx: fix pp destruction warnings With multiple page pools and in some other cases we can have allocated niovs on page pool destruction. Remove a misplaced warning checking that all niovs are returned to zcrx on io_pp_zc_destroy(). It was reported before but apparently got lost.
Reported-by: Pedro Tammela Fixes: 34a3e60821ab9 ("io_uring/zcrx: implement zerocopy receive pp memory provider") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b9e6d919d2964bc48ddbf8eb52fc9f5d118e9bc1.1751878185.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/zcrx.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index ade4da9c4e31..67c518d22e0c 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -862,10 +862,7 @@ static int io_pp_zc_init(struct page_pool *pp) static void io_pp_zc_destroy(struct page_pool *pp) { struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp); - struct io_zcrx_area *area = ifq->area; - if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs)) - return; percpu_ref_put(&ifq->ctx->refs); } From d133036a0b23d3ef781d067ccdea6bbfb381e0cf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 17 Jun 2025 14:00:36 +1000 Subject: [PATCH 095/234] drm/nouveau/gsp: fix potential leak of memory used during acpi init If any of the ACPI calls fail, memory allocated for the input buffer would be leaked. Fix failure paths to free allocated memory. Also add checks to ensure the allocations succeeded in the first place. Reported-by: Danilo Krummrich Fixes: 176fdcbddfd2 ("drm/nouveau/gsp/r535: add support for booting GSP-RM") Signed-off-by: Ben Skeggs Signed-off-by: Danilo Krummrich Link: https://lore.kernel.org/r/20250617040036.2932-1-bskeggs@nvidia.com --- .../drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 23f80e167705..588cb4ab85cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -719,7 +719,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -727,17 +726,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -754,24 +758,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); From a0c5eac9181025b6d65ff25c203a7f10274f80c1 Mon Sep 17 00:00:00 2001 From: Lorenzo 
Bianconi Date: Thu, 5 Jun 2025 13:14:16 +0200 Subject: [PATCH 096/234] wifi: mt76: Assume __mt76_connac_mcu_alloc_sta_req runs in atomic context Rely on GFP_ATOMIC flag in __mt76_connac_mcu_alloc_sta_req since it can run in atomic context. This is a preliminary patch to fix a 'sleep while atomic' issue in mt7996_mac_sta_rc_work(). Fixes: 0762bdd30279 ("wifi: mt76: mt7996: rework mt7996_mac_sta_rc_work to support MLO") Signed-off-by: Lorenzo Bianconi Link: https://patch.msgid.link/20250605-mt7996-sleep-while-atomic-v1-1-d46d15f9203c@kernel.org Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index cb13d0a76878..fa08e952dffa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -287,7 +287,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif_link *mvif mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, &hdr.wlan_idx_hi); - skb = mt76_mcu_msg_alloc(dev, NULL, len); + skb = __mt76_mcu_msg_alloc(dev, NULL, len, len, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); From c772cd726eea6fe8fb81d2aeeacb18cecff73a7b Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 5 Jun 2025 13:14:17 +0200 Subject: [PATCH 097/234] wifi: mt76: Move RCU section in mt7996_mcu_set_fixed_field() Since mt76_mcu_skb_send_msg() routine can't be executed in atomic context, move RCU section in mt7996_mcu_set_fixed_field() and execute mt76_mcu_skb_send_msg() in non-atomic context. This is a preliminary patch to fix a 'sleep while atomic' issue in mt7996_mac_sta_rc_work(). 
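The overall shape of this series is the usual cure for a sleeping call under rcu_read_lock(): dereference and copy out what is needed inside the read-side critical section, drop it, and only then make the call that may sleep. A generic sketch (all names are placeholders, not the mt76 API):

    rcu_read_lock();
    obj = rcu_dereference(tbl[idx]);
    if (!obj) {
            rcu_read_unlock();
            return -ENODEV;
    }
    build_request(&req, obj);               /* copy everything the request needs */
    rcu_read_unlock();

    return send_request_may_sleep(&req);    /* safe: no RCU read lock held */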
Fixes: 0762bdd30279 ("wifi: mt76: mt7996: rework mt7996_mac_sta_rc_work to support MLO") Signed-off-by: Lorenzo Bianconi Link: https://patch.msgid.link/20250605-mt7996-sleep-while-atomic-v1-2-d46d15f9203c@kernel.org Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7996/mac.c | 5 +- .../net/wireless/mediatek/mt76/mt7996/main.c | 3 +- .../net/wireless/mediatek/mt76/mt7996/mcu.c | 68 +++++++++++++------ .../wireless/mediatek/mt76/mt7996/mt7996.h | 10 ++- 4 files changed, 57 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 0dbd4662bc84..7444bd374b50 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -2405,11 +2405,10 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) IEEE80211_RC_BW_CHANGED)) mt7996_mcu_add_rate_ctrl(dev, vif, link_conf, link_sta, link, msta_link, - true); + link_id, true); if (changed & IEEE80211_RC_SMPS_CHANGED) - mt7996_mcu_set_fixed_field(dev, link_sta, link, - msta_link, NULL, + mt7996_mcu_set_fixed_field(dev, msta, NULL, link_id, RATE_PARAM_MMPS_UPDATE); spin_lock_bh(&dev->mt76.sta_poll_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index 78ae9f5cb176..a096b5bab001 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -1114,7 +1114,8 @@ mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif, err = mt7996_mcu_add_rate_ctrl(dev, vif, link_conf, link_sta, link, - msta_link, false); + msta_link, link_id, + false); if (err) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index f0adc0b4b8b6..33c61e795b73 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -1905,22 +1905,35 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, MCU_WM_UNI_CMD(RA), true); } -int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, - struct ieee80211_link_sta *link_sta, - struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, - void *data, u32 field) +int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct mt7996_sta *msta, + void *data, u8 link_id, u32 field) { - struct sta_phy_uni *phy = data; + struct mt7996_vif *mvif = msta->vif; + struct mt7996_sta_link *msta_link; struct sta_rec_ra_fixed_uni *ra; + struct sta_phy_uni *phy = data; + struct mt76_vif_link *mlink; struct sk_buff *skb; + int err = -ENODEV; struct tlv *tlv; - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + rcu_read_lock(); + + mlink = rcu_dereference(mvif->mt76.link[link_id]); + if (!mlink) + goto error_unlock; + + msta_link = rcu_dereference(msta->link[link_id]); + if (!msta_link) + goto error_unlock; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, &msta_link->wcid, MT7996_STA_UPDATE_MAX_SIZE); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto error_unlock; + } tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); ra = (struct sta_rec_ra_fixed_uni *)tlv; @@ -1935,27 +1948,45 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, if (phy) ra->phy = *phy; break; - case RATE_PARAM_MMPS_UPDATE: + case RATE_PARAM_MMPS_UPDATE: { + struct ieee80211_sta *sta = wcid_to_sta(&msta_link->wcid); + struct ieee80211_link_sta *link_sta; + + link_sta = 
rcu_dereference(sta->link[link_id]); + if (!link_sta) { + dev_kfree_skb(skb); + goto error_unlock; + } + ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode); break; + } default: break; } ra->field = cpu_to_le32(field); + rcu_read_unlock(); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +error_unlock: + rcu_read_unlock(); + + return err; } static int mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_link_sta *link_sta, struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link) + struct mt7996_sta_link *msta_link, + u8 link_id) { struct cfg80211_chan_def *chandef = &link->phy->mt76->chandef; struct cfg80211_bitrate_mask *mask = &link->bitrate_mask; enum nl80211_band band = chandef->chan->band; + struct mt7996_sta *msta = msta_link->sta; struct sta_phy_uni phy = {}; int ret, nrates = 0; @@ -1996,8 +2027,7 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, /* fixed single rate */ if (nrates == 1) { - ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, - msta_link, &phy, + ret = mt7996_mcu_set_fixed_field(dev, msta, &phy, link_id, RATE_PARAM_FIXED_MCS); if (ret) return ret; @@ -2018,8 +2048,7 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, else mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); - ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, - msta_link, &phy, + ret = mt7996_mcu_set_fixed_field(dev, msta, &phy, link_id, RATE_PARAM_FIXED_GI); if (ret) return ret; @@ -2027,8 +2056,7 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, /* fixed HE_LTF */ if (mask->control[band].he_ltf != GENMASK(7, 0)) { - ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, - msta_link, &phy, + ret = mt7996_mcu_set_fixed_field(dev, msta, &phy, link_id, RATE_PARAM_FIXED_HE_LTF); if (ret) return ret; @@ -2150,7 +2178,8 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_bss_conf *link_conf, struct ieee80211_link_sta *link_sta, struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, bool changed) + struct mt7996_sta_link *msta_link, + u8 link_id, bool changed) { struct sk_buff *skb; int ret; @@ -2178,7 +2207,8 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, if (ret) return ret; - return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link); + return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link, + link_id); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 1ad6bc046f7c..a529c2bae7cb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -625,18 +625,16 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_bss_conf *link_conf, struct ieee80211_link_sta *link_sta, struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, bool changed); + struct mt7996_sta_link *msta_link, + u8 link_id, bool changed); int mt7996_set_channel(struct mt76_phy *mphy); int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag); int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, void *data, u16 version); -int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, - struct ieee80211_link_sta *link_sta, - struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, - void *data, u32 field); +int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct mt7996_sta 
*msta, + void *data, u8 link_id, u32 field); int mt7996_mcu_set_eeprom(struct mt7996_dev *dev); int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len); int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num); From 28d519d0d493a8cf3f8ca01f10d962c56cec1825 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 5 Jun 2025 13:14:18 +0200 Subject: [PATCH 098/234] wifi: mt76: Move RCU section in mt7996_mcu_add_rate_ctrl_fixed() Since mt7996_mcu_set_fixed_field() can't be executed in a RCU critical section, move RCU section in mt7996_mcu_add_rate_ctrl_fixed() and run mt7996_mcu_set_fixed_field() in non-atomic context. This is a preliminary patch to fix a 'sleep while atomic' issue in mt7996_mac_sta_rc_work(). Fixes: 0762bdd30279 ("wifi: mt76: mt7996: rework mt7996_mac_sta_rc_work to support MLO") Signed-off-by: Lorenzo Bianconi Link: https://patch.msgid.link/20250605-mt7996-sleep-while-atomic-v1-3-d46d15f9203c@kernel.org Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7996/mcu.c | 86 ++++++++++++------- 1 file changed, 57 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 33c61e795b73..742497ba2a6b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -1977,51 +1977,74 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct mt7996_sta *msta, } static int -mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, - struct ieee80211_link_sta *link_sta, - struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, - u8 link_id) +mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct mt7996_sta *msta, + struct ieee80211_vif *vif, u8 link_id) { - struct cfg80211_chan_def *chandef = &link->phy->mt76->chandef; - struct cfg80211_bitrate_mask *mask = &link->bitrate_mask; - enum nl80211_band band = chandef->chan->band; - struct mt7996_sta *msta = msta_link->sta; + struct ieee80211_link_sta *link_sta; + struct cfg80211_bitrate_mask mask; + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; struct sta_phy_uni phy = {}; - int ret, nrates = 0; + struct ieee80211_sta *sta; + int ret, nrates = 0, idx; + enum nl80211_band band; + bool has_he; #define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ do { \ - u8 i, gi = mask->control[band]._gi; \ + u8 i, gi = mask.control[band]._gi; \ gi = (_he) ? 
gi : gi == NL80211_TXRATE_FORCE_SGI; \ phy.sgi = gi; \ - phy.he_ltf = mask->control[band].he_ltf; \ - for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \ - if (!mask->control[band]._mcs[i]) \ + phy.he_ltf = mask.control[band].he_ltf; \ + for (i = 0; i < ARRAY_SIZE(mask.control[band]._mcs); i++) { \ + if (!mask.control[band]._mcs[i]) \ continue; \ - nrates += hweight16(mask->control[band]._mcs[i]); \ - phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \ + nrates += hweight16(mask.control[band]._mcs[i]); \ + phy.mcs = ffs(mask.control[band]._mcs[i]) - 1; \ if (_ht) \ phy.mcs += 8 * i; \ } \ } while (0) - if (link_sta->he_cap.has_he) { + rcu_read_lock(); + + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + goto error_unlock; + + msta_link = rcu_dereference(msta->link[link_id]); + if (!msta_link) + goto error_unlock; + + sta = wcid_to_sta(&msta_link->wcid); + link_sta = rcu_dereference(sta->link[link_id]); + if (!link_sta) + goto error_unlock; + + band = link->phy->mt76->chandef.chan->band; + has_he = link_sta->he_cap.has_he; + mask = link->bitrate_mask; + idx = msta_link->wcid.idx; + + if (has_he) { __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); } else if (link_sta->vht_cap.vht_supported) { __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); } else if (link_sta->ht_cap.ht_supported) { __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); } else { - nrates = hweight32(mask->control[band].legacy); - phy.mcs = ffs(mask->control[band].legacy) - 1; + nrates = hweight32(mask.control[band].legacy); + phy.mcs = ffs(mask.control[band].legacy) - 1; } + + rcu_read_unlock(); + #undef __sta_phy_bitrate_mask_check /* fall back to auto rate control */ - if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && - mask->control[band].he_gi == GENMASK(7, 0) && - mask->control[band].he_ltf == GENMASK(7, 0) && + if (mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI && + mask.control[band].he_gi == GENMASK(7, 0) && + mask.control[band].he_ltf == GENMASK(7, 0) && nrates != 1) return 0; @@ -2034,16 +2057,16 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, } /* fixed GI */ - if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || - mask->control[band].he_gi != GENMASK(7, 0)) { + if (mask.control[band].gi != NL80211_TXRATE_DEFAULT_GI || + mask.control[band].he_gi != GENMASK(7, 0)) { u32 addr; /* firmware updates only TXCMD but doesn't take WTBL into * account, so driver should update here to reflect the * actual txrate hardware sends out. 
*/ - addr = mt7996_mac_wtbl_lmac_addr(dev, msta_link->wcid.idx, 7); - if (link_sta->he_cap.has_he) + addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 7); + if (has_he) mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); else mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); @@ -2055,7 +2078,7 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, } /* fixed HE_LTF */ - if (mask->control[band].he_ltf != GENMASK(7, 0)) { + if (mask.control[band].he_ltf != GENMASK(7, 0)) { ret = mt7996_mcu_set_fixed_field(dev, msta, &phy, link_id, RATE_PARAM_FIXED_HE_LTF); if (ret) @@ -2063,6 +2086,11 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, } return 0; + +error_unlock: + rcu_read_unlock(); + + return -ENODEV; } static void @@ -2181,6 +2209,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct mt7996_sta_link *msta_link, u8 link_id, bool changed) { + struct mt7996_sta *msta = msta_link->sta; struct sk_buff *skb; int ret; @@ -2207,8 +2236,7 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, if (ret) return ret; - return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link, - link_id); + return mt7996_mcu_add_rate_ctrl_fixed(dev, msta, vif, link_id); } static int From 3dd6f67c669c860b93ff533f790f23ee1cb36f25 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 5 Jun 2025 13:14:19 +0200 Subject: [PATCH 099/234] wifi: mt76: Move RCU section in mt7996_mcu_add_rate_ctrl() Since mt76_mcu_skb_send_msg() routine can't be executed in atomic context, move RCU section in mt7996_mcu_add_rate_ctrl() and execute mt76_mcu_skb_send_msg() in non-atomic context. This is a preliminary patch to fix a 'sleep while atomic' issue in mt7996_mac_sta_rc_work(). Fixes: 0762bdd30279 ("wifi: mt76: mt7996: rework mt7996_mac_sta_rc_work to support MLO") Signed-off-by: Lorenzo Bianconi Link: https://patch.msgid.link/20250605-mt7996-sleep-while-atomic-v1-4-d46d15f9203c@kernel.org Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7996/mac.c | 6 +-- .../net/wireless/mediatek/mt76/mt7996/main.c | 6 +-- .../net/wireless/mediatek/mt76/mt7996/mcu.c | 50 +++++++++++++++---- .../wireless/mediatek/mt76/mt7996/mt7996.h | 10 ++-- 4 files changed, 45 insertions(+), 27 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 7444bd374b50..329dc9edc80d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -2356,7 +2356,6 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) struct ieee80211_bss_conf *link_conf; struct ieee80211_link_sta *link_sta; struct mt7996_sta_link *msta_link; - struct mt7996_vif_link *link; struct mt76_vif_link *mlink; struct ieee80211_sta *sta; struct ieee80211_vif *vif; @@ -2398,13 +2397,10 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) spin_unlock_bh(&dev->mt76.sta_poll_lock); - link = (struct mt7996_vif_link *)mlink; - if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED | IEEE80211_RC_BW_CHANGED)) - mt7996_mcu_add_rate_ctrl(dev, vif, link_conf, - link_sta, link, msta_link, + mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif, link_id, true); if (changed & IEEE80211_RC_SMPS_CHANGED) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index a096b5bab001..07dd75ce94a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -1112,10 +1112,8 @@ mt7996_mac_sta_event(struct 
mt7996_dev *dev, struct ieee80211_vif *vif, if (err) return err; - err = mt7996_mcu_add_rate_ctrl(dev, vif, link_conf, - link_sta, link, - msta_link, link_id, - false); + err = mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif, + link_id, false); if (err) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 742497ba2a6b..77ab1f4854a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -2201,23 +2201,44 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi)); } -int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - struct ieee80211_link_sta *link_sta, - struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, - u8 link_id, bool changed) +int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct mt7996_sta *msta, + struct ieee80211_vif *vif, u8 link_id, + bool changed) { - struct mt7996_sta *msta = msta_link->sta; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; + struct ieee80211_sta *sta; struct sk_buff *skb; - int ret; + int ret = -ENODEV; + + rcu_read_lock(); + + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + goto error_unlock; + + msta_link = rcu_dereference(msta->link[link_id]); + if (!msta_link) + goto error_unlock; + + sta = wcid_to_sta(&msta_link->wcid); + link_sta = rcu_dereference(sta->link[link_id]); + if (!link_sta) + goto error_unlock; + + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (!link_conf) + goto error_unlock; skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, &msta_link->wcid, MT7996_STA_UPDATE_MAX_SIZE); - if (IS_ERR(skb)) - return PTR_ERR(skb); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + goto error_unlock; + } /* firmware rc algorithm refers to sta_rec_he for HE control. 
* once dev->rc_work changes the settings driver should also @@ -2231,12 +2252,19 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, */ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, link_conf, link_sta, link); + rcu_read_unlock(); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); if (ret) return ret; return mt7996_mcu_add_rate_ctrl_fixed(dev, msta, vif, link_id); + +error_unlock: + rcu_read_unlock(); + + return ret; } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index a529c2bae7cb..33ac16b64ef1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -620,13 +620,9 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct mt7996_vif_link *link, struct ieee80211_he_obss_pd *he_obss_pd); -int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf, - struct ieee80211_link_sta *link_sta, - struct mt7996_vif_link *link, - struct mt7996_sta_link *msta_link, - u8 link_id, bool changed); +int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct mt7996_sta *msta, + struct ieee80211_vif *vif, u8 link_id, + bool changed); int mt7996_set_channel(struct mt76_phy *mphy); int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag); int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif, From 71532576f41e5b0ec967a82ed49d5dfb1027ccdb Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 5 Jun 2025 13:14:20 +0200 Subject: [PATCH 100/234] wifi: mt76: Remove RCU section in mt7996_mac_sta_rc_work() Since mt7996_mcu_add_rate_ctrl() and mt7996_mcu_set_fixed_field() can't run in atomic context, move RCU critical section in mt7996_mcu_add_rate_ctrl() and mt7996_mcu_set_fixed_field(). This patch fixes a 'sleep while atomic' issue in mt7996_mac_sta_rc_work(). 
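With the lookups pushed down into the MCU helpers, the work function is left with the common "splice under spinlock, process without it" shape: entries are moved to a private list under the non-sleepable poll lock, and the lock is dropped around each potentially sleeping call. Roughly, as a generic sketch rather than the exact driver code:

    spin_lock_bh(&lock);
    list_splice_init(&shared_list, &local_list);
    while (!list_empty(&local_list)) {
            e = list_first_entry(&local_list, struct entry, node);
            list_del_init(&e->node);
            spin_unlock_bh(&lock);  /* drop before the sleeping MCU calls */
            handle(e);
            spin_lock_bh(&lock);
    }
    spin_unlock_bh(&lock);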
Fixes: 0762bdd30279 ("wifi: mt76: mt7996: rework mt7996_mac_sta_rc_work to support MLO") Signed-off-by: Lorenzo Bianconi Tested-by: Ben Greear Link: https://patch.msgid.link/20250605-mt7996-sleep-while-atomic-v1-5-d46d15f9203c@kernel.org Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7996/mac.c | 35 ++++--------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 329dc9edc80d..445fe149ac0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -2353,19 +2353,12 @@ void mt7996_mac_update_stats(struct mt7996_phy *phy) void mt7996_mac_sta_rc_work(struct work_struct *work) { struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work); - struct ieee80211_bss_conf *link_conf; - struct ieee80211_link_sta *link_sta; struct mt7996_sta_link *msta_link; - struct mt76_vif_link *mlink; - struct ieee80211_sta *sta; struct ieee80211_vif *vif; - struct mt7996_sta *msta; struct mt7996_vif *mvif; LIST_HEAD(list); u32 changed; - u8 link_id; - rcu_read_lock(); spin_lock_bh(&dev->mt76.sta_poll_lock); list_splice_init(&dev->sta_rc_list, &list); @@ -2376,24 +2369,9 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) changed = msta_link->changed; msta_link->changed = 0; - - sta = wcid_to_sta(&msta_link->wcid); - link_id = msta_link->wcid.link_id; - msta = msta_link->sta; - mvif = msta->vif; - vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); - - mlink = rcu_dereference(mvif->mt76.link[link_id]); - if (!mlink) - continue; - - link_sta = rcu_dereference(sta->link[link_id]); - if (!link_sta) - continue; - - link_conf = rcu_dereference(vif->link_conf[link_id]); - if (!link_conf) - continue; + mvif = msta_link->sta->vif; + vif = container_of((void *)mvif, struct ieee80211_vif, + drv_priv); spin_unlock_bh(&dev->mt76.sta_poll_lock); @@ -2401,17 +2379,18 @@ void mt7996_mac_sta_rc_work(struct work_struct *work) IEEE80211_RC_NSS_CHANGED | IEEE80211_RC_BW_CHANGED)) mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif, - link_id, true); + msta_link->wcid.link_id, + true); if (changed & IEEE80211_RC_SMPS_CHANGED) - mt7996_mcu_set_fixed_field(dev, msta, NULL, link_id, + mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL, + msta_link->wcid.link_id, RATE_PARAM_MMPS_UPDATE); spin_lock_bh(&dev->mt76.sta_poll_lock); } spin_unlock_bh(&dev->mt76.sta_poll_lock); - rcu_read_unlock(); } void mt7996_mac_work(struct work_struct *work) From d20de55332e92f9e614c34783c00bb6ce2fec067 Mon Sep 17 00:00:00 2001 From: Ming Yen Hsieh Date: Thu, 12 Jun 2025 14:09:31 +0800 Subject: [PATCH 101/234] wifi: mt76: mt7925: fix the wrong config for tx interrupt MT_INT_TX_DONE_MCU_WM may cause tx interrupt to be mishandled during a reset failure, leading to the reset process failing. By using MT_INT_TX_DONE_MCU instead of MT_INT_TX_DONE_MCU_WM, the handling of tx interrupt is improved. 
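The reason this helps is visible from the mask definitions in the hunk below, where MT_INT_TX_DONE_MCU is the wider mask:

    #define MT_INT_TX_DONE_MCU      (MT_INT_TX_DONE_MCU_WM | \
                                     MT_INT_TX_DONE_FWDL)

so building MT_INT_TX_DONE_ALL from MT_INT_TX_DONE_MCU rather than MT_INT_TX_DONE_MCU_WM also covers firmware-download (FWDL) TX completions, which is presumably the path exercised when the reset handling reloads firmware.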
Cc: stable@vger.kernel.org Fixes: c948b5da6bbe ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips") Signed-off-by: Ming Yen Hsieh Link: https://patch.msgid.link/20250612060931.135635-1-mingyen.hsieh@mediatek.com Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7925/regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h index 547489092c29..341987e47f67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -58,7 +58,7 @@ #define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ MT_INT_TX_DONE_FWDL) -#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \ +#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU | \ MT_INT_TX_DONE_BAND0 | \ GENMASK(18, 4)) From c701574c54121af2720648572efbfe77564652d1 Mon Sep 17 00:00:00 2001 From: Michael Lo Date: Thu, 12 Jun 2025 14:20:46 +0800 Subject: [PATCH 102/234] wifi: mt76: mt7925: fix invalid array index in ssid assignment during hw scan Update the destination index to use 'n_ssids', which is incremented only when a valid SSID is present. Previously, both mt76_connac_mcu_hw_scan() and mt7925_mcu_hw_scan() used the loop index 'i' for the destination array, potentially leaving gaps if any source SSIDs had zero length. Cc: stable@vger.kernel.org Fixes: c948b5da6bbe ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips") Signed-off-by: Michael Lo Signed-off-by: Ming Yen Hsieh Link: https://patch.msgid.link/20250612062046.160598-1-mingyen.hsieh@mediatek.com Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index fa08e952dffa..16db0f2082d1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -1740,8 +1740,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (!sreq->ssids[i].ssid_len) continue; - req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, + req->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(req->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index b8542be0d945..0ba33409924a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -2869,8 +2869,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (i > MT7925_RNR_SCAN_MAX_BSSIDS) break; - ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, + ssid->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(ssid->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } From 9f8f4a51f3c133030ac5a4bcd4759ae2b947b7bd Mon Sep 17 00:00:00 2001 From: Ming Yen Hsieh Date: Mon, 16 Jun 2025 14:36:49 +0800 Subject: [PATCH 103/234] wifi: mt76: mt7925: fix incorrect scan probe IE handling for hw_scan The IEs should be processed and filled into the command tlv separately according to each band. 
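In outline, the fix replaces the single IE blob with one TLV per band that actually carries IEs; simplified from the helper added in the diff below (add_tlv() stands in for mt76_connac_mcu_add_tlv()):

    for (band = 0; band <= NL80211_BAND_6GHZ; band++) {
            if (band == NL80211_BAND_60GHZ ||
                !scan_ies->ies[band] || !scan_ies->len[band])
                    continue;
            tlv = add_tlv(skb, UNI_SCAN_IE,
                          sizeof(*ie) + scan_ies->len[band]);
            /* copy scan_ies->ies[band], set ie->ies_len and the band code */
    }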
Cc: stable@vger.kernel.org Fixes: c948b5da6bbe ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips") Signed-off-by: Ming Yen Hsieh Link: https://patch.msgid.link/20250616063649.1100503-1-mingyen.hsieh@mediatek.com Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7925/main.c | 2 +- .../net/wireless/mediatek/mt76/mt7925/mcu.c | 75 +++++++++++++++---- .../net/wireless/mediatek/mt76/mt7925/mcu.h | 5 +- 3 files changed, 63 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 94b0099dcd41..f0f1b1e4d507 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1481,7 +1481,7 @@ mt7925_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt7925_mcu_sched_scan_req(mphy, vif, req); + err = mt7925_mcu_sched_scan_req(mphy, vif, req, ies); if (err < 0) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 0ba33409924a..8ac6fbb736ab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -164,6 +164,7 @@ mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, bool suspend, struct cfg80211_wowlan *wowlan) { struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv; + struct ieee80211_scan_ies ies = {}; struct mt76_dev *dev = phy->dev; struct { struct { @@ -194,7 +195,7 @@ mt7925_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, req.wow_ctrl_tlv.trigger |= (UNI_WOW_DETECT_TYPE_DISCONNECT | UNI_WOW_DETECT_TYPE_BCN_LOST); if (wowlan->nd_config) { - mt7925_mcu_sched_scan_req(phy, vif, wowlan->nd_config); + mt7925_mcu_sched_scan_req(phy, vif, wowlan->nd_config, &ies); req.wow_ctrl_tlv.trigger |= UNI_WOW_DETECT_TYPE_SCH_SCAN_HIT; mt7925_mcu_sched_scan_enable(phy, vif, suspend); } @@ -2818,6 +2819,54 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable) return err; } +static void +mt7925_mcu_build_scan_ie_tlv(struct mt76_dev *mdev, + struct sk_buff *skb, + struct ieee80211_scan_ies *scan_ies) +{ + u32 max_len = sizeof(struct scan_ie_tlv) + MT76_CONNAC_SCAN_IE_LEN; + struct scan_ie_tlv *ie; + enum nl80211_band i; + struct tlv *tlv; + const u8 *ies; + u16 ies_len; + + for (i = 0; i <= NL80211_BAND_6GHZ; i++) { + if (i == NL80211_BAND_60GHZ) + continue; + + ies = scan_ies->ies[i]; + ies_len = scan_ies->len[i]; + + if (!ies || !ies_len) + continue; + + if (ies_len > max_len) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, + sizeof(*ie) + ies_len); + ie = (struct scan_ie_tlv *)tlv; + + memcpy(ie->ies, ies, ies_len); + ie->ies_len = cpu_to_le16(ies_len); + + switch (i) { + case NL80211_BAND_2GHZ: + ie->band = 1; + break; + case NL80211_BAND_6GHZ: + ie->band = 3; + break; + default: + ie->band = 2; + break; + } + + max_len -= (sizeof(*ie) + ies_len); + } +} + int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req) { @@ -2843,7 +2892,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + sizeof(*bssid) * MT7925_RNR_SCAN_MAX_BSSIDS + - sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie); + sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie) + + MT76_CONNAC_SCAN_IE_LEN; skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); if (!skb) @@ -2925,13 +2975,6 @@ 
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, } chan_info->channel_type = sreq->n_channels ? 4 : 0; - tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); - ie = (struct scan_ie_tlv *)tlv; - if (sreq->ie_len > 0) { - memcpy(ie->ies, sreq->ie, sreq->ie_len); - ie->ies_len = cpu_to_le16(sreq->ie_len); - } - req->scan_func |= SCAN_FUNC_SPLIT_SCAN; tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_MISC, sizeof(*misc)); @@ -2942,6 +2985,9 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->scan_func |= SCAN_FUNC_RANDOM_MAC; } + /* Append scan probe IEs as the last tlv */ + mt7925_mcu_build_scan_ie_tlv(mdev, skb, &scan_req->ies); + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), true); if (err < 0) @@ -2953,7 +2999,8 @@ EXPORT_SYMBOL_GPL(mt7925_mcu_hw_scan); int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *sreq) + struct cfg80211_sched_scan_request *sreq, + struct ieee80211_scan_ies *ies) { struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv; struct ieee80211_channel **scan_list = sreq->channels; @@ -3041,12 +3088,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, } chan_info->channel_type = sreq->n_channels ? 4 : 0; - tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_IE, sizeof(*ie)); - ie = (struct scan_ie_tlv *)tlv; - if (sreq->ie_len > 0) { - memcpy(ie->ies, sreq->ie, sreq->ie_len); - ie->ies_len = cpu_to_le16(sreq->ie_len); - } + /* Append scan probe IEs as the last tlv */ + mt7925_mcu_build_scan_ie_tlv(mdev, skb, ies); return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), true); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index ee6fb16e83c5..a40764d89a1f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -269,7 +269,7 @@ struct scan_ie_tlv { __le16 ies_len; u8 band; u8 pad; - u8 ies[MT76_CONNAC_SCAN_IE_LEN]; + u8 ies[]; }; struct scan_misc_tlv { @@ -673,7 +673,8 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif); int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *sreq); + struct cfg80211_sched_scan_request *sreq, + struct ieee80211_scan_ies *ies); int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, struct ieee80211_vif *vif, bool enable); From 35ad47c0b3da04b00b19a8b9ed5632e2f2520472 Mon Sep 17 00:00:00 2001 From: Deren Wu Date: Sun, 25 May 2025 14:11:21 +0800 Subject: [PATCH 104/234] wifi: mt76: mt7925: prevent NULL pointer dereference in mt7925_sta_set_decap_offload() Add a NULL check for msta->vif before accessing its members to prevent a kernel panic in AP mode deployment. This also fix the issue reported in [1]. The crash occurs when this function is triggered before the station is fully initialized. The call trace shows a page fault at mt7925_sta_set_decap_offload() due to accessing resources when msta->vif is NULL. Fix this by adding an early return if msta->vif is NULL and also check wcid.sta is ready. This ensures we only proceed with decap offload configuration when the station's state is properly initialized. 
[14739.655703] Unable to handle kernel paging request at virtual address ffffffffffffffa0 [14739.811820] CPU: 0 UID: 0 PID: 895854 Comm: hostapd Tainted: G [14739.821394] Tainted: [C]=CRAP, [O]=OOT_MODULE [14739.825746] Hardware name: Raspberry Pi 4 Model B Rev 1.1 (DT) [14739.831577] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [14739.838538] pc : mt7925_sta_set_decap_offload+0xc0/0x1b8 [mt7925_common] [14739.845271] lr : mt7925_sta_set_decap_offload+0x58/0x1b8 [mt7925_common] [14739.851985] sp : ffffffc085efb500 [14739.855295] x29: ffffffc085efb500 x28: 0000000000000000 x27: ffffff807803a158 [14739.862436] x26: ffffff8041ececb8 x25: 0000000000000001 x24: 0000000000000001 [14739.869577] x23: 0000000000000001 x22: 0000000000000008 x21: ffffff8041ecea88 [14739.876715] x20: ffffff8041c19ca0 x19: ffffff8078031fe0 x18: 0000000000000000 [14739.883853] x17: 0000000000000000 x16: ffffffe2aeac1110 x15: 000000559da48080 [14739.890991] x14: 0000000000000001 x13: 0000000000000000 x12: 0000000000000000 [14739.898130] x11: 0a10020001008e88 x10: 0000000000001a50 x9 : ffffffe26457bfa0 [14739.905269] x8 : ffffff8042013bb0 x7 : ffffff807fb6cbf8 x6 : dead000000000100 [14739.912407] x5 : dead000000000122 x4 : ffffff80780326c8 x3 : 0000000000000000 [14739.919546] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffffff8041ececb8 [14739.926686] Call trace: [14739.929130] mt7925_sta_set_decap_offload+0xc0/0x1b8 [mt7925_common] [14739.935505] ieee80211_check_fast_rx+0x19c/0x510 [mac80211] [14739.941344] _sta_info_move_state+0xe4/0x510 [mac80211] [14739.946860] sta_info_move_state+0x1c/0x30 [mac80211] [14739.952116] sta_apply_auth_flags.constprop.0+0x90/0x1b0 [mac80211] [14739.958708] sta_apply_parameters+0x234/0x5e0 [mac80211] [14739.964332] ieee80211_add_station+0xdc/0x190 [mac80211] [14739.969950] nl80211_new_station+0x46c/0x670 [cfg80211] [14739.975516] genl_family_rcv_msg_doit+0xdc/0x150 [14739.980158] genl_rcv_msg+0x218/0x298 [14739.983830] netlink_rcv_skb+0x64/0x138 [14739.987670] genl_rcv+0x40/0x60 [14739.990816] netlink_unicast+0x314/0x380 [14739.994742] netlink_sendmsg+0x198/0x3f0 [14739.998664] __sock_sendmsg+0x64/0xc0 [14740.002324] ____sys_sendmsg+0x260/0x298 [14740.006242] ___sys_sendmsg+0xb4/0x110 Cc: stable@vger.kernel.org Link: https://github.com/morrownr/USB-WiFi/issues/603 [1] Fixes: b859ad65309a ("wifi: mt76: mt7925: add link handling in mt7925_sta_set_decap_offload") Signed-off-by: Deren Wu Link: https://patch.msgid.link/35aedbffa050e98939264300407a52ba4e236d52.1748149855.git.deren.wu@mediatek.com Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7925/main.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index f0f1b1e4d507..5b001548dffc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1603,6 +1603,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, unsigned long valid = mvif->valid_links; u8 i; + if (!msta->vif) + return; + mt792x_mutex_acquire(dev); valid = ieee80211_vif_is_mld(vif) ? 
mvif->valid_links : BIT(0); @@ -1617,6 +1620,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, else clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + if (!mlink->wcid.sta) + continue; + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); } From 7035a082348acf1d43ffb9ff735899f8e3863f8f Mon Sep 17 00:00:00 2001 From: Deren Wu Date: Sun, 25 May 2025 14:11:22 +0800 Subject: [PATCH 105/234] wifi: mt76: mt7921: prevent decap offload config before STA initialization The decap offload configuration should only be applied after the STA has been successfully initialized. Attempting to configure it earlier can lead to corruption of the MAC configuration in the chip's hardware state. Add an early check for `msta->deflink.wcid.sta` to ensure the station peer is properly initialized before proceeding with decapsulation offload configuration. Cc: stable@vger.kernel.org Fixes: 24299fc869f7 ("mt76: mt7921: enable rx header traslation offload") Signed-off-by: Deren Wu Link: https://patch.msgid.link/f23a72ba7a3c1ad38ba9e13bb54ef21d6ef44ffb.1748149855.git.deren.wu@mediatek.com Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 1fffa43379b2..77f73ae1d7ec 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -1180,6 +1180,9 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); + if (!msta->deflink.wcid.sta) + return; + mt792x_mutex_acquire(dev); if (enabled) From dc66a129adf1f25e944d0b93cd2df2ee0f0bd4d6 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 7 Jul 2025 17:47:01 +0200 Subject: [PATCH 106/234] wifi: mt76: add a wrapper for wcid access with validation Several places use rcu_dereference to get a wcid entry without validating if the index exceeds the array boundary. Fix this by using a helper function, which handles validation. 
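The helper folds the bounds check into the RCU lookup so callers cannot forget it; reproduced from the mt76.h hunk below for readability:

    static inline struct mt76_wcid *
    __mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
    {
            if (idx >= ARRAY_SIZE(dev->wcid))
                    return NULL;
            return rcu_dereference(dev->wcid[idx]);
    }

Call sites that previously open-coded the index check (or skipped it) now go through this single validated path via the mt76_wcid_ptr() wrapper.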
Link: https://patch.msgid.link/20250707154702.1726-1-nbd@nbd.name Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 10 ++++++++++ drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 10 ++-------- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 7 ++----- drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x02.h | 5 +---- drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 4 +--- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 12 +++--------- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 5 +---- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt792x_mac.c | 5 +---- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 12 +++--------- drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 11 ++++------- drivers/net/wireless/mediatek/mt76/tx.c | 8 +++----- drivers/net/wireless/mediatek/mt76/util.c | 2 +- 17 files changed, 41 insertions(+), 68 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5f8d81cda6cd..74b75035d361 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -1224,6 +1224,16 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, #define mt76_dereference(p, dev) \ rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex)) +static inline struct mt76_wcid * +__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx) +{ + if (idx >= ARRAY_SIZE(dev->wcid)) + return NULL; + return rcu_dereference(dev->wcid[idx]); +} + +#define mt76_wcid_ptr(dev, idx) __mt76_wcid_ptr(&(dev)->mt76, idx) + struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size, const struct ieee80211_ops *ops, const struct mt76_driver_ops *drv_ops); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 863e5770df51..e26cc78fff94 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -44,7 +44,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (idx >= MT7603_WTBL_STA - 1) goto free; - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (!wcid) goto free; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 413973d05b43..6387f9e61060 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -487,10 +487,7 @@ mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast) struct mt7603_sta *sta; struct mt76_wcid *wcid; - if (idx >= MT7603_WTBL_SIZE) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (unicast || !wcid) return wcid; @@ -1266,12 +1263,9 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data) if (pid == MT_PACKET_ID_NO_ACK) return; - if (wcidx >= MT7603_WTBL_SIZE) - return; - rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 3ca4fae7c4b0..f8d2cc94b742 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -90,10 +90,7 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev, struct mt7615_sta *sta; struct mt76_wcid *wcid; - if (idx >= MT7615_WTBL_SIZE) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (unicast || !wcid) return wcid; @@ -1504,7 +1501,7 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data) rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index e9ac8a7317a1..0db00efe88b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -1172,7 +1172,7 @@ void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t, wcid_idx = wcid->idx; } else { wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); - wcid = rcu_dereference(dev->wcid[wcid_idx]); + wcid = __mt76_wcid_ptr(dev, wcid_idx); if (wcid && wcid->sta) { sta = container_of((void *)wcid, struct ieee80211_sta, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 4cd63bacd742..9d7ee09b6cc9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -262,10 +262,7 @@ mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) { struct mt76_wcid *wcid; - if (idx >= MT76x02_N_WCIDS) - return NULL; - - wcid = rcu_dereference(dev->wcid[idx]); + wcid = __mt76_wcid_ptr(dev, idx); if (!wcid) return NULL; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index d5db6ffd6d36..83488b2d6efb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -564,9 +564,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, rcu_read_lock(); - if (stat->wcid < MT76x02_N_WCIDS) - wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); - + wcid = mt76_wcid_ptr(dev, stat->wcid); if (wcid && wcid->sta) { void *priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 9400e4af2a04..6639976afcee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -56,10 +56,7 @@ static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, struct mt7915_sta *sta; struct mt76_wcid *wcid; - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (unicast || !wcid) return wcid; @@ -917,7 +914,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) u16 idx; idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); sta = wcid_to_sta(wcid); if (!sta) continue; @@ -1013,12 +1010,9 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data) if (pid < MT_PACKET_ID_WED) return; - if (wcidx >= mt7915_wtbl_size(dev)) - return; - rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 427542777abc..c6584d2b7509 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -3986,7 +3986,7 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + wcid = mt76_wcid_ptr(dev, wlan_idx); if (wcid) wcid->stats.tx_packets += le32_to_cpu(res->tx_packets); else diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 9c4d5cea0c42..4a82f8e4c118 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -587,12 +587,9 @@ static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed, dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); - if (idx >= mt7915_wtbl_size(dev)) - return; - rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (wcid) { wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt); wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 5dd57de59f27..f1f76506b0a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -465,7 +465,7 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; @@ -516,7 +516,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) count++; idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); sta = wcid_to_sta(wcid); if (!sta) continue; @@ -816,7 +816,7 @@ void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, u16 idx; idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); - wcid = rcu_dereference(mdev->wcid[idx]); + wcid = __mt76_wcid_ptr(mdev, idx); sta = wcid_to_sta(wcid); if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index c871d2f9688b..75823c9fd3a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -1040,7 +1040,7 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; @@ -1122,7 +1122,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); sta = wcid_to_sta(wcid); if (!sta) continue; @@ -1445,7 +1445,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, u16 idx; idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); - wcid = rcu_dereference(mdev->wcid[idx]); + wcid = __mt76_wcid_ptr(mdev, idx); sta = wcid_to_sta(wcid); if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c index 05978d9c7b91..3f1d9ba49076 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c @@ -142,10 +142,7 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx, struct mt792x_sta *sta; struct mt76_wcid *wcid; - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - return NULL; - - wcid = 
rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (unicast || !wcid) return wcid; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 445fe149ac0d..92148518f6a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -61,10 +61,7 @@ static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev, struct mt76_wcid *wcid; int i; - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (!wcid) return NULL; @@ -1249,7 +1246,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); sta = wcid_to_sta(wcid); if (!sta) goto next; @@ -1471,12 +1468,9 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data) if (pid < MT_PACKET_ID_NO_SKB) return; - if (wcidx >= mt7996_wtbl_size(dev)) - return; - rcu_read_lock(); - wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + wcid = mt76_wcid_ptr(dev, wcidx); if (!wcid) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 77ab1f4854a4..994526c65bfc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -555,7 +555,7 @@ mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb) switch (le16_to_cpu(res->tag)) { case UNI_ALL_STA_TXRX_RATE: wlan_idx = le16_to_cpu(res->rate[i].wlan_idx); - wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + wcid = mt76_wcid_ptr(dev, wlan_idx); if (!wcid) break; @@ -565,7 +565,7 @@ mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb) break; case UNI_ALL_STA_TXRX_ADM_STAT: wlan_idx = le16_to_cpu(res->adm_stat[i].wlan_idx); - wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + wcid = mt76_wcid_ptr(dev, wlan_idx); if (!wcid) break; @@ -579,7 +579,7 @@ mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb) break; case UNI_ALL_STA_TXRX_MSDU_COUNT: wlan_idx = le16_to_cpu(res->msdu_cnt[i].wlan_idx); - wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + wcid = mt76_wcid_ptr(dev, wlan_idx); if (!wcid) break; @@ -676,10 +676,7 @@ mt7996_mcu_wed_rro_event(struct mt7996_dev *dev, struct sk_buff *skb) e = (void *)skb->data; idx = le16_to_cpu(e->wlan_id); - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - break; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); + wcid = mt76_wcid_ptr(dev, idx); if (!wcid || !wcid->sta) break; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 513916469ca2..251ee3ce5e4d 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -64,7 +64,7 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); struct mt76_wcid *wcid; - wcid = rcu_dereference(dev->wcid[cb->wcid]); + wcid = __mt76_wcid_ptr(dev, cb->wcid); if (wcid) { status.sta = wcid_to_sta(wcid); if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { @@ -251,9 +251,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff * rcu_read_lock(); - if (wcid_idx < ARRAY_SIZE(dev->wcid)) - wcid = rcu_dereference(dev->wcid[wcid_idx]); - + wcid = __mt76_wcid_ptr(dev, wcid_idx); mt76_tx_check_non_aql(dev, 
wcid, skb); #ifdef CONFIG_NL80211_TESTMODE @@ -538,7 +536,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) break; mtxq = (struct mt76_txq *)txq->drv_priv; - wcid = rcu_dereference(dev->wcid[mtxq->wcid]); + wcid = __mt76_wcid_ptr(dev, mtxq->wcid); if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags)) continue; diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index 95b3dc96e4c4..97249ebb4bc8 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -83,7 +83,7 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx) if (!(mask & 1)) continue; - wcid = rcu_dereference(dev->wcid[j]); + wcid = __mt76_wcid_ptr(dev, j); if (!wcid || wcid->phy_idx != phy_idx) continue; From dedf2ec30fe417d181490896adf89cd6b9885b23 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 7 Jul 2025 17:47:02 +0200 Subject: [PATCH 107/234] wifi: mt76: fix queue assignment for deauth packets When running in AP mode and deauthenticating a client that's in powersave mode, the disassoc/deauth packet can get stuck in a tx queue along with other buffered frames. This can fill up hardware queues with frames that are only released after the WTBL slot is reused for another client. Fix this by moving deauth packets to the ALTX queue. Reported-by: Chad Monroe Link: https://patch.msgid.link/20250707154702.1726-2-nbd@nbd.name Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/tx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 251ee3ce5e4d..e6cf16706667 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -615,7 +615,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid, if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && !ieee80211_is_data(hdr->frame_control) && - !ieee80211_is_bufferable_mmpdu(skb)) + (!ieee80211_is_bufferable_mmpdu(skb) || + ieee80211_is_deauth(hdr->frame_control))) qid = MT_TXQ_PSD; q = phy->q_tx[qid]; From 03ee8f73801a8f46d83dfc2bf73fb9ffa5a21602 Mon Sep 17 00:00:00 2001 From: Henry Martin Date: Wed, 25 Jun 2025 20:49:01 +0800 Subject: [PATCH 108/234] wifi: mt76: mt7925: Fix null-ptr-deref in mt7925_thermal_init() devm_kasprintf() returns NULL on error. Currently, mt7925_thermal_init() does not check for this case, which results in a NULL pointer dereference. Add NULL check after devm_kasprintf() to prevent this issue. 
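The same rule holds for any allocating string formatter. A hedged user-space analogue (the helper name is hypothetical and glibc's asprintf() merely stands in for devm_kasprintf(); both allocate and can fail):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Hypothetical init path: build a device name, bail out cleanly on failure. */
static int thermal_name_init(const char *base, char **name)
{
	/* asprintf() allocates and can fail, like devm_kasprintf() */
	if (asprintf(name, "mt7925_%s", base) < 0)
		return -ENOMEM;	/* report the error instead of using a bad pointer */
	return 0;
}

int main(void)
{
	char *name;

	if (thermal_name_init("phy0", &name))
		return 1;
	puts(name);
	free(name);
	return 0;
}
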
Fixes: 396e41a74a88 ("wifi: mt76: mt7925: support temperature sensor") Signed-off-by: Henry Martin Reviewed-by: AngeloGioacchino Del Regno Link: https://patch.msgid.link/20250625124901.1839832-1-bsdhenryma@tencent.com Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7925/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index 2a83ff59a968..4249bad83c93 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -52,6 +52,8 @@ static int mt7925_thermal_init(struct mt792x_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7925_hwmon_groups); From eb8352ee2d1e70f916fac02094dca2b435076fa4 Mon Sep 17 00:00:00 2001 From: Leon Yen Date: Wed, 25 Jun 2025 15:37:20 +0800 Subject: [PATCH 109/234] wifi: mt76: mt792x: Limit the concurrent STA and SoftAP to operate on the same channel Due to the lack of NoA(Notice of Absence) mechanism in SoftAP mode, it is inappropriate to allow concurrent SoftAP and STA to operate on the different channels. This patch restricts the concurrent SoftAP and STA to be setup on the same channel only. Signed-off-by: Leon Yen Signed-off-by: Ming Yen Hsieh Link: https://patch.msgid.link/20250625073720.1385210-1-mingyen.hsieh@mediatek.com Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt792x_core.c | 32 ++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index a50c1723ca29..05130ec1e5f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -28,7 +28,7 @@ static const struct ieee80211_iface_combination if_comb[] = { }, }; -static const struct ieee80211_iface_limit if_limits_chanctx[] = { +static const struct ieee80211_iface_limit if_limits_chanctx_mcc[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | @@ -36,8 +36,23 @@ static const struct ieee80211_iface_limit if_limits_chanctx[] = { }, { .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) + .types = BIT(NL80211_IFTYPE_P2P_GO) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + } +}; + +static const struct ieee80211_iface_limit if_limits_chanctx_scc[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) }, { .max = 1, @@ -47,11 +62,18 @@ static const struct ieee80211_iface_limit if_limits_chanctx[] = { static const struct ieee80211_iface_combination if_comb_chanctx[] = { { - .limits = if_limits_chanctx, - .n_limits = ARRAY_SIZE(if_limits_chanctx), + .limits = if_limits_chanctx_mcc, + .n_limits = ARRAY_SIZE(if_limits_chanctx_mcc), .max_interfaces = 3, .num_different_channels = 2, .beacon_int_infra_match = false, + }, + { + .limits = if_limits_chanctx_scc, + .n_limits = ARRAY_SIZE(if_limits_chanctx_scc), + .max_interfaces = 3, + .num_different_channels = 1, + .beacon_int_infra_match = false, } }; From aa9552438ebf015fc5f9f890dbfe39f0c53cf37e Mon Sep 17 00:00:00 2001 From: Zheng Qixing Date: Thu, 12 Jun 2025 21:24:05 +0800 Subject: [PATCH 110/234] nbd: fix uaf in nbd_genl_connect() error path There is a use-after-free issue in nbd: block nbd6: Receive control 
failed (result -104) block nbd6: shutting down sockets ================================================================== BUG: KASAN: slab-use-after-free in recv_work+0x694/0xa80 drivers/block/nbd.c:1022 Write of size 4 at addr ffff8880295de478 by task kworker/u33:0/67 CPU: 2 UID: 0 PID: 67 Comm: kworker/u33:0 Not tainted 6.15.0-rc5-syzkaller-00123-g2c89c1b655c0 #0 PREEMPT(full) Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014 Workqueue: nbd6-recv recv_work Call Trace: __dump_stack lib/dump_stack.c:94 [inline] dump_stack_lvl+0x116/0x1f0 lib/dump_stack.c:120 print_address_description mm/kasan/report.c:408 [inline] print_report+0xc3/0x670 mm/kasan/report.c:521 kasan_report+0xe0/0x110 mm/kasan/report.c:634 check_region_inline mm/kasan/generic.c:183 [inline] kasan_check_range+0xef/0x1a0 mm/kasan/generic.c:189 instrument_atomic_read_write include/linux/instrumented.h:96 [inline] atomic_dec include/linux/atomic/atomic-instrumented.h:592 [inline] recv_work+0x694/0xa80 drivers/block/nbd.c:1022 process_one_work+0x9cc/0x1b70 kernel/workqueue.c:3238 process_scheduled_works kernel/workqueue.c:3319 [inline] worker_thread+0x6c8/0xf10 kernel/workqueue.c:3400 kthread+0x3c2/0x780 kernel/kthread.c:464 ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:153 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245 nbd_genl_connect() does not properly stop the device on certain error paths after nbd_start_device() has been called. This causes the error path to put nbd->config while recv_work continue to use the config after putting it, leading to use-after-free in recv_work. This patch moves nbd_start_device() after the backend file creation. Reported-by: syzbot+48240bab47e705c53126@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/68227a04.050a0220.f2294.00b5.GAE@google.com/T/ Fixes: 6497ef8df568 ("nbd: provide a way for userspace processes to identify device backends") Signed-off-by: Zheng Qixing Reviewed-by: Yu Kuai Link: https://lore.kernel.org/r/20250612132405.364904-1-zhengqixing@huaweicloud.com Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 7bdc7eb808ea..2592bd19ebc1 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2198,9 +2198,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) goto out; } } - ret = nbd_start_device(nbd); - if (ret) - goto out; + if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], GFP_KERNEL); @@ -2216,6 +2214,8 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) goto out; } set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); + + ret = nbd_start_device(nbd); out: mutex_unlock(&nbd->config_lock); if (!ret) { From 4cdf1bdd45ac78a088773722f009883af30ad318 Mon Sep 17 00:00:00 2001 From: Pankaj Raghav Date: Fri, 4 Jul 2025 11:21:34 +0200 Subject: [PATCH 111/234] block: reject bs > ps block devices when THP is disabled If THP is disabled and when a block device with logical block size > page size is present, the following null ptr deref panic happens during boot: [ [13.2 mK AOSAN: null-ptr-deref in range [0x0000000000000000-0x0000000000K0 0 0[07] [ 13.017749] RIP: 0010:create_empty_buffers+0x3b/0x380 [ 13.025448] Call Trace: [ 13.025692] [ 13.025895] block_read_full_folio+0x610/0x780 [ 13.026379] ? __pfx_blkdev_get_block+0x10/0x10 [ 13.027008] ? 
__folio_batch_add_and_move+0x1fa/0x2b0 [ 13.027548] ? __pfx_blkdev_read_folio+0x10/0x10 [ 13.028080] filemap_read_folio+0x9b/0x200 [ 13.028526] ? __pfx_filemap_read_folio+0x10/0x10 [ 13.029030] ? __filemap_get_folio+0x43/0x620 [ 13.029497] do_read_cache_folio+0x155/0x3b0 [ 13.029962] ? __pfx_blkdev_read_folio+0x10/0x10 [ 13.030381] read_part_sector+0xb7/0x2a0 [ 13.030805] read_lba+0x174/0x2c0 [ 13.045348] nvme_scan_ns+0x684/0x850 [nvme_core] [ 13.045858] ? __pfx_nvme_scan_ns+0x10/0x10 [nvme_core] [ 13.046414] ? _raw_spin_unlock+0x15/0x40 [ 13.046843] ? __switch_to+0x523/0x10a0 [ 13.047253] ? kvm_clock_get_cycles+0x14/0x30 [ 13.047742] ? __pfx_nvme_scan_ns_async+0x10/0x10 [nvme_core] [ 13.048353] async_run_entry_fn+0x96/0x4f0 [ 13.048787] process_one_work+0x667/0x10a0 [ 13.049219] worker_thread+0x63c/0xf60 As large folio support depends on THP, only allow bs > ps block devices if THP is enabled. Fixes: 47dd67532303 ("block/bdev: lift block size restrictions to 64k") Signed-off-by: Pankaj Raghav Reviewed-by: Luis Chamberlain Link: https://lore.kernel.org/r/20250704092134.289491-1-p.raghav@samsung.com Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 332b56f323d9..369a8e63c865 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -269,11 +269,16 @@ static inline dev_t disk_devt(struct gendisk *disk) return MKDEV(disk->major, disk->first_minor); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER) * however we constrain this to what we can validate and test. */ #define BLK_MAX_BLOCK_SIZE SZ_64K +#else +#define BLK_MAX_BLOCK_SIZE PAGE_SIZE +#endif + /* blk_validate_limits() validates bsize, so drivers don't usually need to */ static inline int blk_validate_block_size(unsigned long bsize) From 7de3c8b4077e052a6f7880a9ae0f42342c49631b Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 6 Jul 2025 19:25:27 -0400 Subject: [PATCH 112/234] bcachefs: Don't schedule non persistent passes persistently if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) should have been if (!in_recovery && !(flags & RUN_RECOVERY_PASS_nopersistent))) But the !in_recovery part was also wrong: the assumption is that if we're in recovery we'll just rewind and run the recovery pass immediately, but we're not able to do so if we've already gone RW and the pass must be run before we go RW. In that case, we need to schedule it in the superblock so it can be run on the next mount attempt. Scheduling it persistently is fine, because it'll be cleared in the superblock immediately when the pass completes successfully. 
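To make the boolean reasoning easier to follow, here is a small stand-alone sketch that tabulates when each form of the condition persists the pass in the superblock (the variable names mirror the commit text, everything else is illustrative):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	puts("in_recovery nopersistent | persists (old) persists (new)");

	for (int in_recovery = 0; in_recovery <= 1; in_recovery++)
		for (int nopersistent = 0; nopersistent <= 1; nopersistent++) {
			/* old: only skipped persisting when in recovery AND nopersistent */
			bool old_persists = !(in_recovery && nopersistent);
			/* new: persist unless the caller asked for nopersistent */
			bool new_persists = !nopersistent;

			printf("%11d %12d | %14d %14d\n", in_recovery,
			       nopersistent, old_persists, new_persists);
		}
	return 0;
}
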
Signed-off-by: Kent Overstreet --- fs/bcachefs/recovery_passes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c index c09ed2dd4639..6a039e011064 100644 --- a/fs/bcachefs/recovery_passes.c +++ b/fs/bcachefs/recovery_passes.c @@ -360,7 +360,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c, !(r->passes_complete & BIT_ULL(pass)); bool ratelimit = flags & RUN_RECOVERY_PASS_ratelimit; - if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) { + if (!(flags & RUN_RECOVERY_PASS_nopersistent)) { struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); __set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required); } From 74f3931a1bfea5822e8953a15c8ebc587dc6b4bc Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 7 Jul 2025 13:54:31 -0400 Subject: [PATCH 113/234] bcachefs: Fix additional misalignment in journal space calculations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Additional fix on top of f54b2a80d0df bcachefs: Fix misaligned bucket check in journal space calculations Make sure that when we calculate space for the next entry it's not misaligned: we need to round_down() to filesystem block size in multiple places (next entry size calculation as well as total space available). Reported-by: OndÅ™ej Kraus Signed-off-by: Kent Overstreet --- fs/bcachefs/journal_reclaim.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index cd6201741c59..0042d43b8e57 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -169,6 +169,12 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne if (nr_devs < nr_devs_want) return (struct journal_space) { 0, 0 }; + /* + * It's possible for bucket size to be misaligned w.r.t. the filesystem + * block size: + */ + min_bucket_size = round_down(min_bucket_size, block_sectors(c)); + /* * We sorted largest to smallest, and we want the smallest out of the * @nr_devs_want largest devices: From e31cf3cce2102af984656fed6e2254cbdd46da02 Mon Sep 17 00:00:00 2001 From: Luo Jie Date: Fri, 4 Jul 2025 13:31:13 +0800 Subject: [PATCH 114/234] net: phy: qcom: move the WoL function to shared library Move the WoL (Wake-on-LAN) functionality to a shared library to enable its reuse by the QCA808X PHY driver, incorporating support for WoL functionality similar to the implementation in at8031_set_wol(). 
Reviewed-by: Maxime Chevallier Signed-off-by: Luo Jie Link: https://patch.msgid.link/20250704-qcom_phy_wol_support-v1-1-053342b1538d@quicinc.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/qcom/at803x.c | 27 --------------------------- drivers/net/phy/qcom/qcom-phy-lib.c | 25 +++++++++++++++++++++++++ drivers/net/phy/qcom/qcom.h | 5 +++++ 3 files changed, 30 insertions(+), 27 deletions(-) diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index 26350b962890..8f26e395e39f 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ -26,9 +26,6 @@ #define AT803X_LED_CONTROL 0x18 -#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 -#define AT803X_WOL_EN BIT(5) - #define AT803X_REG_CHIP_CONFIG 0x1f #define AT803X_BT_BX_REG_SEL 0x8000 @@ -866,30 +863,6 @@ static int at8031_config_init(struct phy_device *phydev) return at803x_config_init(phydev); } -static int at8031_set_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) -{ - int ret; - - /* First setup MAC address and enable WOL interrupt */ - ret = at803x_set_wol(phydev, wol); - if (ret) - return ret; - - if (wol->wolopts & WAKE_MAGIC) - /* Enable WOL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - 0, AT803X_WOL_EN); - else - /* Disable WoL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - AT803X_WOL_EN, 0); - - return ret; -} - static int at8031_config_intr(struct phy_device *phydev) { struct at803x_priv *priv = phydev->priv; diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c index d28815ef56bb..af7d0d8e81be 100644 --- a/drivers/net/phy/qcom/qcom-phy-lib.c +++ b/drivers/net/phy/qcom/qcom-phy-lib.c @@ -115,6 +115,31 @@ int at803x_set_wol(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(at803x_set_wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + /* First setup MAC address and enable WOL interrupt */ + ret = at803x_set_wol(phydev, wol); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + /* Enable WOL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + else + /* Disable WoL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + + return ret; +} +EXPORT_SYMBOL_GPL(at8031_set_wol); + void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h index 4bb541728846..7f7151c8baca 100644 --- a/drivers/net/phy/qcom/qcom.h +++ b/drivers/net/phy/qcom/qcom.h @@ -172,6 +172,9 @@ #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A +#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 +#define AT803X_WOL_EN BIT(5) + #define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E @@ -215,6 +218,8 @@ int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data); int at803x_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol); void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); int at803x_ack_interrupt(struct phy_device *phydev); From 4ab9ada765b7acb5cd02fe27632ec2586b7868ee Mon Sep 17 00:00:00 2001 From: Luo Jie Date: Fri, 4 Jul 2025 13:31:14 +0800 Subject: [PATCH 115/234] net: phy: qcom: qca808x: 
Fix WoL issue by utilizing at8031_set_wol() The previous commit unintentionally removed the code responsible for enabling WoL via MMD3 register 0x8012 BIT5. As a result, Wake-on-LAN (WoL) support for the QCA808X PHY is no longer functional. The WoL (Wake-on-LAN) feature for the QCA808X PHY is enabled via MMD3 register 0x8012, BIT5. This implementation is aligned with the approach used in at8031_set_wol(). Fixes: e58f30246c35 ("net: phy: at803x: fix the wol setting functions") Signed-off-by: Luo Jie Reviewed-by: Maxime Chevallier Link: https://patch.msgid.link/20250704-qcom_phy_wol_support-v1-2-053342b1538d@quicinc.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/qcom/qca808x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c index 71498c518f0f..6de16c0eaa08 100644 --- a/drivers/net/phy/qcom/qca808x.c +++ b/drivers/net/phy/qcom/qca808x.c @@ -633,7 +633,7 @@ static struct phy_driver qca808x_driver[] = { .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, - .set_wol = at803x_set_wol, + .set_wol = at8031_set_wol, .get_wol = at803x_get_wol, .get_features = qca808x_get_features, .config_aneg = qca808x_config_aneg, From ae8f160e7eb24240a2a79fc4c815c6a0d4ee16cc Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 4 Jul 2025 05:48:18 +0000 Subject: [PATCH 116/234] netlink: Fix wraparounds of sk->sk_rmem_alloc. Netlink has this pattern in some places if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) atomic_add(skb->truesize, &sk->sk_rmem_alloc); , which has the same problem fixed by commit 5a465a0da13e ("udp: Fix multiple wraparounds of sk->sk_rmem_alloc."). For example, if we set INT_MAX to SO_RCVBUFFORCE, the condition is always false as the two operands are of int. Then, a single socket can eat as many skb as possible until OOM happens, and we can see multiple wraparounds of sk->sk_rmem_alloc. Let's fix it by using atomic_add_return() and comparing the two variables as unsigned int. 
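A simplified user-space sketch of the comparison problem described above (illustrative values only, not the exact kernel logic); the Before/After ss output that follows then shows the effect on a real socket:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	int rcvbuf = INT_MAX;		/* SO_RCVBUFFORCE pushed to INT_MAX        */
	int rmem = INT_MAX - 100;	/* sk->sk_rmem_alloc, already nearly full  */
	unsigned int truesize = 4096;	/* truesize of the next queued skb         */

	/* old check: an int can never exceed INT_MAX, so the skb is always
	 * admitted and the counter eventually wraps negative */
	printf("old check rejects the skb? %s\n", rmem > rcvbuf ? "yes" : "no");

	/* fixed pattern: charge the skb first (atomic_add_return() in the
	 * kernel), then compare the running total as unsigned int */
	unsigned int new_rmem = (unsigned int)rmem + truesize;
	printf("new check rejects the skb? %s (rmem would be %u)\n",
	       new_rmem > (unsigned int)rcvbuf ? "yes" : "no", new_rmem);
	return 0;
}
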
Before: [root@fedora ~]# ss -f netlink Recv-Q Send-Q Local Address:Port Peer Address:Port -1668710080 0 rtnl:nl_wraparound/293 * After: [root@fedora ~]# ss -f netlink Recv-Q Send-Q Local Address:Port Peer Address:Port 2147483072 0 rtnl:nl_wraparound/290 * ^ `--- INT_MAX - 576 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Jason Baron Closes: https://lore.kernel.org/netdev/cover.1750285100.git.jbaron@akamai.com/ Signed-off-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250704054824.1580222-1-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/netlink/af_netlink.c | 89 ++++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 36 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index e8972a857e51..79fbaf7333ce 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) WARN_ON(skb->sk != NULL); skb->sk = sk; skb->destructor = netlink_skb_destructor; - atomic_add(skb->truesize, &sk->sk_rmem_alloc); sk_mem_charge(sk, skb->truesize); } @@ -1212,41 +1211,48 @@ struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast) int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { + DECLARE_WAITQUEUE(wait, current); struct netlink_sock *nlk; + unsigned int rmem; nlk = nlk_sk(sk); + rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); - if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || - test_bit(NETLINK_S_CONGESTED, &nlk->state))) { - DECLARE_WAITQUEUE(wait, current); - if (!*timeo) { - if (!ssk || netlink_is_kernel(ssk)) - netlink_overrun(sk); - sock_put(sk); - kfree_skb(skb); - return -EAGAIN; - } - - __set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&nlk->wait, &wait); - - if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || - test_bit(NETLINK_S_CONGESTED, &nlk->state)) && - !sock_flag(sk, SOCK_DEAD)) - *timeo = schedule_timeout(*timeo); - - __set_current_state(TASK_RUNNING); - remove_wait_queue(&nlk->wait, &wait); - sock_put(sk); - - if (signal_pending(current)) { - kfree_skb(skb); - return sock_intr_errno(*timeo); - } - return 1; + if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) && + !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { + netlink_skb_set_owner_r(skb, sk); + return 0; } - netlink_skb_set_owner_r(skb, sk); - return 0; + + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + + if (!*timeo) { + if (!ssk || netlink_is_kernel(ssk)) + netlink_overrun(sk); + sock_put(sk); + kfree_skb(skb); + return -EAGAIN; + } + + __set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&nlk->wait, &wait); + rmem = atomic_read(&sk->sk_rmem_alloc); + + if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) || + test_bit(NETLINK_S_CONGESTED, &nlk->state)) && + !sock_flag(sk, SOCK_DEAD)) + *timeo = schedule_timeout(*timeo); + + __set_current_state(TASK_RUNNING); + remove_wait_queue(&nlk->wait, &wait); + sock_put(sk); + + if (signal_pending(current)) { + kfree_skb(skb); + return sock_intr_errno(*timeo); + } + + return 1; } static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) @@ -1307,6 +1313,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, ret = -ECONNREFUSED; if (nlk->netlink_rcv != NULL) { ret = skb->len; + atomic_add(skb->truesize, &sk->sk_rmem_alloc); netlink_skb_set_owner_r(skb, sk); NETLINK_CB(skb).sk = ssk; netlink_deliver_tap_kernel(sk, ssk, skb); @@ -1383,13 +1390,19 @@ 
EXPORT_SYMBOL_GPL(netlink_strict_get_check); static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); + unsigned int rmem, rcvbuf; - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && + rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + rcvbuf = READ_ONCE(sk->sk_rcvbuf); + + if ((rmem != skb->truesize || rmem <= rcvbuf) && !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { netlink_skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); - return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); + return rmem > (rcvbuf >> 1); } + + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); return -1; } @@ -2249,6 +2262,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken) struct module *module; int err = -ENOBUFS; int alloc_min_size; + unsigned int rmem; int alloc_size; if (!lock_taken) @@ -2258,9 +2272,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken) goto errout_skb; } - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) - goto errout_skb; - /* NLMSG_GOODSIZE is small to avoid high order allocations being * required, but it makes sense to _attempt_ a 32KiB allocation * to reduce number of system calls on dump operations, if user @@ -2283,6 +2294,12 @@ static int netlink_dump(struct sock *sk, bool lock_taken) if (!skb) goto errout_skb; + rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); + if (rmem >= READ_ONCE(sk->sk_rcvbuf)) { + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + goto errout_skb; + } + /* Trim skb to allocated size. User is expected to provide buffer as * large as max(min_dump_alloc, 32KiB (max_recvmsg_len capped at * netlink_recvmsg())). dump will pack as many smaller messages as From 1e3b66e326015f77bc4b36976bebeedc2ac0f588 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Thu, 3 Jul 2025 13:23:29 +0200 Subject: [PATCH 117/234] vsock: fix `vsock_proto` declaration From commit 634f1a7110b4 ("vsock: support sockmap"), `struct proto vsock_proto`, defined in af_vsock.c, is not static anymore, since it's used by vsock_bpf.c. If CONFIG_BPF_SYSCALL is not defined, `make C=2` will print a warning: $ make O=build C=2 W=1 net/vmw_vsock/ ... CC [M] net/vmw_vsock/af_vsock.o CHECK ../net/vmw_vsock/af_vsock.c ../net/vmw_vsock/af_vsock.c:123:14: warning: symbol 'vsock_proto' was not declared. Should it be static? Declare `vsock_proto` regardless of CONFIG_BPF_SYSCALL, since it's defined in af_vsock.c, which is built regardless of CONFIG_BPF_SYSCALL. Fixes: 634f1a7110b4 ("vsock: support sockmap") Signed-off-by: Stefano Garzarella Acked-by: Michael S. Tsirkin Link: https://patch.msgid.link/20250703112329.28365-1-sgarzare@redhat.com Signed-off-by: Jakub Kicinski --- include/net/af_vsock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index d56e6e135158..d40e978126e3 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -243,8 +243,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); -#ifdef CONFIG_BPF_SYSCALL extern struct proto vsock_proto; +#ifdef CONFIG_BPF_SYSCALL int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void __init vsock_bpf_build_proto(void); #else From 667eeab4999e981c96b447a4df5f20bdf5c26f13 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 2 Jul 2025 01:43:40 +0000 Subject: [PATCH 118/234] tipc: Fix use-after-free in tipc_conn_close(). 
syzbot reported a null-ptr-deref in tipc_conn_close() during netns dismantle. [0] tipc_topsrv_stop() iterates tipc_net(net)->topsrv->conn_idr and calls tipc_conn_close() for each tipc_conn. The problem is that tipc_conn_close() is called after releasing the IDR lock. At the same time, there might be tipc_conn_recv_work() running and it could call tipc_conn_close() for the same tipc_conn and release its last ->kref. Once we release the IDR lock in tipc_topsrv_stop(), there is no guarantee that the tipc_conn is alive. Let's hold the ref before releasing the lock and put the ref after tipc_conn_close() in tipc_topsrv_stop(). [0]: BUG: KASAN: use-after-free in tipc_conn_close+0x122/0x140 net/tipc/topsrv.c:165 Read of size 8 at addr ffff888099305a08 by task kworker/u4:3/435 CPU: 0 PID: 435 Comm: kworker/u4:3 Not tainted 4.19.204-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: netns cleanup_net Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1fc/0x2ef lib/dump_stack.c:118 print_address_description.cold+0x54/0x219 mm/kasan/report.c:256 kasan_report_error.cold+0x8a/0x1b9 mm/kasan/report.c:354 kasan_report mm/kasan/report.c:412 [inline] __asan_report_load8_noabort+0x88/0x90 mm/kasan/report.c:433 tipc_conn_close+0x122/0x140 net/tipc/topsrv.c:165 tipc_topsrv_stop net/tipc/topsrv.c:701 [inline] tipc_topsrv_exit_net+0x27b/0x5c0 net/tipc/topsrv.c:722 ops_exit_list+0xa5/0x150 net/core/net_namespace.c:153 cleanup_net+0x3b4/0x8b0 net/core/net_namespace.c:553 process_one_work+0x864/0x1570 kernel/workqueue.c:2153 worker_thread+0x64c/0x1130 kernel/workqueue.c:2296 kthread+0x33f/0x460 kernel/kthread.c:259 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:415 Allocated by task 23: kmem_cache_alloc_trace+0x12f/0x380 mm/slab.c:3625 kmalloc include/linux/slab.h:515 [inline] kzalloc include/linux/slab.h:709 [inline] tipc_conn_alloc+0x43/0x4f0 net/tipc/topsrv.c:192 tipc_topsrv_accept+0x1b5/0x280 net/tipc/topsrv.c:470 process_one_work+0x864/0x1570 kernel/workqueue.c:2153 worker_thread+0x64c/0x1130 kernel/workqueue.c:2296 kthread+0x33f/0x460 kernel/kthread.c:259 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:415 Freed by task 23: __cache_free mm/slab.c:3503 [inline] kfree+0xcc/0x210 mm/slab.c:3822 tipc_conn_kref_release net/tipc/topsrv.c:150 [inline] kref_put include/linux/kref.h:70 [inline] conn_put+0x2cd/0x3a0 net/tipc/topsrv.c:155 process_one_work+0x864/0x1570 kernel/workqueue.c:2153 worker_thread+0x64c/0x1130 kernel/workqueue.c:2296 kthread+0x33f/0x460 kernel/kthread.c:259 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:415 The buggy address belongs to the object at ffff888099305a00 which belongs to the cache kmalloc-512 of size 512 The buggy address is located 8 bytes inside of 512-byte region [ffff888099305a00, ffff888099305c00) The buggy address belongs to the page: page:ffffea000264c140 count:1 mapcount:0 mapping:ffff88813bff0940 index:0x0 flags: 0xfff00000000100(slab) raw: 00fff00000000100 ffffea00028b6b88 ffffea0002cd2b08 ffff88813bff0940 raw: 0000000000000000 ffff888099305000 0000000100000006 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff888099305900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888099305980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff888099305a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff888099305a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888099305b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb 
fb Fixes: c5fa7b3cf3cb ("tipc: introduce new TIPC server infrastructure") Reported-by: syzbot+d333febcf8f4bc5f6110@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=27169a847a70550d17be Signed-off-by: Kuniyuki Iwashima Reviewed-by: Tung Nguyen Link: https://patch.msgid.link/20250702014350.692213-1-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/tipc/topsrv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 8ee0c07d00e9..ffe577bf6b51 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net) for (id = 0; srv->idr_in_use; id++) { con = idr_find(&srv->conn_idr, id); if (con) { + conn_get(con); spin_unlock_bh(&srv->idr_lock); tipc_conn_close(con); + conn_put(con); spin_lock_bh(&srv->idr_lock); } } From ffc2c8c4a714df53a715827d6334ab9474424f6a Mon Sep 17 00:00:00 2001 From: Ryo Takakura Date: Wed, 2 Jul 2025 18:24:17 +0900 Subject: [PATCH 119/234] net: bcmgenet: Initialize u64 stats seq counter Initialize u64 stats as it uses seq counter on 32bit machines as suggested by lockdep below. [ 1.830953][ T1] INFO: trying to register non-static key. [ 1.830993][ T1] The code is fine but needs lockdep annotation, or maybe [ 1.831027][ T1] you didn't initialize this object before use? [ 1.831057][ T1] turning off the locking correctness validator. [ 1.831090][ T1] CPU: 1 UID: 0 PID: 1 Comm: swapper/0 Tainted: G W 6.16.0-rc2-v7l+ #1 PREEMPT [ 1.831097][ T1] Tainted: [W]=WARN [ 1.831099][ T1] Hardware name: BCM2711 [ 1.831101][ T1] Call trace: [ 1.831104][ T1] unwind_backtrace from show_stack+0x18/0x1c [ 1.831120][ T1] show_stack from dump_stack_lvl+0x8c/0xcc [ 1.831129][ T1] dump_stack_lvl from register_lock_class+0x9e8/0x9fc [ 1.831141][ T1] register_lock_class from __lock_acquire+0x420/0x22c0 [ 1.831154][ T1] __lock_acquire from lock_acquire+0x130/0x3f8 [ 1.831166][ T1] lock_acquire from bcmgenet_get_stats64+0x4a4/0x4c8 [ 1.831176][ T1] bcmgenet_get_stats64 from dev_get_stats+0x4c/0x408 [ 1.831184][ T1] dev_get_stats from rtnl_fill_stats+0x38/0x120 [ 1.831193][ T1] rtnl_fill_stats from rtnl_fill_ifinfo+0x7f8/0x1890 [ 1.831203][ T1] rtnl_fill_ifinfo from rtmsg_ifinfo_build_skb+0xd0/0x138 [ 1.831214][ T1] rtmsg_ifinfo_build_skb from rtmsg_ifinfo+0x48/0x8c [ 1.831225][ T1] rtmsg_ifinfo from register_netdevice+0x8c0/0x95c [ 1.831237][ T1] register_netdevice from register_netdev+0x28/0x40 [ 1.831247][ T1] register_netdev from bcmgenet_probe+0x690/0x6bc [ 1.831255][ T1] bcmgenet_probe from platform_probe+0x64/0xbc [ 1.831263][ T1] platform_probe from really_probe+0xd0/0x2d4 [ 1.831269][ T1] really_probe from __driver_probe_device+0x90/0x1a4 [ 1.831273][ T1] __driver_probe_device from driver_probe_device+0x38/0x11c [ 1.831278][ T1] driver_probe_device from __driver_attach+0x9c/0x18c [ 1.831282][ T1] __driver_attach from bus_for_each_dev+0x84/0xd4 [ 1.831291][ T1] bus_for_each_dev from bus_add_driver+0xd4/0x1f4 [ 1.831303][ T1] bus_add_driver from driver_register+0x88/0x120 [ 1.831312][ T1] driver_register from do_one_initcall+0x78/0x360 [ 1.831320][ T1] do_one_initcall from kernel_init_freeable+0x2bc/0x314 [ 1.831331][ T1] kernel_init_freeable from kernel_init+0x1c/0x144 [ 1.831339][ T1] kernel_init from ret_from_fork+0x14/0x20 [ 1.831344][ T1] Exception stack(0xf082dfb0 to 0xf082dff8) [ 1.831349][ T1] dfa0: 00000000 00000000 00000000 00000000 [ 1.831353][ T1] dfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 [ 1.831356][ T1] dfe0: 00000000 
00000000 00000000 00000000 00000013 00000000 Fixes: 59aa6e3072aa ("net: bcmgenet: switch to use 64bit statistics") Reviewed-by: Florian Fainelli Signed-off-by: Ryo Takakura Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250702092417.46486-1-ryotkkr98@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fa0077bc67b7..97585c160de3 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -4092,6 +4092,12 @@ static int bcmgenet_probe(struct platform_device *pdev) for (i = 0; i <= priv->hw_params->rx_queues; i++) priv->rx_rings[i].rx_max_coalesced_frames = 1; + /* Initialize u64 stats seq counter for 32bit machines */ + for (i = 0; i <= priv->hw_params->rx_queues; i++) + u64_stats_init(&priv->rx_rings[i].stats64.syncp); + for (i = 0; i <= priv->hw_params->tx_queues; i++) + u64_stats_init(&priv->tx_rings[i].stats64.syncp); + /* libphy will determine the link state */ netif_carrier_off(dev); From 705a412a367f383430fa34bada387af2e52eb043 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 2 Jul 2025 00:00:52 +0200 Subject: [PATCH 120/234] drm/xe/pf: Clear all LMTT pages on alloc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our LMEM buffer objects are not cleared by default on alloc and during VF provisioning we only setup LMTT PTEs for the actually provisioned LMEM range. But beyond that valid range we might leave some stale data that could either point to some other VFs allocations or even to the PF pages. Explicitly clear all new LMTT page to avoid the risk that a malicious VF would try to exploit that gap. While around add asserts to catch any undesired PTE overwrites and low-level debug traces to track LMTT PT life-cycle. 
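Stripped of the LMTT specifics, the underlying idea is simply: clear the whole freshly allocated table before populating only its valid range. A user-space sketch with hypothetical sizes and PTE encoding:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_PTES 8

int main(void)
{
	/* malloc(), like the LMEM BO allocator here, returns uninitialized
	 * memory; whatever was there before is still readable */
	unsigned long long *pt = malloc(NUM_PTES * sizeof(*pt));
	if (!pt)
		return 1;

	/* analogue of the added xe_map_memset(): clear the whole page ... */
	memset(pt, 0, NUM_PTES * sizeof(*pt));

	/* ... then provision only the range that is actually valid */
	pt[0] = 0x1000ULL | 1;

	for (int i = 0; i < NUM_PTES; i++)
		printf("pte[%d] = %#llx\n", i, pt[i]);

	free(pt);
	return 0;
}
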
Fixes: b1d204058218 ("drm/xe/pf: Introduce Local Memory Translation Table") Signed-off-by: Michal Wajdeczko Cc: MichaÅ‚ Winiarski Cc: Lukasz Laguna Reviewed-by: MichaÅ‚ Winiarski Reviewed-by: Piotr Piórkowski Link: https://lore.kernel.org/r/20250701220052.1612-1-michal.wajdeczko@intel.com (cherry picked from commit 3fae6918a3e27cce20ded2551f863fb05d4bef8d) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_lmtt.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 63db66df064b..023ed6a6b49d 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level } lmtt_assert(lmtt, xe_bo_is_vram(bo)); + lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE)); + + xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size); pt->level = level; pt->bo = bo; @@ -91,6 +94,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level static void lmtt_pt_free(struct xe_lmtt_pt *pt) { + lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n", + pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE)); + xe_bo_unpin_map_no_vm(pt->bo); kfree(pt); } @@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, switch (lmtt->ops->lmtt_pte_size(level)) { case sizeof(u32): + lmtt_assert(lmtt, !overflows_type(pte, u32)); + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); break; case sizeof(u64): + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); break; default: From c9a95dbe06102cf01afee4cd83ecb29f8d587a72 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Wed, 2 Jul 2025 14:35:11 -0700 Subject: [PATCH 121/234] drm/xe: Allocate PF queue size on pow2 boundary CIRC_SPACE does not work unless the size argument is a power of 2, allocate PF queue size on power of 2 boundary. Cc: stable@vger.kernel.org Fixes: 3338e4f90c14 ("drm/xe: Use topology to determine page fault queue size") Fixes: 29582e0ea75c ("drm/xe: Add page queue multiplier") Signed-off-by: Matthew Brost Reviewed-by: Francois Dugast Link: https://lore.kernel.org/r/20250702213511.3226167-1-matthew.brost@intel.com (cherry picked from commit 491b9783126303755717c0cbde0b08ee59b6abab) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_gt_pagefault.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 10622ca471a2..6717a636b1d9 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -444,6 +444,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) #define PF_MULTIPLIER 8 pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; + pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); #undef PF_MULTIPLIER pf_queue->gt = gt; From daa099fed50a39256feb37d3fac146bf0d74152f Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Tue, 1 Jul 2025 20:58:46 -0700 Subject: [PATCH 122/234] Revert "drm/xe/xe2: Enable Indirect Ring State support for Xe2" This reverts commit fe0154cf8222d9e38c60ccc124adb2f9b5272371. Seeing some unexplained random failures during LRC context switches with indirect ring state enabled. 
The failures were always there, but the repro rate increased with the addition of WA BB as a separate BO. Commit 3a1edef8f4b5 ("drm/xe: Make WA BB part of LRC BO") helped to reduce the issues in the context switches, but didn't eliminate them completely. Indirect ring state is not required for any current features, so disable for now until failures can be root caused. Cc: stable@vger.kernel.org Fixes: fe0154cf8222 ("drm/xe/xe2: Enable Indirect Ring State support for Xe2") Signed-off-by: Matthew Brost Reviewed-by: Lucas De Marchi Link: https://lore.kernel.org/r/20250702035846.3178344-1-matthew.brost@intel.com Signed-off-by: Lucas De Marchi (cherry picked from commit 03d85ab36bcbcbe9dc962fccd3f8e54d7bb93b35) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_pci.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index ac4beaed58ff..278af53c74dc 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -140,7 +140,6 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ .has_flat_ccs = 1, \ - .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ .has_64bit_timestamp = 1, \ From fee58ca135a7b979c8b75e6d2eac60d695f9209b Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Tue, 1 Jul 2025 11:39:50 +0100 Subject: [PATCH 123/234] drm/xe/bmg: fix compressed VRAM handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There looks to be an issue in our compression handling when the BO pages are very fragmented, where we choose to skip the identity map and instead fall back to emitting the PTEs by hand when migrating memory, such that we can hopefully do more work per blit operation. However in such a case we need to ensure the src PTEs are correctly tagged with a compression enabled PAT index on dgpu xe2+, otherwise the copy will simply treat the src memory as uncompressed, leading to corruption if the memory was compressed by the user. To fix this pass along use_comp_pat into emit_pte() on the src side, to indicate that compression should be considered. 
v2 (Jonathan): tweak the commit message Fixes: 523f191cc0c7 ("drm/xe/xe_migrate: Handle migration logic for xe2+ dgfx") Signed-off-by: Matthew Auld Cc: Himal Prasad Ghimiray Cc: Thomas Hellström Cc: Akshata Jahagirdar Cc: # v6.12+ Reviewed-by: Jonathan Cavitt Link: https://lore.kernel.org/r/20250701103949.83116-2-matthew.auld@intel.com (cherry picked from commit f7a2fd776e57bd6468644bdecd91ab3aba57ba58) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_migrate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 7acdc4c78866..66bc02302c55 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -863,7 +863,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); else - emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, + emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, &src_it, src_L0, src); if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) From 70b9c0c11e55167b9552ef395bc00f4920299177 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Mon, 30 Jun 2025 15:02:18 +0200 Subject: [PATCH 124/234] uapi: bitops: use UAPI-safe variant of BITS_PER_LONG again (2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BITS_PER_LONG does not exist in UAPI headers, so can't be used by the UAPI __GENMASK(). Instead __BITS_PER_LONG needs to be used. When __GENMASK() was introduced in commit 3c7a8e190bc5 ("uapi: introduce uapi-friendly macros for GENMASK"), the code was fine. A broken revert in 1e7933a575ed ("uapi: Revert "bitops: avoid integer overflow in GENMASK(_ULL)"") introduced the incorrect usage of BITS_PER_LONG. That was fixed in commit 11fcf368506d ("uapi: bitops: use UAPI-safe variant of BITS_PER_LONG again"). But a broken sync of the kernel headers with the tools/ headers in commit fc92099902fb ("tools headers: Synchronize linux/bits.h with the kernel sources") undid the fix. Reapply the fix and while at it also fix the tools header. 
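For reference, a self-contained sketch of how the corrected macro expands. The two #defines at the top are stand-ins so the snippet builds on its own; in the real UAPI headers _UL() comes from linux/const.h and __BITS_PER_LONG from asm/bitsperlong.h:

#include <stdio.h>

#define _UL(x)			(x##UL)
#define __BITS_PER_LONG		((int)(8 * sizeof(long)))

#define __GENMASK(h, l) \
	(((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* bits 7..4 set: prints 0xf0 */
	printf("__GENMASK(7, 4)  = %#lx\n", __GENMASK(7, 4));

	/* the right-shift count is why the macro must use __BITS_PER_LONG:
	 * that is the spelling the UAPI headers actually provide, while plain
	 * BITS_PER_LONG is kernel-internal and does not build in user space */
	printf("__GENMASK(%d, 0) = %#lx\n", __BITS_PER_LONG - 1,
	       __GENMASK(__BITS_PER_LONG - 1, 0));
	return 0;
}
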
Fixes: fc92099902fb ("tools headers: Synchronize linux/bits.h with the kernel sources") Signed-off-by: Thomas Weißschuh Acked-by: Yury Norov (NVIDIA) Signed-off-by: Yury Norov (NVIDIA) --- include/uapi/linux/bits.h | 4 ++-- tools/include/uapi/linux/bits.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/bits.h b/include/uapi/linux/bits.h index 682b406e1067..a04afef9efca 100644 --- a/include/uapi/linux/bits.h +++ b/include/uapi/linux/bits.h @@ -4,9 +4,9 @@ #ifndef _UAPI_LINUX_BITS_H #define _UAPI_LINUX_BITS_H -#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (BITS_PER_LONG - 1 - (h)))) +#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) -#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) +#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) #define __GENMASK_U128(h, l) \ ((_BIT128((h)) << 1) - (_BIT128(l))) diff --git a/tools/include/uapi/linux/bits.h b/tools/include/uapi/linux/bits.h index 682b406e1067..a04afef9efca 100644 --- a/tools/include/uapi/linux/bits.h +++ b/tools/include/uapi/linux/bits.h @@ -4,9 +4,9 @@ #ifndef _UAPI_LINUX_BITS_H #define _UAPI_LINUX_BITS_H -#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (BITS_PER_LONG - 1 - (h)))) +#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) -#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) +#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) #define __GENMASK_U128(h, l) \ ((_BIT128((h)) << 1) - (_BIT128(l))) From 1afc85deecd32ff0f9972b38ecfbddb8be63143e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Mon, 30 Jun 2025 15:09:35 +0200 Subject: [PATCH 125/234] MAINTAINERS: bitmap: add UAPI headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The UAPI headers have been split out from the kernel-only headers. They maintained as part of the bitmap library. Signed-off-by: Thomas Weißschuh Signed-off-by: Yury Norov (NVIDIA) --- MAINTAINERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index fad6cb025a19..a4ff43498c35 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4181,6 +4181,7 @@ F: include/linux/cpumask_types.h F: include/linux/find.h F: include/linux/nodemask.h F: include/linux/nodemask_types.h +F: include/uapi/linux/bits.h F: include/vdso/bits.h F: lib/bitmap-str.c F: lib/bitmap.c @@ -4193,6 +4194,7 @@ F: tools/include/linux/bitfield.h F: tools/include/linux/bitmap.h F: tools/include/linux/bits.h F: tools/include/linux/find.h +F: tools/include/uapi/linux/bits.h F: tools/include/vdso/bits.h F: tools/lib/bitmap.c F: tools/lib/find_bit.c From d3a5f2871adc0c61c61869f37f3e697d97f03d8c Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 7 Jul 2025 13:41:11 +0800 Subject: [PATCH 126/234] tcp: Correct signedness in skb remaining space calculation Syzkaller reported a bug [1] where sk->sk_forward_alloc can overflow. When we send data, if an skb exists at the tail of the write queue, the kernel will attempt to append the new data to that skb. 
However, the code that checks for available space in the skb is flawed: ''' copy = size_goal - skb->len ''' The types of the variables involved are: ''' copy: ssize_t (s64 on 64-bit systems) size_goal: int skb->len: unsigned int ''' Due to C's type promotion rules, the signed size_goal is converted to an unsigned int to match skb->len before the subtraction. The result is an unsigned int. When this unsigned int result is then assigned to the s64 copy variable, it is zero-extended, preserving its non-negative value. Consequently, copy is always >= 0. Assume we are sending 2GB of data and size_goal has been adjusted to a value smaller than skb->len. The subtraction will result in copy holding a very large positive integer. In the subsequent logic, this large value is used to update sk->sk_forward_alloc, which can easily cause it to overflow. The syzkaller reproducer uses TCP_REPAIR to reliably create this condition. However, this can also occur in real-world scenarios. The tcp_bound_to_half_wnd() function can also reduce size_goal to a small value. This would cause the subsequent tcp_wmem_schedule() to set sk->sk_forward_alloc to a value close to INT_MAX. Further memory allocation requests would then cause sk_forward_alloc to wrap around and become negative. [1]: https://syzkaller.appspot.com/bug?extid=de6565462ab540f50e47 Reported-by: syzbot+de6565462ab540f50e47@syzkaller.appspotmail.com Fixes: 270a1c3de47e ("tcp: Support MSG_SPLICE_PAGES") Signed-off-by: Jiayuan Chen Reviewed-by: Eric Dumazet Reviewed-by: David Howells Link: https://patch.msgid.link/20250707054112.101081-1-jiayuan.chen@linux.dev Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f64f8276a73c..461a9ab540af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1176,7 +1176,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) goto do_error; while (msg_data_left(msg)) { - ssize_t copy = 0; + int copy = 0; skb = tcp_write_queue_tail(sk); if (skb) From b3603c0466a85bed302e80226950092ceaf09b94 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Sat, 28 Jun 2025 13:44:37 +0800 Subject: [PATCH 127/234] dt-bindings: net: sun8i-emac: Rename A523 EMAC0 to GMAC0 The datasheets refer to the first Ethernet controller as GMAC0, not EMAC0. Rename the compatible string to align with the datasheets. A fix for the device trees will be sent separately. 
Fixes: 0454b9057e98 ("dt-bindings: net: sun8i-emac: Add A523 EMAC0 compatible") Signed-off-by: Chen-Yu Tsai Acked-by: Rob Herring (Arm) Link: https://patch.msgid.link/20250628054438.2864220-2-wens@kernel.org Signed-off-by: Jakub Kicinski --- .../devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml index 7b6a2fde8175..19934d5c24e5 100644 --- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml +++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml @@ -23,7 +23,7 @@ properties: - allwinner,sun20i-d1-emac - allwinner,sun50i-h6-emac - allwinner,sun50i-h616-emac0 - - allwinner,sun55i-a523-emac0 + - allwinner,sun55i-a523-gmac0 - const: allwinner,sun50i-a64-emac reg: From 95a234f6affbf51f06338383537ab80d637bb785 Mon Sep 17 00:00:00 2001 From: Haoxiang Li Date: Thu, 3 Jul 2025 18:01:09 +0800 Subject: [PATCH 128/234] net: ethernet: rtsn: Fix a null pointer dereference in rtsn_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add check for the return value of rcar_gen4_ptp_alloc() to prevent potential null pointer dereference. Fixes: b0d3969d2b4d ("net: ethernet: rtsn: Add support for Renesas Ethernet-TSN") Cc: stable@vger.kernel.org Signed-off-by: Haoxiang Li Reviewed-by: Niklas Söderlund Link: https://patch.msgid.link/20250703100109.2541018-1-haoxiang_li2024@163.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/renesas/rtsn.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 6b3f7fca8d15..05c4b6c8c9c3 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -1259,7 +1259,12 @@ static int rtsn_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->pdev = pdev; priv->ndev = ndev; + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + if (!priv->ptp_priv) { + ret = -ENOMEM; + goto error_free; + } spin_lock_init(&priv->lock); platform_set_drvdata(pdev, priv); From 209fd720838aaf1420416494c5505096478156b4 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 3 Jul 2025 17:18:18 +0200 Subject: [PATCH 129/234] vsock: Fix transport_{g2h,h2g} TOCTOU vsock_find_cid() and vsock_dev_do_ioctl() may race with module unload. transport_{g2h,h2g} may become NULL after the NULL check. Introduce vsock_transport_local_cid() to protect from a potential null-ptr-deref. 
KASAN: null-ptr-deref in range [0x0000000000000118-0x000000000000011f] RIP: 0010:vsock_find_cid+0x47/0x90 Call Trace: __vsock_bind+0x4b2/0x720 vsock_bind+0x90/0xe0 __sys_bind+0x14d/0x1e0 __x64_sys_bind+0x6e/0xc0 do_syscall_64+0x92/0x1c0 entry_SYSCALL_64_after_hwframe+0x4b/0x53 KASAN: null-ptr-deref in range [0x0000000000000118-0x000000000000011f] RIP: 0010:vsock_dev_do_ioctl.isra.0+0x58/0xf0 Call Trace: __x64_sys_ioctl+0x12d/0x190 do_syscall_64+0x92/0x1c0 entry_SYSCALL_64_after_hwframe+0x4b/0x53 Fixes: c0cfa2d8a788 ("vsock: add multi-transports support") Suggested-by: Stefano Garzarella Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20250703-vsock-transports-toctou-v4-1-98f0eb530747@rbox.co Signed-off-by: Jakub Kicinski --- net/vmw_vsock/af_vsock.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 2e7a3034e965..39473b9e0829 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -531,9 +531,25 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) } EXPORT_SYMBOL_GPL(vsock_assign_transport); +/* + * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks. + * Otherwise we may race with module removal. Do not use on `vsk->transport`. + */ +static u32 vsock_registered_transport_cid(const struct vsock_transport **transport) +{ + u32 cid = VMADDR_CID_ANY; + + mutex_lock(&vsock_register_mutex); + if (*transport) + cid = (*transport)->get_local_cid(); + mutex_unlock(&vsock_register_mutex); + + return cid; +} + bool vsock_find_cid(unsigned int cid) { - if (transport_g2h && cid == transport_g2h->get_local_cid()) + if (cid == vsock_registered_transport_cid(&transport_g2h)) return true; if (transport_h2g && cid == VMADDR_CID_HOST) @@ -2536,18 +2552,17 @@ static long vsock_dev_do_ioctl(struct file *filp, unsigned int cmd, void __user *ptr) { u32 __user *p = ptr; - u32 cid = VMADDR_CID_ANY; int retval = 0; + u32 cid; switch (cmd) { case IOCTL_VM_SOCKETS_GET_LOCAL_CID: /* To be compatible with the VMCI behavior, we prioritize the * guest CID instead of well-know host CID (VMADDR_CID_HOST). */ - if (transport_g2h) - cid = transport_g2h->get_local_cid(); - else if (transport_h2g) - cid = transport_h2g->get_local_cid(); + cid = vsock_registered_transport_cid(&transport_g2h); + if (cid == VMADDR_CID_ANY) + cid = vsock_registered_transport_cid(&transport_h2g); if (put_user(cid, p) != 0) retval = -EFAULT; From 687aa0c5581b8d4aa87fd92973e4ee576b550cdf Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 3 Jul 2025 17:18:19 +0200 Subject: [PATCH 130/234] vsock: Fix transport_* TOCTOU Transport assignment may race with module unload. Protect new_transport from becoming a stale pointer. This also takes care of an insecure call in vsock_use_local_transport(); add a lockdep assert. 
BUG: unable to handle page fault for address: fffffbfff8056000 Oops: Oops: 0000 [#1] SMP KASAN RIP: 0010:vsock_assign_transport+0x366/0x600 Call Trace: vsock_connect+0x59c/0xc40 __sys_connect+0xe8/0x100 __x64_sys_connect+0x6e/0xc0 do_syscall_64+0x92/0x1c0 entry_SYSCALL_64_after_hwframe+0x4b/0x53 Fixes: c0cfa2d8a788 ("vsock: add multi-transports support") Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20250703-vsock-transports-toctou-v4-2-98f0eb530747@rbox.co Signed-off-by: Jakub Kicinski --- net/vmw_vsock/af_vsock.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 39473b9e0829..66404c06bdaa 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept); static bool vsock_use_local_transport(unsigned int remote_cid) { + lockdep_assert_held(&vsock_register_mutex); + if (!transport_local) return false; @@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) remote_flags = vsk->remote_addr.svm_flags; + mutex_lock(&vsock_register_mutex); + switch (sk->sk_type) { case SOCK_DGRAM: new_transport = transport_dgram; @@ -479,12 +483,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) new_transport = transport_h2g; break; default: - return -ESOCKTNOSUPPORT; + ret = -ESOCKTNOSUPPORT; + goto err; } if (vsk->transport) { - if (vsk->transport == new_transport) - return 0; + if (vsk->transport == new_transport) { + ret = 0; + goto err; + } /* transport->release() must be called with sock lock acquired. * This path can only be taken during vsock_connect(), where we @@ -508,8 +515,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) /* We increase the module refcnt to prevent the transport unloading * while there are open sockets assigned to it. */ - if (!new_transport || !try_module_get(new_transport->module)) - return -ENODEV; + if (!new_transport || !try_module_get(new_transport->module)) { + ret = -ENODEV; + goto err; + } + + /* It's safe to release the mutex after a successful try_module_get(). + * Whichever transport `new_transport` points at, it won't go away until + * the last module_put() below or in vsock_deassign_transport(). + */ + mutex_unlock(&vsock_register_mutex); if (sk->sk_type == SOCK_SEQPACKET) { if (!new_transport->seqpacket_allow || @@ -528,6 +543,9 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) vsk->transport = new_transport; return 0; +err: + mutex_unlock(&vsock_register_mutex); + return ret; } EXPORT_SYMBOL_GPL(vsock_assign_transport); From 1e7d9df379a04ccd0c2f82f39fbb69d482e864cc Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 3 Jul 2025 17:18:20 +0200 Subject: [PATCH 131/234] vsock: Fix IOCTL_VM_SOCKETS_GET_LOCAL_CID to check also `transport_local` Support returning VMADDR_CID_LOCAL in case no other vsock transport is available. 
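For illustration, a minimal userspace sketch of the ioctl being changed (error handling trimmed); with only the loopback transport loaded it can now report VMADDR_CID_LOCAL instead of VMADDR_CID_ANY:
'''
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/vm_sockets.h>

int main(void)
{
	unsigned int cid = VMADDR_CID_ANY;
	int fd = open("/dev/vsock", O_RDONLY);

	if (fd < 0 || ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) < 0)
		return 1;

	printf("local CID: %u\n", cid);	/* 1 == VMADDR_CID_LOCAL */
	close(fd);
	return 0;
}
'''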
Fixes: 0e12190578d0 ("vsock: add local transport support in the vsock core") Suggested-by: Stefano Garzarella Reviewed-by: Stefano Garzarella Signed-off-by: Michal Luczaj Link: https://patch.msgid.link/20250703-vsock-transports-toctou-v4-3-98f0eb530747@rbox.co Signed-off-by: Jakub Kicinski --- net/vmw_vsock/af_vsock.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 66404c06bdaa..1053662725f8 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -2581,6 +2581,8 @@ static long vsock_dev_do_ioctl(struct file *filp, cid = vsock_registered_transport_cid(&transport_g2h); if (cid == VMADDR_CID_ANY) cid = vsock_registered_transport_cid(&transport_h2g); + if (cid == VMADDR_CID_ANY) + cid = vsock_registered_transport_cid(&transport_local); if (put_user(cid, p) != 0) retval = -EFAULT; From 3ef07434c7dbfba302df477bb6c70e082965f232 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 5 Jul 2025 10:34:32 +0200 Subject: [PATCH 132/234] net: airoha: Fix an error handling path in airoha_probe() If an error occurs after a successful airoha_hw_init() call, airoha_ppe_deinit() needs to be called as already done in the remove function. Fixes: 00a7678310fe ("net: airoha: Introduce flowtable offload support") Signed-off-by: Christophe JAILLET Reviewed-by: Simon Horman Acked-by: Lorenzo Bianconi Link: https://patch.msgid.link/1c940851b4fa3c3ed2a142910c821493a136f121.1746715755.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/airoha/airoha_eth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 06dea3a13e77..9057180051df 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -2984,6 +2984,7 @@ static int airoha_probe(struct platform_device *pdev) error_napi_stop: for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) airoha_qdma_stop_napi(ð->qdma[i]); + airoha_ppe_deinit(eth); error_hw_cleanup: for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) airoha_hw_cleanup(ð->qdma[i]); From 0c2b53997e8f5e2ec9e0fbd17ac0436466b65488 Mon Sep 17 00:00:00 2001 From: Stefan Metzmacher Date: Wed, 2 Jul 2025 09:18:05 +0200 Subject: [PATCH 133/234] smb: server: make use of rdma_destroy_qp() The qp is created by rdma_create_qp() as t->cm_id->qp and t->qp is just a shortcut. rdma_destroy_qp() also calls ib_destroy_qp(cm_id->qp) internally, but it is protected by a mutex, clears the cm_id and also calls trace_cm_qp_destroy(). This should make the tracing more useful as both rdma_create_qp() and rdma_destroy_qp() are traces and it makes the code look more sane as functions from the same layer are used for the specific qp object. trace-cmd stream -e rdma_cma:cm_qp_create -e rdma_cma:cm_qp_destroy shows this now while doing a mount and unmount from a client: <...>-80 [002] 378.514182: cm_qp_create: cm.id=1 src=172.31.9.167:5445 dst=172.31.9.166:37113 tos=0 pd.id=0 qp_type=RC send_wr=867 recv_wr=255 qp_num=1 rc=0 <...>-6283 [001] 381.686172: cm_qp_destroy: cm.id=1 src=172.31.9.167:5445 dst=172.31.9.166:37113 tos=0 qp_num=1 Before we only saw the first line. 
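As a rough sketch of the symmetric pattern (not the driver's exact code; the PD, CQs and queue sizes are placeholders), both setup and teardown now go through the RDMA/CM layer, so both tracepoints fire and cm_id->qp stays consistent:
'''
#include <rdma/rdma_cm.h>

static int example_create_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
			     struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr qp_attr = {
		.qp_type = IB_QPT_RC,
		.send_cq = send_cq,
		.recv_cq = recv_cq,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
	};

	return rdma_create_qp(cm_id, pd, &qp_attr);	/* rdma_cma:cm_qp_create */
}

static void example_destroy_qp(struct rdma_cm_id *cm_id)
{
	rdma_destroy_qp(cm_id);	/* rdma_cma:cm_qp_destroy, clears cm_id->qp */
}
'''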
Cc: Namjae Jeon Cc: Steve French Cc: Sergey Senozhatsky Cc: Hyunchul Lee Cc: Tom Talpey Cc: linux-cifs@vger.kernel.org Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers") Signed-off-by: Stefan Metzmacher Reviewed-by: Tom Talpey Acked-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/transport_rdma.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 64a428a06ace..c6cbe0d56e32 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -433,7 +433,8 @@ static void free_transport(struct smb_direct_transport *t) if (t->qp) { ib_drain_qp(t->qp); ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); - ib_destroy_qp(t->qp); + t->qp = NULL; + rdma_destroy_qp(t->cm_id); } ksmbd_debug(RDMA, "drain the reassembly queue\n"); @@ -1940,8 +1941,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t, return 0; err: if (t->qp) { - ib_destroy_qp(t->qp); t->qp = NULL; + rdma_destroy_qp(t->cm_id); } if (t->recv_cq) { ib_destroy_cq(t->recv_cq); From 277627b431a0a6401635c416a21b2a0f77a77347 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 6 Jul 2025 02:26:45 +0100 Subject: [PATCH 134/234] ksmbd: fix a mount write count leak in ksmbd_vfs_kern_path_locked() If the call of ksmbd_vfs_lock_parent() fails, we drop the parent_path references and return an error. We need to drop the write access we just got on parent_path->mnt before we drop the mount reference - callers assume that ksmbd_vfs_kern_path_locked() returns with mount write access grabbed if and only if it has returned 0. Fixes: 864fb5d37163 ("ksmbd: fix possible deadlock in smb2_open") Signed-off-by: Al Viro Acked-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/vfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 0f3aad12e495..d3437f6644e3 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -1282,6 +1282,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry); if (err) { + mnt_drop_write(parent_path->mnt); path_put(path); path_put(parent_path); } From 50f930db22365738d9387c974416f38a06e8057e Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Tue, 8 Jul 2025 07:47:40 +0900 Subject: [PATCH 135/234] ksmbd: fix potential use-after-free in oplock/lease break ack If ksmbd_iov_pin_rsp() returns an error, a use-after-free can happen by accessing opinfo->state, and opinfo_put() and ksmbd_fd_put() could be called twice.
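The safe shape, shown here with hypothetical helpers rather than the exact ksmbd code, is to attempt the step that can fail first and release the references exactly once afterwards, instead of releasing them before a step whose error path releases them again:
'''
	ret = pin_response(work, rsp);		/* the step that can fail */
	if (ret)
		set_error_response(work);

	/* common tail, runs exactly once for both success and error */
	release_references(opinfo, fp);
'''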
Reported-by: Ziyan Xu Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index fafa86273f12..63d17cea2e95 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -8573,11 +8573,6 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) goto err_out; } - opinfo->op_state = OPLOCK_STATE_NONE; - wake_up_interruptible_all(&opinfo->oplock_q); - opinfo_put(opinfo); - ksmbd_fd_put(work, fp); - rsp->StructureSize = cpu_to_le16(24); rsp->OplockLevel = rsp_oplevel; rsp->Reserved = 0; @@ -8585,16 +8580,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work) rsp->VolatileFid = volatile_id; rsp->PersistentFid = persistent_id; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break)); - if (!ret) - return; - + if (ret) { err_out: + smb2_set_err_rsp(work); + } + opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); - opinfo_put(opinfo); ksmbd_fd_put(work, fp); - smb2_set_err_rsp(work); } static int check_lease_state(struct lease *lease, __le32 req_state) @@ -8724,11 +8718,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) } lease_state = lease->state; - opinfo->op_state = OPLOCK_STATE_NONE; - wake_up_interruptible_all(&opinfo->oplock_q); - atomic_dec(&opinfo->breaking_cnt); - wake_up_interruptible_all(&opinfo->oplock_brk); - opinfo_put(opinfo); rsp->StructureSize = cpu_to_le16(36); rsp->Reserved = 0; @@ -8737,16 +8726,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work) rsp->LeaseState = lease_state; rsp->LeaseDuration = 0; ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack)); - if (!ret) - return; - + if (ret) { err_out: + smb2_set_err_rsp(work); + } + + opinfo->op_state = OPLOCK_STATE_NONE; wake_up_interruptible_all(&opinfo->oplock_q); atomic_dec(&opinfo->breaking_cnt); wake_up_interruptible_all(&opinfo->oplock_brk); - opinfo_put(opinfo); - smb2_set_err_rsp(work); } /** From fc582cd26e888b0652bc1494f252329453fd3b23 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Jul 2025 11:00:32 -0600 Subject: [PATCH 136/234] io_uring/msg_ring: ensure io_kiocb freeing is deferred for RCU syzbot reports that defer/local task_work adding via msg_ring can hit a request that has been freed: CPU: 1 UID: 0 PID: 19356 Comm: iou-wrk-19354 Not tainted 6.16.0-rc4-syzkaller-00108-g17bbde2e1716 #0 PREEMPT(full) Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025 Call Trace: dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120 print_address_description mm/kasan/report.c:408 [inline] print_report+0xd2/0x2b0 mm/kasan/report.c:521 kasan_report+0x118/0x150 mm/kasan/report.c:634 io_req_local_work_add io_uring/io_uring.c:1184 [inline] __io_req_task_work_add+0x589/0x950 io_uring/io_uring.c:1252 io_msg_remote_post io_uring/msg_ring.c:103 [inline] io_msg_data_remote io_uring/msg_ring.c:133 [inline] __io_msg_ring_data+0x820/0xaa0 io_uring/msg_ring.c:151 io_msg_ring_data io_uring/msg_ring.c:173 [inline] io_msg_ring+0x134/0xa00 io_uring/msg_ring.c:314 __io_issue_sqe+0x17e/0x4b0 io_uring/io_uring.c:1739 io_issue_sqe+0x165/0xfd0 io_uring/io_uring.c:1762 io_wq_submit_work+0x6e9/0xb90 io_uring/io_uring.c:1874 io_worker_handle_work+0x7cd/0x1180 io_uring/io-wq.c:642 io_wq_worker+0x42f/0xeb0 io_uring/io-wq.c:696 ret_from_fork+0x3fc/0x770 arch/x86/kernel/process.c:148 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:245 
which is supposed to be safe with how requests are allocated. But msg ring requests alloc and free on their own, and hence must defer freeing to a sane time. Add an rcu_head and use kfree_rcu() in both spots where requests are freed. Only the one in io_msg_tw_complete() is strictly required as it has been visible on the other ring, but use it consistently in the other spot as well. This should not cause any other issues outside of KASAN rightfully complaining about it. Link: https://lore.kernel.org/io-uring/686cd2ea.a00a0220.338033.0007.GAE@google.com/ Reported-by: syzbot+54cbbfb4db9145d26fc2@syzkaller.appspotmail.com Cc: stable@vger.kernel.org Fixes: 0617bb500bfa ("io_uring/msg_ring: improve handling of target CQE posting") Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 2 ++ io_uring/msg_ring.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 2922635986f5..a7efcec2e3d0 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -698,6 +698,8 @@ struct io_kiocb { struct hlist_node hash_node; /* For IOPOLL setup queues, with hybrid polling */ u64 iopoll_start; + /* for private io_kiocb freeing */ + struct rcu_head rcu_head; }; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 71400d6cefc8..4c2578f2efcb 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -82,7 +82,7 @@ static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw) spin_unlock(&ctx->msg_lock); } if (req) - kmem_cache_free(req_cachep, req); + kfree_rcu(req, rcu_head); percpu_ref_put(&ctx->refs); } @@ -90,7 +90,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req, int res, u32 cflags, u64 user_data) { if (!READ_ONCE(ctx->submitter_task)) { - kmem_cache_free(req_cachep, req); + kfree_rcu(req, rcu_head); return -EOWNERDEAD; } req->opcode = IORING_OP_NOP; From 9dff55ebaef7e94e5dedb6be28a1cafff65cc467 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Jul 2025 11:04:45 -0600 Subject: [PATCH 137/234] Revert "io_uring: gate REQ_F_ISREG on !S_ANON_INODE as well" This reverts commit 6f11adcc6f36ffd8f33dbdf5f5ce073368975bc3. The problematic commit was fixed in mainline, so the work-around in io_uring can be removed at this point. 
Anonymous inodes no longer pretend to be regular files after: 1e7ab6f67824 ("anon_inode: rework assertions") Signed-off-by: Jens Axboe --- io_uring/io_uring.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 73648d26a622..5111ec040c53 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1666,12 +1666,11 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) io_req_flags_t io_file_get_flags(struct file *file) { - struct inode *inode = file_inode(file); io_req_flags_t res = 0; BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1); - if (S_ISREG(inode->i_mode) && !(inode->i_flags & S_ANON_INODE)) + if (S_ISREG(file_inode(file)->i_mode)) res |= REQ_F_ISREG; if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT)) res |= REQ_F_SUPPORT_NOWAIT; From ca3881f6fd8e9b6eb2d51e8718d07d3b8029d886 Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Wed, 18 Jun 2025 14:26:26 +0200 Subject: [PATCH 138/234] module: Fix memory deallocation on error path in move_module() The function move_module() uses the variable t to track how many memory types it has allocated and consequently how many should be freed if an error occurs. The variable is initially set to 0 and is updated when a call to module_memory_alloc() fails. However, move_module() can fail for other reasons as well, in which case t remains set to 0 and no memory is freed. Fix the problem by initializing t to MOD_MEM_NUM_TYPES. Additionally, make the deallocation loop more robust by not relying on the mod_mem_type_t enum having a signed integer as its underlying type. Fixes: c7ee8aebf6c0 ("module: add stop-grap sanity check on module memcpy()") Signed-off-by: Petr Pavlu Reviewed-by: Sami Tolvanen Reviewed-by: Daniel Gomez Link: https://lore.kernel.org/r/20250618122730.51324-2-petr.pavlu@suse.com Signed-off-by: Daniel Gomez Message-ID: <20250618122730.51324-2-petr.pavlu@suse.com> --- kernel/module/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 413ac6ea3702..9ac994b2f354 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2697,7 +2697,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) static int move_module(struct module *mod, struct load_info *info) { int i; - enum mod_mem_type t = 0; + enum mod_mem_type t = MOD_MEM_NUM_TYPES; int ret = -ENOMEM; bool codetag_section_found = false; @@ -2776,7 +2776,7 @@ static int move_module(struct module *mod, struct load_info *info) return 0; out_err: module_memory_restore_rox(mod); - for (t--; t >= 0; t--) + while (t--) module_memory_free(mod, t); if (codetag_section_found) codetag_free_module_sections(mod); From eb0994a954978f0edd3efb38d0cbe6744df8b83d Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Wed, 18 Jun 2025 14:26:27 +0200 Subject: [PATCH 139/234] module: Avoid unnecessary return value initialization in move_module() All error conditions in move_module() set the return value by updating the ret variable. Therefore, it is not necessary to the initialize the variable when declaring it. Remove the unnecessary initialization. 
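To illustrate the loop-robustness point from the move_module() fix above (a generic sketch; free_type() is a stand-in): with an unsigned counter, a descending loop guarded by "t >= 0" never terminates, whereas "while (t--)" visits each allocated index once and does nothing when t is 0:
'''
	unsigned int t = 3;	/* number of successfully allocated types */

	while (t--)
		free_type(t);	/* frees index 2, 1, 0, then stops */
'''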
Signed-off-by: Petr Pavlu Reviewed-by: Sami Tolvanen Reviewed-by: Daniel Gomez Link: https://lore.kernel.org/r/20250618122730.51324-3-petr.pavlu@suse.com Signed-off-by: Daniel Gomez Message-ID: <20250618122730.51324-3-petr.pavlu@suse.com> --- kernel/module/main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 9ac994b2f354..7822b91fca6b 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2696,9 +2696,8 @@ static int find_module_sections(struct module *mod, struct load_info *info) static int move_module(struct module *mod, struct load_info *info) { - int i; + int i, ret; enum mod_mem_type t = MOD_MEM_NUM_TYPES; - int ret = -ENOMEM; bool codetag_section_found = false; for_each_mod_mem_type(type) { From 570db4b39f535a8bb722adb8be0280d09e34ca99 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 10 Jun 2025 18:33:28 +0200 Subject: [PATCH 140/234] module: Make sure relocations are applied to the per-CPU section The per-CPU data section is handled differently than the other sections. The memory allocations requires a special __percpu pointer and then the section is copied into the view of each CPU. Therefore the SHF_ALLOC flag is removed to ensure move_module() skips it. Later, relocations are applied and apply_relocations() skips sections without SHF_ALLOC because they have not been copied. This also skips the per-CPU data section. The missing relocations result in a NULL pointer on x86-64 and very small values on x86-32. This results in a crash because it is not skipped like NULL pointer would and can't be dereferenced. Such an assignment happens during static per-CPU lock initialisation with lockdep enabled. Allow relocation processing for the per-CPU section even if SHF_ALLOC is missing. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202506041623.e45e4f7d-lkp@intel.com Fixes: 1a6100caae425 ("Don't relocate non-allocated regions in modules.") #v2.6.1-rc3 Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Petr Pavlu Link: https://lore.kernel.org/r/20250610163328.URcsSUC1@linutronix.de Signed-off-by: Daniel Gomez Message-ID: <20250610163328.URcsSUC1@linutronix.de> --- kernel/module/main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 7822b91fca6b..c2c08007029d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1573,8 +1573,14 @@ static int apply_relocations(struct module *mod, const struct load_info *info) if (infosec >= info->hdr->e_shnum) continue; - /* Don't bother with non-allocated sections */ - if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) + /* + * Don't bother with non-allocated sections. + * An exception is the percpu section, which has separate allocations + * for individual CPUs. We relocate the percpu section in the initial + * ELF template and subsequently copy it to the per-CPU destinations. + */ + if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC) && + (!infosec || infosec != info->index.pcpu)) continue; if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) From af1ccf546e5f2915fbbde26841db43a971d81cf3 Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Fri, 4 Jul 2025 21:39:59 +0200 Subject: [PATCH 141/234] MAINTAINERS: update Daniel Gomez's role and email address Update Daniel Gomez's modules reviewer role to maintainer. 
This is according to the plan [1][2][3] of scaling with more reviewers for modules (for the incoming Rust support [4]) and rotate [5] every 6 months. Acked-by: Luis Chamberlain Link: https://lore.kernel.org/linux-modules/ZsPANzx4-5DrOl5m@bombadil.infradead.org [1] Link: https://lore.kernel.org/linux-modules/20240821174021.2371547-1-mcgrof@kernel.org [2] Link: https://lore.kernel.org/linux-modules/458901be-1da8-4987-9c72-5aa3da6db15e@suse.com [3] Link: https://lore.kernel.org/linux-modules/20250702-module-params-v3-v14-0-5b1cc32311af@kernel.org [4] Link: https://lore.kernel.org/linux-modules/Z3gDAnPlA3SZEbgl@bombadil.infradead.org [5] Acked-by: Petr Pavlu Signed-off-by: Daniel Gomez --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index fad6cb025a19..aa9b3677276c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -16822,8 +16822,8 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h MODULE SUPPORT M: Luis Chamberlain M: Petr Pavlu +M: Daniel Gomez R: Sami Tolvanen -R: Daniel Gomez L: linux-modules@vger.kernel.org L: linux-kernel@vger.kernel.org S: Maintained From fec5e6f97dae5fbd628c444148b77728eae3bb93 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 8 Jul 2025 15:21:39 -0400 Subject: [PATCH 142/234] bcachefs: Don't set BCH_FS_error on transaction restart This started showing up more when we started logging the error being corrected in the journal - but __bch2_fsck_err() could return transaction restarts before that. Setting BCH_FS_error incorrectly causes recovery passes to not be cleared, among other issues. Fixes: b43f72492768 ("bcachefs: Log fsck errors in the journal") Signed-off-by: Kent Overstreet --- fs/bcachefs/error.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index ea37f5af1800..267e73d9d7e6 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -633,7 +633,9 @@ int __bch2_fsck_err(struct bch_fs *c, * log_fsck_err()s: that would require us to track for every error type * which recovery pass corrects it, to get the fsck exit status correct: */ - if (bch2_err_matches(ret, BCH_ERR_fsck_fix)) { + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { + /* nothing */ + } else if (bch2_err_matches(ret, BCH_ERR_fsck_fix)) { set_bit(BCH_FS_errors_fixed, &c->flags); } else { set_bit(BCH_FS_errors_not_fixed, &c->flags); From 5b937a1ed64ebeba8876e398110a5790ad77407c Mon Sep 17 00:00:00 2001 From: Mikhail Paulyshka Date: Sat, 24 May 2025 17:53:19 +0300 Subject: [PATCH 143/234] x86/rdrand: Disable RDSEED on AMD Cyan Skillfish AMD Cyan Skillfish (Family 17h, Model 47h, Stepping 0h) has an error that causes RDSEED to always return 0xffffffff, while RDRAND works correctly. Mask the RDSEED cap for this CPU so that both /proc/cpuinfo and direct CPUID read report RDSEED as unavailable. [ bp: Move to amd.c, massage. 
] Signed-off-by: Mikhail Paulyshka Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/20250524145319.209075-1-me@mixaill.net --- arch/x86/include/asm/msr-index.h | 1 + arch/x86/kernel/cpu/amd.c | 7 +++++++ tools/arch/x86/include/asm/msr-index.h | 1 + 3 files changed, 9 insertions(+) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b7dded3c8113..5cfb5d74dd5f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -628,6 +628,7 @@ #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD_PPIN_CTL 0xc00102f0 #define MSR_AMD_PPIN 0xc00102f1 +#define MSR_AMD64_CPUID_FN_7 0xc0011002 #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 655f44f89ded..e1c4661f4430 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -930,6 +930,13 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); + + /* Disable RDSEED on AMD Cyan Skillfish because of an error. */ + if (c->x86_model == 0x47 && c->x86_stepping == 0x0) { + clear_cpu_cap(c, X86_FEATURE_RDSEED); + msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); + pr_emerg("RDSEED is not reliable on this platform; disabling.\n"); + } } static void init_amd_zen3(struct cpuinfo_x86 *c) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index b7dded3c8113..5cfb5d74dd5f 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -628,6 +628,7 @@ #define MSR_AMD64_OSVW_STATUS 0xc0010141 #define MSR_AMD_PPIN_CTL 0xc00102f0 #define MSR_AMD_PPIN 0xc00102f1 +#define MSR_AMD64_CPUID_FN_7 0xc0011002 #define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 From a74bb5f202dabddfea96abc1328fcedae8aa140a Mon Sep 17 00:00:00 2001 From: Mikhail Paulyshka Date: Tue, 8 Jul 2025 16:39:10 +0200 Subject: [PATCH 144/234] x86/CPU/AMD: Disable INVLPGB on Zen2 AMD Cyan Skillfish (Family 17h, Model 47h, Stepping 0h) has an issue that causes system oopses and panics when performing TLB flush using INVLPGB. However, the problem is that that machine has misconfigured CPUID and should not report the INVLPGB bit in the first place. So zap the kernel's representation of the flag so that nothing gets confused. [ bp: Massage. ] Fixes: 767ae437a32d ("x86/mm: Add INVLPGB feature and Kconfig entry") Signed-off-by: Mikhail Paulyshka Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/1ebe845b-322b-4929-9093-b41074e9e939@mixaill.net --- arch/x86/kernel/cpu/amd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e1c4661f4430..1b1ff60e9202 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -937,6 +937,9 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); pr_emerg("RDSEED is not reliable on this platform; disabling.\n"); } + + /* Correct misconfigured CPUID on some clients. 
*/ + clear_cpu_cap(c, X86_FEATURE_INVLPGB); } static void init_amd_zen3(struct cpuinfo_x86 *c) From 31ec70afaaad11fb08970bd1b0dc9ebae3501e16 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 7 Jul 2025 11:24:33 +0100 Subject: [PATCH 145/234] rxrpc: Fix over large frame size warning Under some circumstances, the compiler will emit the following warning for rxrpc_send_response(): net/rxrpc/output.c: In function 'rxrpc_send_response': net/rxrpc/output.c:974:1: warning: the frame size of 1160 bytes is larger than 1024 bytes This occurs because the local variables include a 16-element scatterlist array and a 16-element bio_vec array. It's probably not actually a problem as this function is only called by the rxrpc I/O thread function in a kernel thread and there won't be much on the stack before it. Fix this by overlaying the bio_vec array over the kvec array in the rxrpc_local struct. There is one of these per I/O thread and the kvec array is intended for pointing at bits of a packet to be transmitted, typically a DATA or an ACK packet. As packets for a local endpoint are only transmitted by its specific I/O thread, there can be no race, and so overlaying this bit of memory should be no problem. Fixes: 5800b1cf3fd8 ("rxrpc: Allow CHALLENGEs to the passed to the app for a RESPONSE") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202506240423.E942yKJP-lkp@intel.com/ Signed-off-by: David Howells cc: Marc Dionne cc: Simon Horman cc: linux-afs@lists.infradead.org Link: https://patch.msgid.link/20250707102435.2381045-2-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rxrpc/ar-internal.h | 15 +++++++++------ net/rxrpc/output.c | 5 ++++- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 5bd3922c310d..376e33dce8c1 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -361,12 +361,15 @@ struct rxrpc_local { struct list_head new_client_calls; /* Newly created client calls need connection */ spinlock_t client_call_lock; /* Lock for ->new_client_calls */ struct sockaddr_rxrpc srx; /* local address */ - /* Provide a kvec table sufficiently large to manage either a DATA - * packet with a maximum set of jumbo subpackets or a PING ACK padded - * out to 64K with zeropages for PMTUD. - */ - struct kvec kvec[1 + RXRPC_MAX_NR_JUMBO > 3 + 16 ? - 1 + RXRPC_MAX_NR_JUMBO : 3 + 16]; + union { + /* Provide a kvec table sufficiently large to manage either a + * DATA packet with a maximum set of jumbo subpackets or a PING + * ACK padded out to 64K with zeropages for PMTUD. + */ + struct kvec kvec[1 + RXRPC_MAX_NR_JUMBO > 3 + 16 ? 
+ 1 + RXRPC_MAX_NR_JUMBO : 3 + 16]; + struct bio_vec bvec[3 + 16]; + }; }; /* diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 0af19bcdc80a..ef7b3096c95e 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -924,7 +924,7 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response { struct rxrpc_skb_priv *sp = rxrpc_skb(response); struct scatterlist sg[16]; - struct bio_vec bvec[16]; + struct bio_vec *bvec = conn->local->bvec; struct msghdr msg; size_t len = sp->resp.len; __be32 wserial; @@ -938,6 +938,9 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response if (ret < 0) goto fail; nr_sg = ret; + ret = -EIO; + if (WARN_ON_ONCE(nr_sg > ARRAY_SIZE(conn->local->bvec))) + goto fail; for (int i = 0; i < nr_sg; i++) bvec_set_page(&bvec[i], sg_page(&sg[i]), sg[i].length, sg[i].offset); From 78b7920a03351a8402de2f81914c1d2e2bdf24b7 Mon Sep 17 00:00:00 2001 From: EricChan Date: Thu, 3 Jul 2025 10:04:49 +0800 Subject: [PATCH 146/234] net: stmmac: Fix interrupt handling for level-triggered mode in DWC_XGMAC2 According to the Synopsys Controller IP XGMAC-10G Ethernet MAC Databook v3.30a (section 2.7.2), when the INTM bit in the DMA_Mode register is set to 2, the sbd_perch_tx_intr_o[] and sbd_perch_rx_intr_o[] signals operate in level-triggered mode. However, in this configuration, the DMA does not assert the XGMAC_NIS status bit for Rx or Tx interrupt events. This creates a functional regression where the condition if (likely(intr_status & XGMAC_NIS)) in dwxgmac2_dma_interrupt() will never evaluate to true, preventing proper interrupt handling for level-triggered mode. The hardware specification explicitly states that "The DMA does not assert the NIS status bit for the Rx or Tx interrupt events" (Synopsys DWC_XGMAC2 Databook v3.30a, sec. 2.7.2). The fix ensures correct handling of both edge and level-triggered interrupts while maintaining backward compatibility with existing configurations. It has been tested on the hardware device (not publicly available), and it can properly trigger the RX and TX interrupt handling in both the INTM=0 and INTM=2 configurations. 
Fixes: d6ddfacd95c7 ("net: stmmac: Add DMA related callbacks for XGMAC2") Tested-by: EricChan Signed-off-by: EricChan Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250703020449.105730-1-chenchuangyu@xiaomi.com Signed-off-by: Jakub Kicinski --- .../ethernet/stmicro/stmmac/dwxgmac2_dma.c | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 7840bc403788..5dcc95bc0ad2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & XGMAC_NIS)) { - if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_rx; - } - if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_tx; - } + if (likely(intr_status & XGMAC_RI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; } /* Clear interrupts */ From a141af8eb2272ab0f677a7f2653874840bc9b214 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Thu, 3 Jul 2025 13:49:39 +0200 Subject: [PATCH 147/234] net: phy: smsc: Fix Auto-MDIX configuration when disabled by strap Correct the Auto-MDIX configuration to ensure userspace settings are respected when the feature is disabled by the AUTOMDIX_EN hardware strap. The LAN9500 PHY allows its default MDI-X mode to be configured via a hardware strap. If this strap sets the default to "MDI-X off", the driver was previously unable to enable Auto-MDIX from userspace. When handling the ETH_TP_MDI_AUTO case, the driver would set the SPECIAL_CTRL_STS_AMDIX_ENABLE_ bit but neglected to set the required SPECIAL_CTRL_STS_OVRRD_AMDIX_ bit. Without the override flag, the PHY falls back to its hardware strap default, ignoring the software request. This patch corrects the behavior by also setting the override bit when enabling Auto-MDIX. This ensures that the userspace configuration takes precedence over the hardware strap, allowing Auto-MDIX to be enabled correctly in all scenarios. 
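Condensed from the diff below, the ETH_TP_MDI_AUTO case now requests both bits, since the enable bit alone is ignored in favour of the strap default:
'''
	case ETH_TP_MDI_AUTO:
		/* the override bit makes software win over the AUTOMDIX_EN strap */
		val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
		      SPECIAL_CTRL_STS_AMDIX_ENABLE_;
		break;
'''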
Fixes: 05b35e7eb9a1 ("smsc95xx: add phylib support") Signed-off-by: Oleksij Rempel Cc: Andre Edich Reviewed-by: Maxime Chevallier Link: https://patch.msgid.link/20250703114941.3243890-2-o.rempel@pengutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/smsc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 31463b9e5697..adf12d7108b5 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -167,7 +167,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev) SPECIAL_CTRL_STS_AMDIX_STATE_; break; case ETH_TP_MDI_AUTO: - val = SPECIAL_CTRL_STS_AMDIX_ENABLE_; + val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_; break; default: return genphy_config_aneg(phydev); From 0713e55533c88a20edb53eea6517dc56786a0078 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Thu, 3 Jul 2025 13:49:40 +0200 Subject: [PATCH 148/234] net: phy: smsc: Force predictable MDI-X state on LAN87xx Override the hardware strap configuration for MDI-X mode to ensure a predictable initial state for the driver. The initial mode of the LAN87xx PHY is determined by the AUTOMDIX_EN strap pin, but the driver has no documented way to read its latched status. This unpredictability means the driver cannot know if the PHY has initialized with Auto-MDIX enabled or disabled, preventing it from providing a reliable interface to the user. This patch introduces a `config_init` hook that forces the PHY into a known state by explicitly enabling Auto-MDIX. Fixes: 05b35e7eb9a1 ("smsc95xx: add phylib support") Signed-off-by: Oleksij Rempel Cc: Andre Edich Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20250703114941.3243890-3-o.rempel@pengutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/smsc.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index adf12d7108b5..ad9a3d91bb8a 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -262,6 +262,33 @@ int lan87xx_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(lan87xx_read_status); +static int lan87xx_phy_config_init(struct phy_device *phydev) +{ + int rc; + + /* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN + * hardware strap, but the driver cannot read the strap's status. This + * creates an unpredictable initial state. + * + * To ensure consistent and reliable behavior across all boards, + * override the strap configuration on initialization and force the PHY + * into a known state with Auto-MDIX enabled, which is the expected + * default for modern hardware. 
+ */ + rc = phy_modify(phydev, SPECIAL_CTRL_STS, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_ | + SPECIAL_CTRL_STS_AMDIX_STATE_, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_); + if (rc < 0) + return rc; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + return smsc_phy_config_init(phydev); +} + static int lan874x_phy_config_init(struct phy_device *phydev) { u16 val; @@ -696,7 +723,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .read_status = lan87xx_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan87xx_phy_config_init, .soft_reset = smsc_phy_reset, .config_aneg = lan87xx_config_aneg, From 9dfe110cc0f6ef42af8e81ce52aef34a647d0b8a Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Thu, 3 Jul 2025 13:49:41 +0200 Subject: [PATCH 149/234] net: phy: smsc: Fix link failure in forced mode with Auto-MDIX Force a fixed MDI-X mode when auto-negotiation is disabled to prevent link instability. When forcing the link speed and duplex on a LAN9500 PHY (e.g., with `ethtool -s eth0 autoneg off ...`) while leaving MDI-X control in auto mode, the PHY fails to establish a stable link. This occurs because the PHY's Auto-MDIX algorithm is not designed to operate when auto-negotiation is disabled. In this state, the PHY continuously toggles the TX/RX signal pairs, which prevents the link partner from synchronizing. This patch resolves the issue by detecting when auto-negotiation is disabled. If the MDI-X control mode is set to 'auto', the driver now forces a specific, stable mode (ETH_TP_MDI) to prevent the pair toggling. This choice of a fixed MDI mode mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN strap were configured for a fixed MDI connection. Fixes: 05b35e7eb9a1 ("smsc95xx: add phylib support") Signed-off-by: Oleksij Rempel Cc: Andre Edich Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20250703114941.3243890-4-o.rempel@pengutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/smsc.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index ad9a3d91bb8a..b6489da5cfcd 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev) static int lan87xx_config_aneg(struct phy_device *phydev) { - int rc; + u8 mdix_ctrl; int val; + int rc; - switch (phydev->mdix_ctrl) { + /* When auto-negotiation is disabled (forced mode), the PHY's + * Auto-MDIX will continue toggling the TX/RX pairs. + * + * To establish a stable link, we must select a fixed MDI mode. + * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is + * 'auto'), we default to ETH_TP_MDI. This choice of a ETH_TP_MDI mode + * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN + * strap were configured for a fixed MDI connection. 
+ */ + if (phydev->autoneg == AUTONEG_DISABLE) { + if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO) + mdix_ctrl = ETH_TP_MDI; + else + mdix_ctrl = phydev->mdix_ctrl; + } else { + mdix_ctrl = phydev->mdix_ctrl; + } + + switch (mdix_ctrl) { case ETH_TP_MDI: val = SPECIAL_CTRL_STS_OVRRD_AMDIX_; break; @@ -184,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev) rc |= val; phy_write(phydev, SPECIAL_CTRL_STS, rc); - phydev->mdix = phydev->mdix_ctrl; + phydev->mdix = mdix_ctrl; return genphy_config_aneg(phydev); } From d88dfb756d557e40e88406e8c962c0684bc9eb87 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 2 Jul 2025 07:02:15 +0200 Subject: [PATCH 150/234] agp/amd64: Check AGP Capability before binding to unsupported devices Since commit 172efbb40333 ("AGP: Try unsupported AGP chipsets on x86-64 by default"), the AGP driver for AMD Opteron/Athlon64 CPUs has attempted to bind to any PCI device possessing an AGP Capability. Commit 6fd024893911 ("amd64-agp: Probe unknown AGP devices the right way") subsequently reworked the driver to perform a bind attempt to any PCI device (regardless of AGP Capability) and reject a device in the driver's ->probe() hook if it lacks the AGP Capability. On modern CPUs exposing an AMD IOMMU, this subtle change results in an annoying message with KERN_CRIT severity: pci 0000:00:00.2: Resources present before probing The message is emitted by the driver core prior to invoking a driver's ->probe() hook. The check for an AGP Capability in the ->probe() hook happens too late to prevent the message. The message has appeared only recently with commit 3be5fa236649 (Revert "iommu/amd: Prevent binding other PCI drivers to IOMMU PCI devices"). Prior to the commit, no driver could bind to AMD IOMMUs. The reason for the message is that an MSI is requested early on for the AMD IOMMU, which results in a call from msi_sysfs_create_group() to devm_device_add_group(). A devres resource is thus attached to the driver-less AMD IOMMU, which is normally not allowed, but presumably cannot be avoided because requesting the MSI from a regular PCI driver might be too late. Avoid the message by once again checking for an AGP Capability *before* binding to an unsupported device. Achieve that by way of the PCI core's dynid functionality. pci_add_dynid() can fail only with -ENOMEM (on allocation failure) or -EINVAL (on bus_to_subsys() failure). It doesn't seem worth the extra code to propagate those error codes out of the for_each_pci_dev() loop, so simply error out with -ENODEV if there was no successful bind attempt. In the -ENOMEM case, a splat is emitted anyway, and the -EINVAL case can never happen because it requires failure of bus_register(&pci_bus_type), in which case there's no driver probing of PCI devices. Hans has voiced a preference to no longer probe unsupported devices by default (i.e. set agp_try_unsupported = 0). In fact, the help text for CONFIG_AGP_AMD64 pretends this to be the default. Alternatively, he proposes probing only devices with PCI_CLASS_BRIDGE_HOST. However these approaches risk regressing users who depend on the existing behavior. 
Fixes: 3be5fa236649 (Revert "iommu/amd: Prevent binding other PCI drivers to IOMMU PCI devices") Reported-by: Fedor Pchelkin Closes: https://lore.kernel.org/r/wpoivftgshz5b5aovxbkxl6ivvquinukqfvb5z6yi4mv7d25ew@edtzr2p74ckg/ Reported-by: Hans de Goede Closes: https://lore.kernel.org/r/20250625112411.4123-1-hansg@kernel.org/ Tested-by: Hans de Goede Signed-off-by: Lukas Wunner Reviewed-by: Hans de Goede Link: https://lore.kernel.org/r/b29e7fbfc6d146f947603d0ebaef44cbd2f0d754.1751468802.git.lukas@wunner.de --- drivers/char/agp/amd64-agp.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index bf490967241a..2505df1f4e69 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = { MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table); -static const struct pci_device_id agp_amd64_pci_promisc_table[] = { - { PCI_DEVICE_CLASS(0, 0) }, - { } -}; - static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume); static struct pci_driver agp_amd64_pci_driver = { @@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = { /* Not static due to IOMMU code calling it early. */ int __init agp_amd64_init(void) { + struct pci_dev *pdev = NULL; int err = 0; if (agp_off) @@ -767,9 +763,13 @@ int __init agp_amd64_init(void) } /* Look for any AGP bridge */ - agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; - err = driver_attach(&agp_amd64_pci_driver.driver); - if (err == 0 && agp_bridges_found == 0) { + for_each_pci_dev(pdev) + if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) + pci_add_dynid(&agp_amd64_pci_driver, + pdev->vendor, pdev->device, + pdev->subsystem_vendor, + pdev->subsystem_device, 0, 0, 0); + if (agp_bridges_found == 0) { pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; } From a066917360ed5000c4f73fb190773cfac004c885 Mon Sep 17 00:00:00 2001 From: Pagadala Yesu Anjaneyulu Date: Mon, 9 Jun 2025 21:35:14 +0300 Subject: [PATCH 151/234] wifi: mac80211: Fix uninitialized variable with __free() in ieee80211_ml_epcs() The cleanup attribute runs kfree() when the variable goes out of scope. There is a possibility that the link_elems variable is uninitialized if the loop ends before an assignment is made to this variable. This leads to uninitialized variable bug. Fix this by assigning link_elems to NULL. 
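As a generic sketch of the pitfall (struct foo and cond are placeholders; __free(kfree) comes from the kernel's cleanup helpers): the cleanup handler runs on every scope exit, so the pointer must start out as NULL, which makes the automatic kfree() a harmless no-op on early returns:
'''
	struct foo *p __free(kfree) = NULL;

	if (!cond)
		return;		/* cleanup runs here; without "= NULL" it would kfree() garbage */

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return;
	/* use p; it is freed automatically when it goes out of scope */
'''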
Signed-off-by: Pagadala Yesu Anjaneyulu Reviewed-by: Ilan Peer Signed-off-by: Miri Korenblit Link: https://patch.msgid.link/20250609213231.eeacd3738a7b.I0f876fa1359daeec47ab3aef098255a9c23efd70@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d26dcee5683a..0ed68182f79b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -10705,8 +10705,8 @@ static void ieee80211_ml_epcs(struct ieee80211_sub_if_data *sdata, */ for_each_mle_subelement(sub, (const u8 *)elems->ml_epcs, elems->ml_epcs_len) { + struct ieee802_11_elems *link_elems __free(kfree) = NULL; struct ieee80211_link_data *link; - struct ieee802_11_elems *link_elems __free(kfree); u8 *pos = (void *)sub->data; u16 control; ssize_t len; From 3da6bb419750f3ad834786d6ba7c9d5d062c770b Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 9 Jul 2025 20:27:52 +0900 Subject: [PATCH 152/234] perf/core: Fix WARN in perf_sigtrap() Since exit_task_work() runs after perf_event_exit_task_context() updated ctx->task to TASK_TOMBSTONE, perf_sigtrap() from perf_pending_task() might observe event->ctx->task == TASK_TOMBSTONE. Swap the early exit tests in order not to hit WARN_ON_ONCE(). Closes: https://syzkaller.appspot.com/bug?extid=2fe61cb2a86066be6985 Reported-by: syzbot Signed-off-by: Tetsuo Handa Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/b1c224bd-97f9-462c-a3e3-125d5e19c983@I-love.SAKURA.ne.jp --- kernel/events/core.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 0db36b2b2448..22fdf0c187cd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7203,6 +7203,13 @@ void perf_event_wakeup(struct perf_event *event) static void perf_sigtrap(struct perf_event *event) { + /* + * Both perf_pending_task() and perf_pending_irq() can race with the + * task exiting. + */ + if (current->flags & PF_EXITING) + return; + /* * We'd expect this to only occur if the irq_work is delayed and either * ctx->task or current has changed in the meantime. This can be the @@ -7211,13 +7218,6 @@ static void perf_sigtrap(struct perf_event *event) if (WARN_ON_ONCE(event->ctx->task != current)) return; - /* - * Both perf_pending_task() and perf_pending_irq() can race with the - * task exiting. - */ - if (current->flags & PF_EXITING) - return; - send_sig_perf((void __user *)event->pending_addr, event->orig_type, event->attr.sig_data); } From f6bfc9afc7510cb5e6fbe0a17c507917b0120280 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 7 Jul 2025 15:11:55 +0200 Subject: [PATCH 153/234] drm/framebuffer: Acquire internal references on GEM handles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Acquire GEM handles in drm_framebuffer_init() and release them in the corresponding drm_framebuffer_cleanup(). Ties the handle's lifetime to the framebuffer. Not all GEM buffer objects have GEM handles. If not set, no refcounting takes place. This is the case for some fbdev emulation. This is not a problem as these GEM objects do not use dma-bufs and drivers will not release them while fbdev emulation is running. Framebuffer flags keep a bit per color plane of which the framebuffer holds a GEM handle reference. As all drivers use drm_framebuffer_init(), they will now all hold dma-buf references as fixed in commit 5307dce878d4 ("drm/gem: Acquire references on GEM handles for framebuffers"). 
In the GEM framebuffer helpers, restore the original ref counting on buffer objects. As the helpers for handle refcounting are now no longer called from outside the DRM core, unexport the symbols. v3: - don't mix internal flags with mode flags (Christian) v2: - track framebuffer handle refs by flag - drop gma500 cleanup (Christian) Signed-off-by: Thomas Zimmermann Fixes: 5307dce878d4 ("drm/gem: Acquire references on GEM handles for framebuffers") Reported-by: Bert Karwatzki Closes: https://lore.kernel.org/dri-devel/20250703115915.3096-1-spasswolf@web.de/ Tested-by: Bert Karwatzki Tested-by: Mario Limonciello Tested-by: Borislav Petkov (AMD) Cc: Thomas Zimmermann Cc: Anusha Srivatsa Cc: Christian König Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Sumit Semwal Cc: "Christian König" Cc: linux-media@vger.kernel.org Cc: dri-devel@lists.freedesktop.org Cc: linaro-mm-sig@lists.linaro.org Cc: Reviewed-by: Christian König Link: https://lore.kernel.org/r/20250707131224.249496-1-tzimmermann@suse.de --- drivers/gpu/drm/drm_framebuffer.c | 31 ++++++++++++++-- drivers/gpu/drm/drm_gem.c | 38 ++++++++++++-------- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 16 ++++----- drivers/gpu/drm/drm_internal.h | 2 +- include/drm/drm_framebuffer.h | 7 ++++ 5 files changed, 68 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index b781601946db..63a70f285cce 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -862,11 +862,23 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free); int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -875,7 +887,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); if (ret) - goto out; + goto err; mutex_lock(&dev->mode_config.fb_lock); dev->mode_config.num_fb++; @@ -883,7 +895,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, mutex_unlock(&dev->mode_config.fb_lock); drm_mode_object_register(dev, &fb->base); -out: + + return 0; + +err: + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { + drm_gem_object_handle_put_unlocked(fb->obj[i]); + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -960,6 +981,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; + unsigned int i; + + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) + drm_gem_object_handle_put_unlocked(fb->obj[i]); + } mutex_lock(&dev->mode_config.fb_lock); list_del(&fb->head); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4bf0a76bb35e..aad6ac9748cc 100644 --- a/drivers/gpu/drm/drm_gem.c 
+++ b/drivers/gpu/drm/drm_gem.c @@ -223,23 +223,34 @@ static void drm_gem_object_handle_get(struct drm_gem_object *obj) } /** - * drm_gem_object_handle_get_unlocked - acquire reference on user-space handles + * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any * @obj: GEM object * - * Acquires a reference on the GEM buffer object's handle. Required - * to keep the GEM object alive. Call drm_gem_object_handle_put_unlocked() - * to release the reference. + * Acquires a reference on the GEM buffer object's handle. Required to keep + * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked() + * to release the reference. Does nothing if the buffer object has no handle. + * + * Returns: + * True if a handle exists, or false otherwise */ -void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj) +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; guard(mutex)(&dev->object_name_lock); - drm_WARN_ON(dev, !obj->handle_count); /* first ref taken in create-tail helper */ + /* + * First ref taken during GEM object creation, if any. Some + * drivers set up internal framebuffers with GEM objects that + * do not have a GEM handle. Hence, this counter can be zero. + */ + if (!obj->handle_count) + return false; + drm_gem_object_handle_get(obj); + + return true; } -EXPORT_SYMBOL(drm_gem_object_handle_get_unlocked); /** * drm_gem_object_handle_free - release resources bound to userspace handles @@ -272,7 +283,7 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) } /** - * drm_gem_object_handle_put_unlocked - releases reference on user-space handles + * drm_gem_object_handle_put_unlocked - releases reference on user-space handle * @obj: GEM object * * Releases a reference on the GEM buffer object's handle. Possibly releases @@ -283,14 +294,14 @@ void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; bool final = false; - if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) + if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) return; /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before + * we checked for a name. 
+ */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { @@ -303,7 +314,6 @@ void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) if (final) drm_gem_object_put(obj); } -EXPORT_SYMBOL(drm_gem_object_handle_put_unlocked); /* * Called at device or object close to release the file's diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 14a87788695d..6f72e7a0f427 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -99,7 +99,7 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb) unsigned int i; for (i = 0; i < fb->format->num_planes; i++) - drm_gem_object_handle_put_unlocked(fb->obj[i]); + drm_gem_object_put(fb->obj[i]); drm_framebuffer_cleanup(fb); kfree(fb); @@ -182,10 +182,8 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, if (!objs[i]) { drm_dbg_kms(dev, "Failed to lookup GEM object\n"); ret = -ENOENT; - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; } - drm_gem_object_handle_get_unlocked(objs[i]); - drm_gem_object_put(objs[i]); min_size = (height - 1) * mode_cmd->pitches[i] + drm_format_info_min_pitch(info, i, width) @@ -195,22 +193,22 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum size (%u) for plane %d\n", objs[i]->size, min_size, i); - drm_gem_object_handle_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); ret = -EINVAL; - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; } } ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); if (ret) - goto err_gem_object_handle_put_unlocked; + goto err_gem_object_put; return 0; -err_gem_object_handle_put_unlocked: +err_gem_object_put: while (i > 0) { --i; - drm_gem_object_handle_put_unlocked(objs[i]); + drm_gem_object_put(objs[i]); } return ret; } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index be77d61a16ce..60c282881958 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -161,7 +161,7 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); -void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 668077009fce..38b24fc8978d 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -23,6 +23,7 @@ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ +#include #include #include #include @@ -100,6 +101,8 @@ struct drm_framebuffer_funcs { unsigned num_clips); }; +#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i)) + /** * struct drm_framebuffer - frame buffer object * @@ -188,6 +191,10 @@ struct drm_framebuffer { * DRM_MODE_FB_MODIFIERS. */ int flags; + /** + * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF. + */ + unsigned int internal_flags; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. 
 */

From bd46cece51a36ef088f22ef0416ac13b0a46d5b0 Mon Sep 17 00:00:00 2001
From: Simona Vetter
Date: Mon, 7 Jul 2025 17:18:13 +0200
Subject: [PATCH 154/234] drm/gem: Fix race in drm_gem_handle_create_tail()

Object creation is a careful dance where we must guarantee that the
object is fully constructed before it is visible to other threads, and
GEM buffer objects are no different.

Final publishing happens by calling drm_gem_handle_create(). After that
the only allowed thing to do is call drm_gem_object_put() because a
concurrent call to the GEM_CLOSE ioctl with a correctly guessed id
(which is trivial since we have a linear allocator) can already tear
down the object again.

Luckily most drivers get this right; I've pinged the relevant
maintainers for the very few exceptions. Unfortunately we also need
drm_gem_handle_create() when creating additional handles for an already
existing object (e.g. the GETFB ioctl or the various bo import ioctls),
and hence we cannot have a drm_gem_handle_create_and_put() as the only
exported function to stop these issues from happening.

Now unfortunately the implementation of drm_gem_handle_create() isn't
living up to standards: it does correctly finish object initialization
at the global level, and hence is safe against a concurrent tear down.
But it also sets up the file-private aspects of the handle, and that
part goes wrong: we fully register the object in the
drm_file.object_idr before calling drm_vma_node_allow() or
obj->funcs->open, which opens up races against concurrent removal of
that handle in drm_gem_handle_delete().

Fix this with the usual two-stage approach of first reserving the
handle id, and then only registering the object after we've completed
the file-private setup.

Jacek reported this with a testcase of concurrently calling GEM_CLOSE
on a freshly-created object (which also destroys the object), but it
should be possible to hit this with just additional handles created
through import or GETFB without completely destroying the underlying
object with the concurrent GEM_CLOSE ioctl calls.

Note that the close side of this race was fixed in f6cd7daecff5 ("drm:
Release driver references to handle before making it available again"),
which means a cool 9 years have passed until someone noticed that we
need to make this symmetric, or there are still gaps left :-/ Without
the 2-stage close approach we'd still have a race, therefore that's an
integral part of this bugfix.

More importantly, this means we can have NULL pointers behind an
allocated id in our drm_file.object_idr. We need to check for that now:

- drm_gem_handle_delete() checks for ERR_OR_NULL already

- drm_gem.c:object_lookup() also checks for NULL

- drm_gem_release() should never be called if there's another thread
  still running that could call into an IOCTL that creates a new
  handle, so it cannot race. For paranoia I added a NULL check to
  drm_gem_object_release_handle() though.

- most drivers (etnaviv, i915, msm) are fine because they use
  idr_find(), which maps both ENOENT and NULL to NULL.

- drivers using idr_for_each_entry() should also be fine, because
  idr_get_next() does filter out NULL entries and continues the
  iteration.

- The same holds for drm_show_memory_stats().
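Condensed from the hunk below, the two-stage publish looks roughly like this (error paths trimmed):

    /* Stage 1: reserve the id, publishing only a NULL placeholder. */
    spin_lock(&file_priv->table_lock);
    handle = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
    spin_unlock(&file_priv->table_lock);

    /* ... file-private setup: drm_vma_node_allow(), obj->funcs->open() ... */

    /* Stage 2: only now make the fully set-up object visible. */
    spin_lock(&file_priv->table_lock);
    idr_replace(&file_priv->object_idr, obj, handle);
    spin_unlock(&file_priv->table_lock);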
v2: Use drm_WARN_ON (Thomas) Reported-by: Jacek Lawrynowicz Tested-by: Jacek Lawrynowicz Reviewed-by: Thomas Zimmermann Cc: stable@vger.kernel.org Cc: Jacek Lawrynowicz Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Thomas Zimmermann Cc: David Airlie Cc: Simona Vetter Signed-off-by: Simona Vetter Signed-off-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250707151814.603897-1-simona.vetter@ffwll.ch --- drivers/gpu/drm/drm_gem.c | 10 +++++++++- include/drm/drm_file.h | 3 +++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index aad6ac9748cc..ac0524595bd6 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -325,6 +325,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; + if (drm_WARN_ON(obj->dev, !data)) + return 0; + if (obj->funcs->close) obj->funcs->close(obj, file_priv); @@ -445,7 +448,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); @@ -466,6 +469,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, goto err_revoke; } + /* mirrors drm_gem_handle_delete to avoid races */ + spin_lock(&file_priv->table_lock); + obj = idr_replace(&file_priv->object_idr, obj, handle); + WARN_ON(obj != NULL); + spin_unlock(&file_priv->table_lock); *handlep = handle; return 0; diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 5c3b2aa3e69d..d344d41e6cfe 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -300,6 +300,9 @@ struct drm_file { * * Mapping of mm object handles to object pointers. Used by the GEM * subsystem. Protected by @table_lock. + * + * Note that allocated entries might be NULL as a transient state when + * creating or deleting a handle. 
*/ struct idr object_idr; From d563e7f95c933816efe531c3a48e22bc099c0f55 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Tue, 8 Jul 2025 10:38:29 +0200 Subject: [PATCH 155/234] gpio: of: initialize local variable passed to the .of_xlate() callback of_flags is passed down to GPIO chip's xlate function, so ensure this one is properly initialized as - if the xlate callback does nothing with it - we may end up with various configuration errors like: gpio-720 (enable): multiple pull-up, pull-down or pull-disable enabled, invalid configuration Signed-off-by: Alexander Stein Link: https://lore.kernel.org/r/20250708083829.658051-1-alexander.stein@ew.tq-group.com [Bartosz: tweaked the commit message] Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpiolib-of.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 73ba73b31cb1..37ab78243fab 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -708,7 +708,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id, unsigned int idx, unsigned long *flags) { char propname[32]; /* 32 is max size of property name */ - enum of_gpio_flags of_flags; + enum of_gpio_flags of_flags = 0; const of_find_gpio_quirk *q; struct gpio_desc *desc; From 76303ee8d54bff6d9a6d55997acd88a6c2ba63cf Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 2 Jul 2025 10:32:04 +0200 Subject: [PATCH 156/234] x86/mm: Disable hugetlb page table sharing on 32-bit Only select ARCH_WANT_HUGE_PMD_SHARE on 64-bit x86. Page table sharing requires at least three levels because it involves shared references to PMD tables; 32-bit x86 has either two-level paging (without PAE) or three-level paging (with PAE), but even with three-level paging, having a dedicated PGD entry for hugetlb is only barely possible (because the PGD only has four entries), and it seems unlikely anyone's actually using PMD sharing on 32-bit. Having ARCH_WANT_HUGE_PMD_SHARE enabled on non-PAE 32-bit X86 (which has 2-level paging) became particularly problematic after commit 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count"), since that changes `struct ptdesc` such that the `pt_mm` (for PGDs) and the `pt_share_count` (for PMDs) share the same union storage - and with 2-level paging, PMDs are PGDs. (For comparison, arm64 also gates ARCH_WANT_HUGE_PMD_SHARE on the configuration of page tables such that it is never enabled with 2-level paging.) 
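A simplified sketch of the storage clash described above (struct and types abridged; see commit 59d9094df3d7 for the real layout):

    struct ptdesc {                         /* abridged */
            union {
                    struct mm_struct *pt_mm;        /* used when the table is a PGD */
                    atomic_t pt_share_count;        /* used when the table is a shared PMD */
            };
    };

With 2-level paging a PMD *is* a PGD, so both interpretations would apply to the same page table at once, which is why PMD sharing is now restricted to X86_64.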
Closes: https://lore.kernel.org/r/srhpjxlqfna67blvma5frmy3aa@altlinux.org Fixes: cfe28c5d63d8 ("x86: mm: Remove x86 version of huge_pmd_share.") Reported-by: Vitaly Chikunov Suggested-by: Dave Hansen Signed-off-by: Jann Horn Signed-off-by: Dave Hansen Acked-by: Oscar Salvador Acked-by: David Hildenbrand Tested-by: Vitaly Chikunov Cc:stable@vger.kernel.org Link: https://lore.kernel.org/all/20250702-x86-2level-hugetlb-v2-1-1a98096edf92%40google.com --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 71019b3b54ea..4e0fe688cc83 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -147,7 +147,7 @@ config X86 select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANTS_NO_INSTR select ARCH_WANT_GENERAL_HUGETLB - select ARCH_WANT_HUGE_PMD_SHARE + select ARCH_WANT_HUGE_PMD_SHARE if X86_64 select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64 select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64 From 8c2e52ebbe885c7eeaabd3b7ddcdc1246fc400d2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 9 Jul 2025 10:38:29 -0700 Subject: [PATCH 157/234] eventpoll: don't decrement ep refcount while still holding the ep mutex Jann Horn points out that epoll is decrementing the ep refcount and then doing a mutex_unlock(&ep->mtx); afterwards. That's very wrong, because it can lead to a use-after-free. That pattern is actually fine for the very last reference, because the code in question will delay the actual call to "ep_free(ep)" until after it has unlocked the mutex. But it's wrong for the much subtler "next to last" case when somebody *else* may also be dropping their reference and free the ep while we're still using the mutex. Note that this is true even if that other user is also using the same ep mutex: mutexes, unlike spinlocks, can not be used for object ownership, even if they guarantee mutual exclusion. A mutex "unlock" operation is not atomic, and as one user is still accessing the mutex as part of unlocking it, another user can come in and get the now released mutex and free the data structure while the first user is still cleaning up. See our mutex documentation in Documentation/locking/mutex-design.rst, in particular the section [1] about semantics: "mutex_unlock() may access the mutex structure even after it has internally released the lock already - so it's not safe for another context to acquire the mutex and assume that the mutex_unlock() context is not using the structure anymore" So if we drop our ep ref before the mutex unlock, but we weren't the last one, we may then unlock the mutex, another user comes in, drops _their_ reference and releases the 'ep' as it now has no users - all while the mutex_unlock() is still accessing it. Fix this by simply moving the ep refcount dropping to outside the mutex: the refcount itself is atomic, and doesn't need mutex protection (that's the whole _point_ of refcounts: unlike mutexes, they are inherently about object lifetimes). 
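The resulting ordering, condensed from the hunk below:

    mutex_lock(&ep->mtx);
    /* ... remove all epitems ... */
    mutex_unlock(&ep->mtx);

    /* Drop our reference only once the mutex is no longer touched; the
     * refcount is atomic and needs no mutex protection.
     */
    if (ep_refcount_dec_and_test(ep))
            ep_free(ep);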
Reported-by: Jann Horn Link: https://docs.kernel.org/locking/mutex-design.html#semantics [1] Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a97a771a459c..895256cd2786 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -828,7 +828,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) kfree_rcu(epi, rcu); percpu_counter_dec(&ep->user->epoll_watches); - return ep_refcount_dec_and_test(ep); + return true; } /* @@ -836,14 +836,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) */ static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi) { - WARN_ON_ONCE(__ep_remove(ep, epi, false)); + if (__ep_remove(ep, epi, false)) + WARN_ON_ONCE(ep_refcount_dec_and_test(ep)); } static void ep_clear_and_put(struct eventpoll *ep) { struct rb_node *rbp, *next; struct epitem *epi; - bool dispose; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) @@ -876,10 +876,8 @@ static void ep_clear_and_put(struct eventpoll *ep) cond_resched(); } - dispose = ep_refcount_dec_and_test(ep); mutex_unlock(&ep->mtx); - - if (dispose) + if (ep_refcount_dec_and_test(ep)) ep_free(ep); } @@ -1100,7 +1098,7 @@ void eventpoll_release_file(struct file *file) dispose = __ep_remove(ep, epi, true); mutex_unlock(&ep->mtx); - if (dispose) + if (dispose && ep_refcount_dec_and_test(ep)) ep_free(ep); goto again; } From 4578a747f3c7950be3feb93c2db32eb597a3e55b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 9 Jul 2025 13:45:14 -0400 Subject: [PATCH 158/234] KVM: x86: avoid underflow when scaling TSC frequency In function kvm_guest_time_update(), __scale_tsc() is used to calculate a TSC *frequency* rather than a TSC value. With low-enough ratios, a TSC value that is less than 1 would underflow to 0 and to an infinite while loop in kvm_get_time_scale(): kvm_guest_time_update(struct kvm_vcpu *v) if (kvm_caps.has_tsc_control) tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, v->arch.l1_tsc_scaling_ratio); __scale_tsc(u64 ratio, u64 tsc) ratio=122380531, tsc=2299998, N=48 ratio*tsc >> N = 0.999... -> 0 Later in the function: Call Trace: kvm_get_time_scale arch/x86/kvm/x86.c:2458 [inline] kvm_guest_time_update+0x926/0xb00 arch/x86/kvm/x86.c:3268 vcpu_enter_guest.constprop.0+0x1e70/0x3cf0 arch/x86/kvm/x86.c:10678 vcpu_run+0x129/0x8d0 arch/x86/kvm/x86.c:11126 kvm_arch_vcpu_ioctl_run+0x37a/0x13d0 arch/x86/kvm/x86.c:11352 kvm_vcpu_ioctl+0x56b/0xe60 virt/kvm/kvm_main.c:4188 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:871 [inline] __se_sys_ioctl+0x12d/0x190 fs/ioctl.c:857 do_syscall_x64 arch/x86/entry/common.c:51 [inline] do_syscall_64+0x59/0x110 arch/x86/entry/common.c:81 entry_SYSCALL_64_after_hwframe+0x78/0xe2 This can really happen only when fuzzing, since the TSC frequency would have to be nonsensically low. 
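The fix below simply clamps the scaled frequency, roughly:

    if (kvm_caps.has_tsc_control) {
            tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
                                        v->arch.l1_tsc_scaling_ratio);
            /* (ratio * tsc) >> 48 can truncate to 0 for absurdly low
             * frequencies, and 0 would spin forever in
             * kvm_get_time_scale(); clamp to at least 1 kHz.
             */
            tgt_tsc_khz = tgt_tsc_khz ?: 1;
    }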
Fixes: 35181e86df97 ("KVM: x86: Add a common TSC scaling function") Reported-by: Yuntao Liu Suggested-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b58a74c1722d..de51dbd85a58 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3258,9 +3258,11 @@ int kvm_guest_time_update(struct kvm_vcpu *v) /* With all the info we got, fill in the values */ - if (kvm_caps.has_tsc_control) + if (kvm_caps.has_tsc_control) { tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, v->arch.l1_tsc_scaling_ratio); + tgt_tsc_khz = tgt_tsc_khz ? : 1; + } if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, From ec3cae639482a8da11a3ae30d28ceceb9d6a3f56 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 9 Jul 2025 19:12:47 +0200 Subject: [PATCH 159/234] PM: sleep: Call pm_restore_gfp_mask() after dpm_resume() Commit 12ffc3b1513e ("PM: Restrict swap use to later in the suspend sequence") changed two pm_restore_gfp_mask() calls in enter_state() and hibernation_restore() into one pm_restore_gfp_mask() call in dpm_resume_end(), but it put that call before the dpm_resume() invocation which is too early (some swap-backing devices may not be ready at that point). Moreover, this code ordering change was not even mentioned in the changelog of the commit mentioned above. Address this by moving that call after the dpm_resume() one. Fixes: 12ffc3b1513e ("PM: Restrict swap use to later in the suspend sequence") Signed-off-by: Rafael J. Wysocki Reviewed-by: Mario Limonciello Link: https://patch.msgid.link/2797018.mvXUDI8C0e@rjwysocki.net --- drivers/base/power/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index bf77d28e959f..a6ab666ef48a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1236,8 +1236,8 @@ void dpm_complete(pm_message_t state) */ void dpm_resume_end(pm_message_t state) { - pm_restore_gfp_mask(); dpm_resume(state); + pm_restore_gfp_mask(); dpm_complete(state); } EXPORT_SYMBOL_GPL(dpm_resume_end); From 706cc36477139c1616a9b2b96610a8bb520b7119 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 4 Jul 2025 06:23:51 +0000 Subject: [PATCH 160/234] atm: clip: Fix potential null-ptr-deref in to_atmarpd(). atmarpd is protected by RTNL since commit f3a0592b37b8 ("[ATM]: clip causes unregister hang"). However, it is not enough because to_atmarpd() is called without RTNL, especially clip_neigh_solicit() / neigh_ops->solicit() is unsleepable. Also, there is no RTNL dependency around atmarpd. Let's use a private mutex and RCU to protect access to atmarpd in to_atmarpd(). 
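Condensed from the hunks below, the reader/writer split looks like this:

    /* Reader, may run in atomic context (e.g. neigh_ops->solicit()): */
    rcu_read_lock();
    vcc = rcu_dereference(atmarpd);
    if (!vcc) {
            rcu_read_unlock();
            return -EUNATCH;
    }
    /* ... charge and queue the atmarp_ctrl skb on sk_atm(vcc) ... */
    rcu_read_unlock();

    /* Writer (atmarpd_close()), serialised by atmarpd_lock: */
    mutex_lock(&atmarpd_lock);
    RCU_INIT_POINTER(atmarpd, NULL);
    mutex_unlock(&atmarpd_lock);
    synchronize_rcu();      /* wait out in-flight readers before purging */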
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250704062416.1613927-2-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/atm/clip.c | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/net/atm/clip.c b/net/atm/clip.c index b234dc3bcb0d..f36f2c7d8714 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -45,7 +45,8 @@ #include static struct net_device *clip_devs; -static struct atm_vcc *atmarpd; +static struct atm_vcc __rcu *atmarpd; +static DEFINE_MUTEX(atmarpd_lock); static struct timer_list idle_timer; static const struct neigh_ops clip_neigh_ops; @@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) { struct sock *sk; struct atmarp_ctrl *ctrl; + struct atm_vcc *vcc; struct sk_buff *skb; + int err = 0; pr_debug("(%d)\n", type); - if (!atmarpd) - return -EUNATCH; + + rcu_read_lock(); + vcc = rcu_dereference(atmarpd); + if (!vcc) { + err = -EUNATCH; + goto unlock; + } skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC); - if (!skb) - return -ENOMEM; + if (!skb) { + err = -ENOMEM; + goto unlock; + } ctrl = skb_put(skb, sizeof(struct atmarp_ctrl)); ctrl->type = type; ctrl->itf_num = itf; ctrl->ip = ip; - atm_force_charge(atmarpd, skb->truesize); + atm_force_charge(vcc, skb->truesize); - sk = sk_atm(atmarpd); + sk = sk_atm(vcc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); - return 0; +unlock: + rcu_read_unlock(); + return err; } static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) @@ -607,10 +619,12 @@ static void atmarpd_close(struct atm_vcc *vcc) { pr_debug("\n"); - rtnl_lock(); - atmarpd = NULL; + mutex_lock(&atmarpd_lock); + RCU_INIT_POINTER(atmarpd, NULL); + mutex_unlock(&atmarpd_lock); + + synchronize_rcu(); skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); - rtnl_unlock(); pr_debug("(done)\n"); module_put(THIS_MODULE); @@ -631,15 +645,15 @@ static struct atm_dev atmarpd_dev = { static int atm_init_atmarp(struct atm_vcc *vcc) { - rtnl_lock(); + mutex_lock(&atmarpd_lock); if (atmarpd) { - rtnl_unlock(); + mutex_unlock(&atmarpd_lock); return -EADDRINUSE; } mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); - atmarpd = vcc; + rcu_assign_pointer(atmarpd, vcc); set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); /* allow replies and avoid getting closed if signaling dies */ @@ -648,7 +662,7 @@ static int atm_init_atmarp(struct atm_vcc *vcc) vcc->push = NULL; vcc->pop = NULL; /* crash */ vcc->push_oam = NULL; /* crash */ - rtnl_unlock(); + mutex_unlock(&atmarpd_lock); return 0; } From 62dba28275a9a3104d4e33595c7b3328d4032d8d Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 4 Jul 2025 06:23:52 +0000 Subject: [PATCH 161/234] atm: clip: Fix memory leak of struct clip_vcc. ioctl(ATMARP_MKIP) allocates struct clip_vcc and set it to vcc->user_back. The code assumes that vcc_destroy_socket() passes NULL skb to vcc->push() when the socket is close()d, and then clip_push() frees clip_vcc. However, ioctl(ATMARPD_CTRL) sets NULL to vcc->push() in atm_init_atmarp(), resulting in memory leak. Let's serialise two ioctl() by lock_sock() and check vcc->push() in atm_init_atmarp() to prevent memleak. 
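The ioctl-side serialisation added below boils down to:

    case ATMARPD_CTRL:
            lock_sock(sk);                  /* serialise against ATMARP_MKIP */
            err = atm_init_atmarp(vcc);     /* now rejects vcc->push == clip_push */
            release_sock(sk);
            break;
    case ATMARP_MKIP:
            lock_sock(sk);
            err = clip_mkip(vcc, arg);
            release_sock(sk);
            break;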
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250704062416.1613927-3-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/atm/clip.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/atm/clip.c b/net/atm/clip.c index f36f2c7d8714..9c9c6c3d9886 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -645,6 +645,9 @@ static struct atm_dev atmarpd_dev = { static int atm_init_atmarp(struct atm_vcc *vcc) { + if (vcc->push == clip_push) + return -EINVAL; + mutex_lock(&atmarpd_lock); if (atmarpd) { mutex_unlock(&atmarpd_lock); @@ -669,6 +672,7 @@ static int atm_init_atmarp(struct atm_vcc *vcc) static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *vcc = ATM_SD(sock); + struct sock *sk = sock->sk; int err = 0; switch (cmd) { @@ -689,14 +693,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = clip_create(arg); break; case ATMARPD_CTRL: + lock_sock(sk); err = atm_init_atmarp(vcc); if (!err) { sock->state = SS_CONNECTED; __module_get(THIS_MODULE); } + release_sock(sk); break; case ATMARP_MKIP: + lock_sock(sk); err = clip_mkip(vcc, arg); + release_sock(sk); break; case ATMARP_SETENTRY: err = clip_setentry(vcc, (__force __be32)arg); From c489f3283dbfc0f3c00c312149cae90d27552c45 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 4 Jul 2025 06:23:53 +0000 Subject: [PATCH 162/234] atm: clip: Fix infinite recursive call of clip_push(). syzbot reported the splat below. [0] This happens if we call ioctl(ATMARP_MKIP) more than once. During the first call, clip_mkip() sets clip_push() to vcc->push(), and the second call copies it to clip_vcc->old_push(). Later, when the socket is close()d, vcc_destroy_socket() passes NULL skb to clip_push(), which calls clip_vcc->old_push(), triggering the infinite recursion. Let's prevent the second ioctl(ATMARP_MKIP) by checking vcc->user_back, which is allocated by the first call as clip_vcc. Note also that we use lock_sock() to prevent racy calls. [0]: BUG: TASK stack guard page was hit at ffffc9000d66fff8 (stack is ffffc9000d670000..ffffc9000d678000) Oops: stack guard page: 0000 [#1] SMP KASAN NOPTI CPU: 0 UID: 0 PID: 5322 Comm: syz.0.0 Not tainted 6.16.0-rc4-syzkaller #0 PREEMPT(full) Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014 RIP: 0010:clip_push+0x5/0x720 net/atm/clip.c:191 Code: e0 8f aa 8c e8 1c ad 5b fa eb ae 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa 55 <41> 57 41 56 41 55 41 54 53 48 83 ec 20 48 89 f3 49 89 fd 48 bd 00 RSP: 0018:ffffc9000d670000 EFLAGS: 00010246 RAX: 1ffff1100235a4a5 RBX: ffff888011ad2508 RCX: ffff8880003c0000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff888037f01000 RBP: dffffc0000000000 R08: ffffffff8fa104f7 R09: 1ffffffff1f4209e R10: dffffc0000000000 R11: ffffffff8a99b300 R12: ffffffff8a99b300 R13: ffff888037f01000 R14: ffff888011ad2500 R15: ffff888037f01578 FS: 000055557ab6d500(0000) GS:ffff88808d250000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffc9000d66fff8 CR3: 0000000043172000 CR4: 0000000000352ef0 Call Trace: clip_push+0x6dc/0x720 net/atm/clip.c:200 clip_push+0x6dc/0x720 net/atm/clip.c:200 clip_push+0x6dc/0x720 net/atm/clip.c:200 ... 
clip_push+0x6dc/0x720 net/atm/clip.c:200 clip_push+0x6dc/0x720 net/atm/clip.c:200 clip_push+0x6dc/0x720 net/atm/clip.c:200 vcc_destroy_socket net/atm/common.c:183 [inline] vcc_release+0x157/0x460 net/atm/common.c:205 __sock_release net/socket.c:647 [inline] sock_close+0xc0/0x240 net/socket.c:1391 __fput+0x449/0xa70 fs/file_table.c:465 task_work_run+0x1d1/0x260 kernel/task_work.c:227 resume_user_mode_work include/linux/resume_user_mode.h:50 [inline] exit_to_user_mode_loop+0xec/0x110 kernel/entry/common.c:114 exit_to_user_mode_prepare include/linux/entry-common.h:330 [inline] syscall_exit_to_user_mode_work include/linux/entry-common.h:414 [inline] syscall_exit_to_user_mode include/linux/entry-common.h:449 [inline] do_syscall_64+0x2bd/0x3b0 arch/x86/entry/syscall_64.c:100 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7ff31c98e929 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fffb5aa1f78 EFLAGS: 00000246 ORIG_RAX: 00000000000001b4 RAX: 0000000000000000 RBX: 0000000000012747 RCX: 00007ff31c98e929 RDX: 0000000000000000 RSI: 000000000000001e RDI: 0000000000000003 RBP: 00007ff31cbb7ba0 R08: 0000000000000001 R09: 0000000db5aa226f R10: 00007ff31c7ff030 R11: 0000000000000246 R12: 00007ff31cbb608c R13: 00007ff31cbb6080 R14: ffffffffffffffff R15: 00007fffb5aa2090 Modules linked in: Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+0c77cccd6b7cd917b35a@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=2371d94d248d126c1eb1 Signed-off-by: Kuniyuki Iwashima Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250704062416.1613927-4-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/atm/clip.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/atm/clip.c b/net/atm/clip.c index 9c9c6c3d9886..a30c5a270545 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -429,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) if (!vcc->push) return -EBADFD; + if (vcc->user_back) + return -EINVAL; clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); if (!clip_vcc) return -ENOMEM; From 22fc46cea91df3dce140a7dc6847c6fcf0354505 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Sat, 5 Jul 2025 16:52:28 +0800 Subject: [PATCH 163/234] atm: clip: Fix NULL pointer dereference in vcc_sendmsg() atmarpd_dev_ops does not implement the send method, which may cause crash as bellow. BUG: kernel NULL pointer dereference, address: 0000000000000000 PGD 0 P4D 0 Oops: Oops: 0010 [#1] SMP KASAN NOPTI CPU: 0 UID: 0 PID: 5324 Comm: syz.0.0 Not tainted 6.15.0-rc6-syzkaller-00346-g5723cc3450bc #0 PREEMPT(full) Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014 RIP: 0010:0x0 Code: Unable to access opcode bytes at 0xffffffffffffffd6. 
RSP: 0018:ffffc9000d3cf778 EFLAGS: 00010246 RAX: 1ffffffff1910dd1 RBX: 00000000000000c0 RCX: dffffc0000000000 RDX: ffffc9000dc82000 RSI: ffff88803e4c4640 RDI: ffff888052cd0000 RBP: ffffc9000d3cf8d0 R08: ffff888052c9143f R09: 1ffff1100a592287 R10: dffffc0000000000 R11: 0000000000000000 R12: 1ffff92001a79f00 R13: ffff888052cd0000 R14: ffff88803e4c4640 R15: ffffffff8c886e88 FS: 00007fbc762566c0(0000) GS:ffff88808d6c2000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffffffffffffd6 CR3: 0000000041f1b000 CR4: 0000000000352ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: vcc_sendmsg+0xa10/0xc50 net/atm/common.c:644 sock_sendmsg_nosec net/socket.c:712 [inline] __sock_sendmsg+0x219/0x270 net/socket.c:727 ____sys_sendmsg+0x52d/0x830 net/socket.c:2566 ___sys_sendmsg+0x21f/0x2a0 net/socket.c:2620 __sys_sendmmsg+0x227/0x430 net/socket.c:2709 __do_sys_sendmmsg net/socket.c:2736 [inline] __se_sys_sendmmsg net/socket.c:2733 [inline] __x64_sys_sendmmsg+0xa0/0xc0 net/socket.c:2733 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xf6/0x210 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+e34e5e6b5eddb0014def@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/682f82d5.a70a0220.1765ec.0143.GAE@google.com/T Signed-off-by: Yue Haibing Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250705085228.329202-1-yuehaibing@huawei.com Signed-off-by: Jakub Kicinski --- net/atm/clip.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/atm/clip.c b/net/atm/clip.c index a30c5a270545..f7a5565e794e 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -632,8 +632,16 @@ static void atmarpd_close(struct atm_vcc *vcc) module_put(THIS_MODULE); } +static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb) +{ + atm_return_tx(vcc, skb); + dev_kfree_skb_any(skb); + return 0; +} + static const struct atmdev_ops atmarpd_dev_ops = { - .close = atmarpd_close + .close = atmarpd_close, + .send = atmarpd_send }; From d55683866c79a3af1e334126f32841d05e7e4143 Mon Sep 17 00:00:00 2001 From: Victor Nogueira Date: Sat, 5 Jul 2025 17:36:38 -0300 Subject: [PATCH 164/234] selftests/tc-testing: Create test case for UAF scenario with DRR/NETEM/BLACKHOLE chain Create a tdc test for the UAF scenario with DRR/NETEM/BLACKHOLE chain shared by Lion on his report [1]. 
[1] https://lore.kernel.org/netdev/45876f14-cf28-4177-8ead-bb769fd9e57a@gmail.com/ Signed-off-by: Victor Nogueira Acked-by: Cong Wang Link: https://patch.msgid.link/20250705203638.246350-1-victor@mojatatu.com Signed-off-by: Jakub Kicinski --- .../tc-testing/tc-tests/infra/qdiscs.json | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json index 9aa44d8176d9..5c6851e8d311 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json @@ -635,5 +635,42 @@ "$TC qdisc del dev $DUMMY handle 1:0 root", "$IP addr del 10.10.10.10/24 dev $DUMMY || true" ] + }, + { + "id": "d74b", + "name": "Test use-after-free with DRR/NETEM/BLACKHOLE chain", + "category": [ + "qdisc", + "hfsc", + "drr", + "netem", + "blackhole" + ], + "plugins": { + "requires": [ + "nsPlugin", + "scapyPlugin" + ] + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY root handle 1: drr", + "$TC filter add dev $DUMMY parent 1: basic classid 1:1", + "$TC class add dev $DUMMY parent 1: classid 1:1 drr", + "$TC qdisc add dev $DUMMY parent 1:1 handle 2: hfsc def 1", + "$TC class add dev $DUMMY parent 2: classid 2:1 hfsc rt m1 8 d 1 m2 0", + "$TC qdisc add dev $DUMMY parent 2:1 handle 3: netem", + "$TC qdisc add dev $DUMMY parent 3:1 handle 4: blackhole", + "ping -c1 -W0.01 -I $DUMMY 10.10.11.11 || true", + "$TC class del dev $DUMMY classid 1:1" + ], + "cmdUnderTest": "ping -c1 -W0.01 -I $DUMMY 10.10.11.11", + "expExitCode": "1", + "verifyCmd": "$TC -j class ls dev $DUMMY classid 1:1", + "matchJSON": [], + "teardown": [ + "$TC qdisc del dev $DUMMY root handle 1: drr" + ] } ] From 849704b8b2115647e12436e5076b8e7a4944f21a Mon Sep 17 00:00:00 2001 From: Alok Tiwari Date: Sun, 6 Jul 2025 12:43:21 -0700 Subject: [PATCH 165/234] net: thunderx: avoid direct MTU assignment after WRITE_ONCE() The current logic in nicvf_change_mtu() writes the new MTU to netdev->mtu using WRITE_ONCE() before verifying if the hardware update succeeds. However on hardware update failure, it attempts to revert to the original MTU using a direct assignment (netdev->mtu = orig_mtu) which violates the intended of WRITE_ONCE protection introduced in commit 1eb2cded45b3 ("net: annotate writes on dev->mtu from ndo_change_mtu()") Additionally, WRITE_ONCE(netdev->mtu, new_mtu) is unnecessarily performed even when the device is not running. Fix this by: Only writing netdev->mtu after successfully updating the hardware. Skipping hardware update when the device is down, and setting MTU directly. Remove unused variable orig_mtu. This ensures that all writes to netdev->mtu are consistent with WRITE_ONCE expectations and avoids unintended state corruption on failure paths. 
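The corrected ordering, condensed from the hunk below:

    /* Ask the hardware first; commit netdev->mtu only on success. */
    if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu))
            return -EINVAL;

    WRITE_ONCE(netdev->mtu, new_mtu);
    return 0;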
Signed-off-by: Alok Tiwari Reviewed-by: Jacob Keller Link: https://patch.msgid.link/20250706194327.1369390-1-alok.a.tiwari@oracle.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aebb9fef3f6e..1be2dc40a1a6 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1578,7 +1578,6 @@ int nicvf_open(struct net_device *netdev) static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) { struct nicvf *nic = netdev_priv(netdev); - int orig_mtu = netdev->mtu; /* For now just support only the usual MTU sized frames, * plus some headroom for VLAN, QinQ. @@ -1589,15 +1588,10 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - WRITE_ONCE(netdev->mtu, new_mtu); - - if (!netif_running(netdev)) - return 0; - - if (nicvf_update_hw_max_frs(nic, new_mtu)) { - netdev->mtu = orig_mtu; + if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu)) return -EINVAL; - } + + WRITE_ONCE(netdev->mtu, new_mtu); return 0; } From 02c4d6c26f1f662da8885b299c224ca6628ad232 Mon Sep 17 00:00:00 2001 From: Chintan Vankar Date: Mon, 7 Jul 2025 14:22:01 +0530 Subject: [PATCH 166/234] net: ethernet: ti: am65-cpsw-nuss: Fix skb size by accounting for skb_shared_info While transitioning from netdev_alloc_ip_align() to build_skb(), memory for the "skb_shared_info" member of an "skb" was not allocated. Fix this by allocating "PAGE_SIZE" as the skb length, accounting for the packet length, headroom and tailroom, thereby including the required memory space for skb_shared_info. Fixes: 8acacc40f733 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support") Reviewed-by: Siddharth Vadapalli Signed-off-by: Chintan Vankar Link: https://patch.msgid.link/20250707085201.1898818-1-c-vankar@ti.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index f20d1ff192ef..231ca141331f 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -856,8 +856,6 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr, { struct sk_buff *skb; - len += AM65_CPSW_HEADROOM; - skb = build_skb(page_addr, len); if (unlikely(!skb)) return NULL; @@ -1344,7 +1342,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, } skb = am65_cpsw_build_skb(page_addr, ndev, - AM65_CPSW_MAX_PACKET_SIZE, headroom); + PAGE_SIZE, headroom); if (unlikely(!skb)) { new_page = page; goto requeue; From ffdde7bf5a439aaa1955ebd581f5c64ab1533963 Mon Sep 17 00:00:00 2001 From: Victor Nogueira Date: Mon, 7 Jul 2025 18:08:01 -0300 Subject: [PATCH 167/234] net/sched: Abort __tc_modify_qdisc if parent class does not exist Lion's patch [1] revealed an ancient bug in the qdisc API. Whenever a user creates/modifies a qdisc specifying as a parent another qdisc, the qdisc API will, during grafting, detect that the user is not trying to attach to a class and reject. However grafting is performed after qdisc_create (and thus the qdiscs' init callback) is executed. In qdiscs that eventually call qdisc_tree_reduce_backlog during init or change (such as fq, hhf, choke, etc), an issue arises. 
For example, executing the following commands: sudo tc qdisc add dev lo root handle a: htb default 2 sudo tc qdisc add dev lo parent a: handle beef fq Qdiscs such as fq, hhf, choke, etc unconditionally invoke qdisc_tree_reduce_backlog() in their control path init() or change() which then causes a failure to find the child class; however, that does not stop the unconditional invocation of the assumed child qdisc's qlen_notify with a null class. All these qdiscs make the assumption that class is non-null. The solution is ensure that qdisc_leaf() which looks up the parent class, and is invoked prior to qdisc_create(), should return failure on not finding the class. In this patch, we leverage qdisc_leaf to return ERR_PTRs whenever the parentid doesn't correspond to a class, so that we can detect it earlier on and abort before qdisc_create is called. [1] https://lore.kernel.org/netdev/d912cbd7-193b-4269-9857-525bee8bbb6a@gmail.com/ Fixes: 5e50da01d0ce ("[NET_SCHED]: Fix endless loops (part 2): "simple" qdiscs") Reported-by: syzbot+d8b58d7b0ad89a678a16@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/68663c93.a70a0220.5d25f.0857.GAE@google.com/ Reported-by: syzbot+5eccb463fa89309d8bdc@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/68663c94.a70a0220.5d25f.0858.GAE@google.com/ Reported-by: syzbot+1261670bbdefc5485a06@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/686764a5.a00a0220.c7b3.0013.GAE@google.com/ Reported-by: syzbot+15b96fc3aac35468fe77@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/686764a5.a00a0220.c7b3.0014.GAE@google.com/ Reported-by: syzbot+4dadc5aecf80324d5a51@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/68679e81.a70a0220.29cf51.0016.GAE@google.com/ Acked-by: Jamal Hadi Salim Reviewed-by: Cong Wang Signed-off-by: Victor Nogueira Link: https://patch.msgid.link/20250707210801.372995-1-victor@mojatatu.com Signed-off-by: Jakub Kicinski --- net/sched/sch_api.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index d8a33486c511..241e86cec9c5 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -336,17 +336,22 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) return q; } -static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) +static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid, + struct netlink_ext_ack *extack) { unsigned long cl; const struct Qdisc_class_ops *cops = p->ops->cl_ops; - if (cops == NULL) - return NULL; + if (cops == NULL) { + NL_SET_ERR_MSG(extack, "Parent qdisc is not classful"); + return ERR_PTR(-EOPNOTSUPP); + } cl = cops->find(p, classid); - if (cl == 0) - return NULL; + if (cl == 0) { + NL_SET_ERR_MSG(extack, "Specified class not found"); + return ERR_PTR(-ENOENT); + } return cops->leaf(p, cl); } @@ -1490,7 +1495,7 @@ static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid"); return -ENOENT; } - q = qdisc_leaf(p, clid); + q = qdisc_leaf(p, clid, extack); } else if (dev_ingress_queue(dev)) { q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } @@ -1501,6 +1506,8 @@ static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); return -ENOENT; } + if (IS_ERR(q)) + return PTR_ERR(q); if (tcm->tcm_handle && q->handle != tcm->tcm_handle) { NL_SET_ERR_MSG(extack, "Invalid handle"); @@ -1602,7 
+1609,9 @@ static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Failed to find specified qdisc"); return -ENOENT; } - q = qdisc_leaf(p, clid); + q = qdisc_leaf(p, clid, extack); + if (IS_ERR(q)) + return PTR_ERR(q); } else if (dev_ingress_queue_create(dev)) { q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } From 1a03edeb84e6e3b9c6ca5a642557bced93d54434 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 7 Jul 2025 21:38:59 +0000 Subject: [PATCH 168/234] tcp: refine sk_rcvbuf increase for ooo packets When a passive flow has not been accepted yet, it is not wise to increase sk_rcvbuf when receiving ooo packets. A very busy server might tune down tcp_rmem[1] to better control how much memory can be used by sockets waiting in its listeners accept queues. Fixes: 63ad7dfedfae ("tcp: adjust rcvbuf in presence of reorders") Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20250707213900.1543248-2-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_input.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 12c2e6fc85c6..68bc79eb9019 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5181,7 +5181,9 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) skb_condense(skb); skb_set_owner_r(skb, sk); } - tcp_rcvbuf_grow(sk); + /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ + if (sk->sk_socket) + tcp_rcvbuf_grow(sk); } static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, From b939c074efc160648de884a41aeb5e857f2c5c68 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 7 Jul 2025 21:39:00 +0000 Subject: [PATCH 169/234] selftests/net: packetdrill: add tcp_ooo-before-and-after-accept.pkt Test how new passive flows react to ooo incoming packets. Their sk_rcvbuf can increase only after accept(). Signed-off-by: Eric Dumazet Link: https://patch.msgid.link/20250707213900.1543248-3-edumazet@google.com Signed-off-by: Jakub Kicinski --- .../tcp_ooo-before-and-after-accept.pkt | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt diff --git a/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt b/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt new file mode 100644 index 000000000000..09aabc775e80 --- /dev/null +++ b/tools/testing/selftests/net/packetdrill/tcp_ooo-before-and-after-accept.pkt @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 + +--mss=1000 + +`./defaults.sh +sysctl -q net.ipv4.tcp_rmem="4096 131072 $((32*1024*1024))"` + +// Test that a not-yet-accepted socket does not change +// its initial sk_rcvbuf (tcp_rmem[1]) when receiving ooo packets. + + +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 + +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 + +0 bind(3, ..., ...) = 0 + +0 listen(3, 1) = 0 + + +0 < S 0:0(0) win 65535 + +0 > S. 0:0(0) ack 1 + +.1 < . 1:1(0) ack 1 win 257 + +0 < . 2001:41001(39000) ack 1 win 257 + +0 > . 1:1(0) ack 1 + +0 < . 41001:101001(60000) ack 1 win 257 + +0 > . 1:1(0) ack 1 + +0 < . 1:1001(1000) ack 1 win 257 + +0 > . 1:1(0) ack 1001 + +0 < . 1001:2001(1000) ack 1 win 257 + +0 > . 1:1(0) ack 101001 + + +0 accept(3, ..., ...) 
= 4 + + +0 %{ assert SK_MEMINFO_RCVBUF == 131072, SK_MEMINFO_RCVBUF }% + + +0 close(4) = 0 + +0 close(3) = 0 + +// Test that ooo packets for accepted sockets do increase sk_rcvbuf + +0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 + +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 + +0 bind(3, ..., ...) = 0 + +0 listen(3, 1) = 0 + + +0 < S 0:0(0) win 65535 + +0 > S. 0:0(0) ack 1 + +.1 < . 1:1(0) ack 1 win 257 + + +0 accept(3, ..., ...) = 4 + + +0 < . 2001:41001(39000) ack 1 win 257 + +0 > . 1:1(0) ack 1 + +0 < . 41001:101001(60000) ack 1 win 257 + +0 > . 1:1(0) ack 1 + + +0 %{ assert SK_MEMINFO_RCVBUF > 131072, SK_MEMINFO_RCVBUF }% + From ee48b0abeca9afcd5a8bcb92345002d2667893ba Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Tue, 8 Jul 2025 10:20:51 +0200 Subject: [PATCH 170/234] MAINTAINERS: remove myself as netronome maintainer I am moving on from Corigine to different things, for the moment slightly removed from kernel development. Right now there is nobody I can in good conscience recommend to take over the maintainer role, but there are still people available for review, so put the driver state to 'Odd Fixes'. Additionally add Simon Horman as reviewer - thanks Simon. Signed-off-by: Louis Peens Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index fad6cb025a19..3cf8ecc9f517 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17222,10 +17222,10 @@ F: drivers/rtc/rtc-ntxec.c F: include/linux/mfd/ntxec.h NETRONOME ETHERNET DRIVERS -M: Louis Peens R: Jakub Kicinski +R: Simon Horman L: oss-drivers@corigine.com -S: Maintained +S: Odd Fixes F: drivers/net/ethernet/netronome/ NETWORK BLOCK DEVICE (NBD) From 69e4186773c6445b258fb45b6e1df18df831ec45 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 8 Jul 2025 22:15:03 +0100 Subject: [PATCH 171/234] rxrpc: Fix bug due to prealloc collision When userspace is using AF_RXRPC to provide a server, it has to preallocate incoming calls and assign to them call IDs that will be used to thread related recvmsg() and sendmsg() together. The preallocated call IDs will automatically be attached to calls as they come in until the pool is empty. To the kernel, the call IDs are just arbitrary numbers, but userspace can use the call ID to hold a pointer to prepared structs. In any case, the user isn't permitted to create two calls with the same call ID (call IDs become available again when the call ends) and EBADSLT should result from sendmsg() if an attempt is made to preallocate a call with an in-use call ID. However, the cleanup in the error handling will trigger both assertions in rxrpc_cleanup_call() because the call isn't marked complete and isn't marked as having been released. Fix this by setting the call state in rxrpc_service_prealloc_one() and then marking it as being released before calling the cleanup function. 
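The error path then becomes, roughly (condensed from the hunk below):

    id_in_use:
            write_unlock(&rx->call_lock);
            /* Fail/complete the call first so that the assertions in
             * rxrpc_cleanup_call() hold when it is torn down.
             */
            rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
            rxrpc_cleanup_call(call);
            return -EBADSLT;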
Fixes: 00e907127e6f ("rxrpc: Preallocate peers, conns and calls for incoming service requests")
Reported-by: Junvyyang, Tencent Zhuque Lab
Signed-off-by: David Howells
cc: LePremierHomme
cc: Marc Dionne
cc: Simon Horman
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250708211506.2699012-2-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/call_accept.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index a4b363b47cca..7271977b1683 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 id_in_use:
 	write_unlock(&rx->call_lock);
 
+	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
 	rxrpc_cleanup_call(call);
 	_leave(" = -EBADSLT");
 	return -EBADSLT;

From 880a88f318cf1d2a0f4c0a7ff7b07e2062b434a4 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Tue, 8 Jul 2025 22:15:04 +0100
Subject: [PATCH 172/234] rxrpc: Fix oops due to non-existence of prealloc backlog struct

If an AF_RXRPC service socket is opened and bound, but no calls have been
preallocated, then rxrpc_alloc_incoming_call() will oops because the
rxrpc_backlog struct doesn't get allocated until the first preallocation
is made.

Fix this by returning NULL from rxrpc_alloc_incoming_call() if there is no
backlog struct. This will cause the incoming call to be aborted.

Reported-by: Junvyyang, Tencent Zhuque Lab
Suggested-by: Junvyyang, Tencent Zhuque Lab
Signed-off-by: David Howells
cc: LePremierHomme
cc: Marc Dionne
cc: Willy Tarreau
cc: Simon Horman
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250708211506.2699012-3-dhowells@redhat.com
Signed-off-by: Jakub Kicinski
---
 net/rxrpc/call_accept.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 7271977b1683..49fccee1a726 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -255,6 +255,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 	unsigned short call_tail, conn_tail, peer_tail;
 	unsigned short call_count, conn_count;
 
+	if (!b)
+		return NULL;
+
 	/* #calls >= #conns >= #peers must hold true. */
 	call_head = smp_load_acquire(&b->call_backlog_head);
 	call_tail = b->call_backlog_tail;

From a95743b53031b015e8949e845a9f6fdfb2656347 Mon Sep 17 00:00:00 2001
From: Achill Gilgenast
Date: Sun, 22 Jun 2025 03:45:49 +0200
Subject: [PATCH 173/234] kallsyms: fix build without execinfo

Some libcs, like musl, don't provide execinfo.h since it's not part of
POSIX. In order to fix compilation on musl, only include execinfo.h if it
is available (HAVE_BACKTRACE_SUPPORT).

This was discovered with c104c16073b7 ("Kunit to check the longest symbol
length"), which started to include linux/kallsyms.h with Alpine Linux's
configs.

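
For reference, the same guard pattern in a self-contained userspace sketch
(HAVE_BACKTRACE_SUPPORT here is an assumed build-system define, mirroring
the one used above):

#include <stdio.h>

#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>

static void print_backtrace(void)
{
	void *frames[16];
	int n = backtrace(frames, 16);

	backtrace_symbols_fd(frames, n, 2);	/* write symbols to stderr */
}
#else
static void print_backtrace(void) { }		/* no execinfo.h: do nothing */
#endif

int main(void)
{
	print_backtrace();
	printf("done\n");
	return 0;
}
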
Link: https://lkml.kernel.org/r/20250622014608.448718-1-fossdd@pwned.life Fixes: c104c16073b7 ("Kunit to check the longest symbol length") Signed-off-by: Achill Gilgenast Cc: Luis Henriques Cc: Greg Kroah-Hartman Cc: Signed-off-by: Andrew Morton --- tools/include/linux/kallsyms.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h index 5a37ccbec54f..f61a01dd7eb7 100644 --- a/tools/include/linux/kallsyms.h +++ b/tools/include/linux/kallsyms.h @@ -18,6 +18,7 @@ static inline const char *kallsyms_lookup(unsigned long addr, return NULL; } +#ifdef HAVE_BACKTRACE_SUPPORT #include #include static inline void print_ip_sym(const char *loglvl, unsigned long ip) @@ -30,5 +31,8 @@ static inline void print_ip_sym(const char *loglvl, unsigned long ip) free(name); } +#else +static inline void print_ip_sym(const char *loglvl, unsigned long ip) {} +#endif #endif From 99af22cd34688cc0d535a1919e0bea4cbc6c1ea1 Mon Sep 17 00:00:00 2001 From: Harry Yoo Date: Sat, 21 Jun 2025 04:53:05 +0900 Subject: [PATCH 174/234] lib/alloc_tag: do not acquire non-existent lock in alloc_tag_top_users() alloc_tag_top_users() attempts to lock alloc_tag_cttype->mod_lock even when the alloc_tag_cttype is not allocated because: 1) alloc tagging is disabled because mem profiling is disabled (!alloc_tag_cttype) 2) alloc tagging is enabled, but not yet initialized (!alloc_tag_cttype) 3) alloc tagging is enabled, but failed initialization (!alloc_tag_cttype or IS_ERR(alloc_tag_cttype)) In all cases, alloc_tag_cttype is not allocated, and therefore alloc_tag_top_users() should not attempt to acquire the semaphore. This leads to a crash on memory allocation failure by attempting to acquire a non-existent semaphore: Oops: general protection fault, probably for non-canonical address 0xdffffc000000001b: 0000 [#3] SMP KASAN NOPTI KASAN: null-ptr-deref in range [0x00000000000000d8-0x00000000000000df] CPU: 2 UID: 0 PID: 1 Comm: systemd Tainted: G D 6.16.0-rc2 #1 VOLUNTARY Tainted: [D]=DIE Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014 RIP: 0010:down_read_trylock+0xaa/0x3b0 Code: d0 7c 08 84 d2 0f 85 a0 02 00 00 8b 0d df 31 dd 04 85 c9 75 29 48 b8 00 00 00 00 00 fc ff df 48 8d 6b 68 48 89 ea 48 c1 ea 03 <80> 3c 02 00 0f 85 88 02 00 00 48 3b 5b 68 0f 85 53 01 00 00 65 ff RSP: 0000:ffff8881002ce9b8 EFLAGS: 00010016 RAX: dffffc0000000000 RBX: 0000000000000070 RCX: 0000000000000000 RDX: 000000000000001b RSI: 000000000000000a RDI: 0000000000000070 RBP: 00000000000000d8 R08: 0000000000000001 R09: ffffed107dde49d1 R10: ffff8883eef24e8b R11: ffff8881002cec20 R12: 1ffff11020059d37 R13: 00000000003fff7b R14: ffff8881002cec20 R15: dffffc0000000000 FS: 00007f963f21d940(0000) GS:ffff888458ca6000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f963f5edf71 CR3: 000000010672c000 CR4: 0000000000350ef0 Call Trace: codetag_trylock_module_list+0xd/0x20 alloc_tag_top_users+0x369/0x4b0 __show_mem+0x1cd/0x6e0 warn_alloc+0x2b1/0x390 __alloc_frozen_pages_noprof+0x12b9/0x21a0 alloc_pages_mpol+0x135/0x3e0 alloc_slab_page+0x82/0xe0 new_slab+0x212/0x240 ___slab_alloc+0x82a/0xe00 As David Wang points out, this issue became easier to trigger after commit 780138b12381 ("alloc_tag: check mem_profiling_support in alloc_tag_init"). Before the commit, the issue occurred only when it failed to allocate and initialize alloc_tag_cttype or if a memory allocation fails before alloc_tag_init() is called. 
After the commit, it can be easily triggered when memory profiling is compiled but disabled at boot. To properly determine whether alloc_tag_init() has been called and its data structures initialized, verify that alloc_tag_cttype is a valid pointer before acquiring the semaphore. If the variable is NULL or an error value, it has not been properly initialized. In such a case, just skip and do not attempt to acquire the semaphore. [harry.yoo@oracle.com: v3] Link: https://lkml.kernel.org/r/20250624072513.84219-1-harry.yoo@oracle.com Link: https://lkml.kernel.org/r/20250620195305.1115151-1-harry.yoo@oracle.com Fixes: 780138b12381 ("alloc_tag: check mem_profiling_support in alloc_tag_init") Fixes: 1438d349d16b ("lib: add memory allocations report in show_mem()") Signed-off-by: Harry Yoo Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202506181351.bba867dd-lkp@intel.com Acked-by: Suren Baghdasaryan Tested-by: Raghavendra K T Cc: Casey Chen Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Yuanyuan Zhong Cc: Signed-off-by: Andrew Morton --- lib/alloc_tag.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 3a74d63a959e..0142bc916f73 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -135,6 +135,9 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl struct codetag_bytes n; unsigned int i, nr = 0; + if (IS_ERR_OR_NULL(alloc_tag_cttype)) + return 0; + if (can_sleep) codetag_lock_module_list(alloc_tag_cttype, true); else if (!codetag_trylock_module_list(alloc_tag_cttype)) From 7627b459aa0737bdd62a8591a1481cda467f20e3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 23 Jun 2025 09:41:52 -0700 Subject: [PATCH 175/234] scripts/gdb: fix interrupts display after MCP on x86 The text line would not be appended to as it should have, it should have been a '+=' but ended up being a '==', fix that. Link: https://lkml.kernel.org/r/20250623164153.746359-1-florian.fainelli@broadcom.com Fixes: b0969d7687a7 ("scripts/gdb: print interrupts") Signed-off-by: Florian Fainelli Cc: Jan Kiszka Cc: Kieran Bingham Cc: Signed-off-by: Andrew Morton --- scripts/gdb/linux/interrupts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index 616a5f26377a..199d9e8193f4 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -142,7 +142,7 @@ def x86_show_interupts(prec): if constants.LX_CONFIG_X86_MCE: text += x86_show_mce(prec, "&mce_exception_count", "MCE", "Machine check exceptions") - text == x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") + text += x86_show_mce(prec, "&mce_poll_count", "MCP", "Machine check polls") text += show_irq_err_count(prec) From fea18c686320a53fce7ad62a87a3e1d10ad02f31 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 23 Jun 2025 09:57:21 +0200 Subject: [PATCH 176/234] mm/vmalloc: leave lazy MMU mode on PTE mapping error vmap_pages_pte_range() enters the lazy MMU mode, but fails to leave it in case an error is encountered. 
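
A minimal userspace analogue of the fix below (the enter/leave pair stands
in for arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode(); all names are
illustrative): record the error and break, so the single exit path still
leaves the section instead of returning early and leaking the state.

#include <stdio.h>

static void enter_section(void) { printf("enter\n"); }
static void leave_section(void) { printf("leave\n"); }

static int process(const int *vals, int n)
{
	int err = 0;

	enter_section();
	for (int i = 0; i < n; i++) {
		if (vals[i] < 0) {	/* analogous to the WARN_ON() checks */
			err = -1;
			break;		/* do not return with the section held */
		}
		printf("val %d\n", vals[i]);
	}
	leave_section();		/* reached on both success and error */

	return err;
}

int main(void)
{
	const int vals[] = { 1, 2, -3, 4 };

	return process(vals, 4) ? 1 : 0;
}
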
Link: https://lkml.kernel.org/r/20250623075721.2817094-1-agordeev@linux.ibm.com Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified") Signed-off-by: Alexander Gordeev Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202506132017.T1l1l6ME-lkp@intel.com/ Reviewed-by: Ryan Roberts Cc: Signed-off-by: Andrew Morton --- mm/vmalloc.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ab986dd09b6a..6dbcdceecae1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -514,6 +514,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { + int err = 0; pte_t *pte; /* @@ -530,12 +531,18 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, do { struct page *page = pages[*nr]; - if (WARN_ON(!pte_none(ptep_get(pte)))) - return -EBUSY; - if (WARN_ON(!page)) - return -ENOMEM; - if (WARN_ON(!pfn_valid(page_to_pfn(page)))) - return -EINVAL; + if (WARN_ON(!pte_none(ptep_get(pte)))) { + err = -EBUSY; + break; + } + if (WARN_ON(!page)) { + err = -ENOMEM; + break; + } + if (WARN_ON(!pfn_valid(page_to_pfn(page)))) { + err = -EINVAL; + break; + } set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; @@ -543,7 +550,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, arch_leave_lazy_mmu_mode(); *mask |= PGTBL_PTE_MODIFIED; - return 0; + + return err; } static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, From ea9b77f98d94c4d5c1bd1ac1db078f78b40e8bf5 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 24 Jun 2025 15:18:40 -0400 Subject: [PATCH 177/234] maple_tree: fix mt_destroy_walk() on root leaf node On destroy, we should set each node dead. But current code miss this when the maple tree has only the root node. The reason is mt_destroy_walk() leverage mte_destroy_descend() to set node dead, but this is skipped since the only root node is a leaf. Fixes this by setting the node dead if it is a leaf. Link: https://lore.kernel.org/all/20250407231354.11771-1-richard.weiyang@gmail.com/ Link: https://lkml.kernel.org/r/20250624191841.64682-1-Liam.Howlett@oracle.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Wei Yang Signed-off-by: Liam R. Howlett Reviewed-by: Dev Jain Cc: Signed-off-by: Andrew Morton --- lib/maple_tree.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 00524e55a21e..ef66be963798 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5319,6 +5319,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, struct maple_enode *start; if (mte_is_leaf(enode)) { + mte_set_node_dead(enode); node->type = mte_node_type(enode); goto free_leaf; } From a02b0cde8ee515ee0c8efd33e7fbe6830c282e69 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 24 Jun 2025 19:10:20 -0700 Subject: [PATCH 178/234] scripts/gdb: fix interrupts.py after maple tree conversion In commit 721255b9826b ("genirq: Use a maple tree for interrupt descriptor management"), the irq_desc_tree was replaced with a sparse_irqs tree using a maple tree structure. Since the script looked for the irq_desc_tree symbol which is no longer available, no interrupts would be printed and the script output would not be useful anymore. 
In addition to looking up the correct symbol (sparse_irqs), a new module (mapletree.py) is added whose mtree_load() implementation is largely copied after the C version and uses the same variable and intermediate function names wherever possible to ensure that both the C and Python version be updated in the future. This restores the scripts' output to match that of /proc/interrupts. Link: https://lkml.kernel.org/r/20250625021020.1056930-1-florian.fainelli@broadcom.com Fixes: 721255b9826b ("genirq: Use a maple tree for interrupt descriptor management") Signed-off-by: Florian Fainelli Cc: Jan Kiszka Cc: Kieran Bingham Cc: Shanker Donthineni Cc: Thomas Gleinxer Cc: Signed-off-by: Andrew Morton --- scripts/gdb/linux/constants.py.in | 7 + scripts/gdb/linux/interrupts.py | 12 +- scripts/gdb/linux/mapletree.py | 252 ++++++++++++++++++++++++++++++ scripts/gdb/linux/xarray.py | 28 ++++ 4 files changed, 293 insertions(+), 6 deletions(-) create mode 100644 scripts/gdb/linux/mapletree.py create mode 100644 scripts/gdb/linux/xarray.py diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index fd6bd69c5096..f795302ddfa8 100644 --- a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +94,12 @@ LX_GDBPARSED(RADIX_TREE_MAP_SIZE) LX_GDBPARSED(RADIX_TREE_MAP_SHIFT) LX_GDBPARSED(RADIX_TREE_MAP_MASK) +/* linux/maple_tree.h */ +LX_VALUE(MAPLE_NODE_SLOTS) +LX_VALUE(MAPLE_RANGE64_SLOTS) +LX_VALUE(MAPLE_ARANGE64_SLOTS) +LX_GDBPARSED(MAPLE_NODE_MASK) + /* linux/vmalloc.h */ LX_VALUE(VM_IOREMAP) LX_VALUE(VM_ALLOC) diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index 199d9e8193f4..492d1f1e9529 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -7,7 +7,7 @@ import gdb from linux import constants from linux import cpus from linux import utils -from linux import radixtree +from linux import mapletree irq_desc_type = utils.CachedType("struct irq_desc") @@ -23,12 +23,12 @@ def irqd_is_level(desc): def show_irq_desc(prec, irq): text = "" - desc = radixtree.lookup(gdb.parse_and_eval("&irq_desc_tree"), irq) + desc = mapletree.mtree_load(gdb.parse_and_eval("&sparse_irqs"), irq) if desc is None: return text - desc = desc.cast(irq_desc_type.get_type()) - if desc is None: + desc = desc.cast(irq_desc_type.get_type().pointer()) + if desc == 0: return text if irq_settings_is_hidden(desc): @@ -221,8 +221,8 @@ class LxInterruptList(gdb.Command): gdb.write("CPU%-8d" % cpu) gdb.write("\n") - if utils.gdb_eval_or_none("&irq_desc_tree") is None: - return + if utils.gdb_eval_or_none("&sparse_irqs") is None: + raise gdb.GdbError("Unable to find the sparse IRQ tree, is CONFIG_SPARSE_IRQ enabled?") for irq in range(nr_irqs): gdb.write(show_irq_desc(prec, irq)) diff --git a/scripts/gdb/linux/mapletree.py b/scripts/gdb/linux/mapletree.py new file mode 100644 index 000000000000..d52d51c0a03f --- /dev/null +++ b/scripts/gdb/linux/mapletree.py @@ -0,0 +1,252 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Maple tree helpers +# +# Copyright (c) 2025 Broadcom +# +# Authors: +# Florian Fainelli + +import gdb + +from linux import utils +from linux import constants +from linux import xarray + +maple_tree_root_type = utils.CachedType("struct maple_tree") +maple_node_type = utils.CachedType("struct maple_node") +maple_enode_type = utils.CachedType("void") + +maple_dense = 0 +maple_leaf_64 = 1 +maple_range_64 = 2 +maple_arange_64 = 3 + +class 
Mas(object): + ma_active = 0 + ma_start = 1 + ma_root = 2 + ma_none = 3 + ma_pause = 4 + ma_overflow = 5 + ma_underflow = 6 + ma_error = 7 + + def __init__(self, mt, first, end): + if mt.type == maple_tree_root_type.get_type().pointer(): + self.tree = mt.dereference() + elif mt.type != maple_tree_root_type.get_type(): + raise gdb.GdbError("must be {} not {}" + .format(maple_tree_root_type.get_type().pointer(), mt.type)) + self.tree = mt + self.index = first + self.last = end + self.node = None + self.status = self.ma_start + self.min = 0 + self.max = -1 + + def is_start(self): + # mas_is_start() + return self.status == self.ma_start + + def is_ptr(self): + # mas_is_ptr() + return self.status == self.ma_root + + def is_none(self): + # mas_is_none() + return self.status == self.ma_none + + def root(self): + # mas_root() + return self.tree['ma_root'].cast(maple_enode_type.get_type().pointer()) + + def start(self): + # mas_start() + if self.is_start() is False: + return None + + self.min = 0 + self.max = ~0 + + while True: + self.depth = 0 + root = self.root() + if xarray.xa_is_node(root): + self.depth = 0 + self.status = self.ma_active + self.node = mte_safe_root(root) + self.offset = 0 + if mte_dead_node(self.node) is True: + continue + + return None + + self.node = None + # Empty tree + if root is None: + self.status = self.ma_none + self.offset = constants.LX_MAPLE_NODE_SLOTS + return None + + # Single entry tree + self.status = self.ma_root + self.offset = constants.LX_MAPLE_NODE_SLOTS + + if self.index != 0: + return None + + return root + + return None + + def reset(self): + # mas_reset() + self.status = self.ma_start + self.node = None + +def mte_safe_root(node): + if node.type != maple_enode_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(mte_safe_root.__name__, maple_enode_type.get_type().pointer(), node.type)) + ulong_type = utils.get_ulong_type() + indirect_ptr = node.cast(ulong_type) & ~0x2 + val = indirect_ptr.cast(maple_enode_type.get_type().pointer()) + return val + +def mte_node_type(entry): + ulong_type = utils.get_ulong_type() + val = None + if entry.type == maple_enode_type.get_type().pointer(): + val = entry.cast(ulong_type) + elif entry.type == ulong_type: + val = entry + else: + raise gdb.GdbError("{} must be {} not {}" + .format(mte_node_type.__name__, maple_enode_type.get_type().pointer(), entry.type)) + return (val >> 0x3) & 0xf + +def ma_dead_node(node): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(ma_dead_node.__name__, maple_node_type.get_type().pointer(), node.type)) + ulong_type = utils.get_ulong_type() + parent = node['parent'] + indirect_ptr = node['parent'].cast(ulong_type) & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr == node + +def mte_to_node(enode): + ulong_type = utils.get_ulong_type() + if enode.type == maple_enode_type.get_type().pointer(): + indirect_ptr = enode.cast(ulong_type) + elif enode.type == ulong_type: + indirect_ptr = enode + else: + raise gdb.GdbError("{} must be {} not {}" + .format(mte_to_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + indirect_ptr = indirect_ptr & ~constants.LX_MAPLE_NODE_MASK + return indirect_ptr.cast(maple_node_type.get_type().pointer()) + +def mte_dead_node(enode): + if enode.type != maple_enode_type.get_type().pointer(): + raise gdb.GdbError("{} must be {} not {}" + .format(mte_dead_node.__name__, maple_enode_type.get_type().pointer(), enode.type)) + node = mte_to_node(enode) + return 
ma_dead_node(node) + +def ma_is_leaf(tp): + result = tp < maple_range_64 + return tp < maple_range_64 + +def mt_pivots(t): + if t == maple_dense: + return 0 + elif t == maple_leaf_64 or t == maple_range_64: + return constants.LX_MAPLE_RANGE64_SLOTS - 1 + elif t == maple_arange_64: + return constants.LX_MAPLE_ARANGE64_SLOTS - 1 + +def ma_pivots(node, t): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_pivots.__name__, maple_node_type.get_type().pointer(), node.type)) + if t == maple_arange_64: + return node['ma64']['pivot'] + elif t == maple_leaf_64 or t == maple_range_64: + return node['mr64']['pivot'] + else: + return None + +def ma_slots(node, tp): + if node.type != maple_node_type.get_type().pointer(): + raise gdb.GdbError("{}: must be {} not {}" + .format(ma_slots.__name__, maple_node_type.get_type().pointer(), node.type)) + if tp == maple_arange_64: + return node['ma64']['slot'] + elif tp == maple_range_64 or tp == maple_leaf_64: + return node['mr64']['slot'] + elif tp == maple_dense: + return node['slot'] + else: + return None + +def mt_slot(mt, slots, offset): + ulong_type = utils.get_ulong_type() + return slots[offset].cast(ulong_type) + +def mtree_lookup_walk(mas): + ulong_type = utils.get_ulong_type() + n = mas.node + + while True: + node = mte_to_node(n) + tp = mte_node_type(n) + pivots = ma_pivots(node, tp) + end = mt_pivots(tp) + offset = 0 + while True: + if pivots[offset] >= mas.index: + break + if offset >= end: + break + offset += 1 + + slots = ma_slots(node, tp) + n = mt_slot(mas.tree, slots, offset) + if ma_dead_node(node) is True: + mas.reset() + return None + break + + if ma_is_leaf(tp) is True: + break + + return n + +def mtree_load(mt, index): + ulong_type = utils.get_ulong_type() + # MT_STATE(...) 
+ mas = Mas(mt, index, index) + entry = None + + while True: + entry = mas.start() + if mas.is_none(): + return None + + if mas.is_ptr(): + if index != 0: + entry = None + return entry + + entry = mtree_lookup_walk(mas) + if entry is None and mas.is_start(): + continue + else: + break + + if xarray.xa_is_zero(entry): + return None + + return entry diff --git a/scripts/gdb/linux/xarray.py b/scripts/gdb/linux/xarray.py new file mode 100644 index 000000000000..f4477b5def75 --- /dev/null +++ b/scripts/gdb/linux/xarray.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Xarray helpers +# +# Copyright (c) 2025 Broadcom +# +# Authors: +# Florian Fainelli + +import gdb + +from linux import utils +from linux import constants + +def xa_is_internal(entry): + ulong_type = utils.get_ulong_type() + return ((entry.cast(ulong_type) & 3) == 2) + +def xa_mk_internal(v): + return ((v << 2) | 2) + +def xa_is_zero(entry): + ulong_type = utils.get_ulong_type() + return entry.cast(ulong_type) == xa_mk_internal(257) + +def xa_is_node(entry): + ulong_type = utils.get_ulong_type() + return xa_is_internal(entry) and (entry.cast(ulong_type) > 4096) From 50f4d2ba26d5c3a4687ae0569be3bbf1c8f0cbed Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 23 Jun 2025 20:00:19 -0700 Subject: [PATCH 179/234] scripts/gdb: de-reference per-CPU MCE interrupts The per-CPU MCE interrupts are looked up by reference and need to be de-referenced before printing, otherwise we print the addresses of the variables instead of their contents: MCE: 18379471554386948492 Machine check exceptions MCP: 18379471554386948488 Machine check polls The corrected output looks like this instead now: MCE: 0 Machine check exceptions MCP: 1 Machine check polls Link: https://lkml.kernel.org/r/20250625021109.1057046-1-florian.fainelli@broadcom.com Link: https://lkml.kernel.org/r/20250624030020.882472-1-florian.fainelli@broadcom.com Fixes: b0969d7687a7 ("scripts/gdb: print interrupts") Signed-off-by: Florian Fainelli Cc: Jan Kiszka Cc: Kieran Bingham Cc: Signed-off-by: Andrew Morton --- scripts/gdb/linux/interrupts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index 492d1f1e9529..f4f715a8f0e3 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -110,7 +110,7 @@ def x86_show_mce(prec, var, pfx, desc): pvar = gdb.parse_and_eval(var) text = "%*s: " % (prec, pfx) for cpu in cpus.each_online_cpu(): - text += "%10u " % (cpus.per_cpu(pvar, cpu)) + text += "%10u " % (cpus.per_cpu(pvar, cpu).dereference()) text += " %s\n" % (desc) return text From c39b87456411a7e4d97a0f6384ca31f2abb1a4b7 Mon Sep 17 00:00:00 2001 From: Vivek Kasireddy Date: Thu, 26 Jun 2025 12:11:16 -0700 Subject: [PATCH 180/234] mm/hugetlb: don't crash when allocating a folio if there are no resv There are cases when we try to pin a folio but discover that it has not been faulted-in. So, we try to allocate it in memfd_alloc_folio() but there is a chance that we might encounter a fatal crash/failure (VM_BUG_ON(!h->resv_huge_pages) in alloc_hugetlb_folio_reserve()) if there are no active reservations at that instant. This issue was reported by syzbot: kernel BUG at mm/hugetlb.c:2403! 
Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI CPU: 0 UID: 0 PID: 5315 Comm: syz.0.0 Not tainted 6.13.0-rc5-syzkaller-00161-g63676eefb7a0 #0 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014 RIP: 0010:alloc_hugetlb_folio_reserve+0xbc/0xc0 mm/hugetlb.c:2403 Code: 1f eb 05 e8 56 18 a0 ff 48 c7 c7 40 56 61 8e e8 ba 21 cc 09 4c 89 f0 5b 41 5c 41 5e 41 5f 5d c3 cc cc cc cc e8 35 18 a0 ff 90 <0f> 0b 66 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f RSP: 0018:ffffc9000d3d77f8 EFLAGS: 00010087 RAX: ffffffff81ff6beb RBX: 0000000000000000 RCX: 0000000000100000 RDX: ffffc9000e51a000 RSI: 00000000000003ec RDI: 00000000000003ed RBP: 1ffffffff34810d9 R08: ffffffff81ff6ba3 R09: 1ffffd4000093005 R10: dffffc0000000000 R11: fffff94000093006 R12: dffffc0000000000 R13: dffffc0000000000 R14: ffffea0000498000 R15: ffffffff9a4086c8 FS: 00007f77ac12e6c0(0000) GS:ffff88801fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f77ab54b170 CR3: 0000000040b70000 CR4: 0000000000352ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: memfd_alloc_folio+0x1bd/0x370 mm/memfd.c:88 memfd_pin_folios+0xf10/0x1570 mm/gup.c:3750 udmabuf_pin_folios drivers/dma-buf/udmabuf.c:346 [inline] udmabuf_create+0x70e/0x10c0 drivers/dma-buf/udmabuf.c:443 udmabuf_ioctl_create drivers/dma-buf/udmabuf.c:495 [inline] udmabuf_ioctl+0x301/0x4e0 drivers/dma-buf/udmabuf.c:526 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:906 [inline] __se_sys_ioctl+0xf5/0x170 fs/ioctl.c:892 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f Therefore, prevent the above crash by removing the VM_BUG_ON() as there is no need to crash the system in this situation and instead we could just fail the allocation request. Furthermore, as described above, the specific situation where this happens is when we try to pin memfd folios before they are faulted-in. Although, this is a valid thing to do, it is not the regular or the common use-case. Let us consider the following scenarios: 1) hugetlbfs_file_mmap() memfd_alloc_folio() hugetlb_fault() 2) memfd_alloc_folio() hugetlbfs_file_mmap() hugetlb_fault() 3) hugetlbfs_file_mmap() hugetlb_fault() alloc_hugetlb_folio() 3) is the most common use-case where first a memfd is allocated followed by mmap(), user writes/updates and then the relevant folios are pinned (memfd_pin_folios()). The BUG this patch is fixing occurs in 2) because we try to pin the folios before hugetlbfs_file_mmap() is called. So, in this situation we try to allocate the folios before pinning them but since we did not make any reservations, resv_huge_pages would be 0, leading to this issue. 
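
A simplified sketch of the intended behaviour (a toy reserve pool, not the
hugetlb code itself): when nothing has been reserved, drop the lock and fail
the request with NULL instead of treating the state as impossible.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long resv_pages;	/* pages promised to future requests */
static unsigned long free_pages = 8;

static void *alloc_reserved_page(void)
{
	void *page = NULL;

	pthread_mutex_lock(&pool_lock);
	if (!resv_pages) {		/* no reservation: just fail */
		pthread_mutex_unlock(&pool_lock);
		return NULL;
	}
	if (free_pages) {
		free_pages--;
		resv_pages--;
		page = &free_pages;	/* stand-in for a real page */
	}
	pthread_mutex_unlock(&pool_lock);
	return page;
}

int main(void)
{
	printf("no reservation:   %p\n", alloc_reserved_page());
	resv_pages = 1;
	printf("with reservation: %p\n", alloc_reserved_page());
	return 0;
}
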
Link: https://lkml.kernel.org/r/20250626191116.1377761-1-vivek.kasireddy@intel.com Fixes: 26a8ea80929c ("mm/hugetlb: fix memfd_pin_folios resv_huge_pages leak") Reported-by: syzbot+a504cb5bae4fe117ba94@syzkaller.appspotmail.com Signed-off-by: Vivek Kasireddy Closes: https://syzkaller.appspot.com/bug?extid=a504cb5bae4fe117ba94 Closes: https://lore.kernel.org/all/677928b5.050a0220.3b53b0.004d.GAE@google.com/T/ Acked-by: Oscar Salvador Cc: Steve Sistare Cc: Muchun Song Cc: David Hildenbrand Cc: Anshuman Khandual Signed-off-by: Andrew Morton --- mm/hugetlb.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 9dc95eac558c..a0d285d20992 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2340,12 +2340,15 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, struct folio *folio; spin_lock_irq(&hugetlb_lock); + if (!h->resv_huge_pages) { + spin_unlock_irq(&hugetlb_lock); + return NULL; + } + folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, nmask); - if (folio) { - VM_BUG_ON(!h->resv_huge_pages); + if (folio) h->resv_huge_pages--; - } spin_unlock_irq(&hugetlb_lock); return folio; From ddd05742b45b083975a0855ef6ebbf88cf1f532a Mon Sep 17 00:00:00 2001 From: Lance Yang Date: Fri, 27 Jun 2025 14:23:19 +0800 Subject: [PATCH 181/234] mm/rmap: fix potential out-of-bounds page table access during batched unmap As pointed out by David[1], the batched unmap logic in try_to_unmap_one() may read past the end of a PTE table when a large folio's PTE mappings are not fully contained within a single page table. While this scenario might be rare, an issue triggerable from userspace must be fixed regardless of its likelihood. This patch fixes the out-of-bounds access by refactoring the logic into a new helper, folio_unmap_pte_batch(). The new helper correctly calculates the safe batch size by capping the scan at both the VMA and PMD boundaries. To simplify the code, it also supports partial batching (i.e., any number of pages from 1 up to the calculated safe maximum), as there is no strong reason to special-case for fully mapped folios. 
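
The boundary capping can be illustrated with a standalone sketch (the
constants are made up; the 2 MiB "table" plays the role of the range covered
by one PTE table, i.e. what pmd_addr_end() bounds in the real code):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define TABLE_SIZE	(1UL << 21)	/* 2 MiB covered by one PTE table */

static unsigned long cap_batch(unsigned long addr, unsigned long want_pages)
{
	unsigned long table_end = (addr + TABLE_SIZE) & ~(TABLE_SIZE - 1);
	unsigned long max_pages = (table_end - addr) >> PAGE_SHIFT;

	return want_pages < max_pages ? want_pages : max_pages;
}

int main(void)
{
	/* 16 pages requested, but only 4 remain before the 2 MiB boundary */
	unsigned long addr = 0x200000UL - 4 * PAGE_SIZE;

	printf("batch = %lu\n", cap_batch(addr, 16));	/* prints 4 */
	return 0;
}
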
Link: https://lkml.kernel.org/r/20250701143100.6970-1-lance.yang@linux.dev Link: https://lkml.kernel.org/r/20250630011305.23754-1-lance.yang@linux.dev Link: https://lkml.kernel.org/r/20250627062319.84936-1-lance.yang@linux.dev Link: https://lore.kernel.org/linux-mm/a694398c-9f03-4737-81b9-7e49c857fcbe@redhat.com [1] Fixes: 354dffd29575 ("mm: support batched unmap for lazyfree large folios during reclamation") Signed-off-by: Lance Yang Suggested-by: David Hildenbrand Reported-by: David Hildenbrand Closes: https://lore.kernel.org/linux-mm/a694398c-9f03-4737-81b9-7e49c857fcbe@redhat.com Suggested-by: Barry Song Acked-by: Barry Song Reviewed-by: Lorenzo Stoakes Acked-by: David Hildenbrand Reviewed-by: Harry Yoo Cc: Baolin Wang Cc: Chris Li Cc: "Huang, Ying" Cc: Kairui Song Cc: Lance Yang Cc: Liam Howlett Cc: Mingzhe Yang Cc: Rik van Riel Cc: Ryan Roberts Cc: Tangquan Zheng Cc: Vlastimil Babka Cc: Signed-off-by: Andrew Morton --- mm/rmap.c | 46 ++++++++++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index fb63d9256f09..1320b88fab74 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1845,23 +1845,32 @@ void folio_remove_rmap_pud(struct folio *folio, struct page *page, #endif } -/* We support batch unmapping of PTEs for lazyfree large folios */ -static inline bool can_batch_unmap_folio_ptes(unsigned long addr, - struct folio *folio, pte_t *ptep) +static inline unsigned int folio_unmap_pte_batch(struct folio *folio, + struct page_vma_mapped_walk *pvmw, + enum ttu_flags flags, pte_t pte) { const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; - int max_nr = folio_nr_pages(folio); - pte_t pte = ptep_get(ptep); + unsigned long end_addr, addr = pvmw->address; + struct vm_area_struct *vma = pvmw->vma; + unsigned int max_nr; + if (flags & TTU_HWPOISON) + return 1; + if (!folio_test_large(folio)) + return 1; + + /* We may only batch within a single VMA and a single page table. */ + end_addr = pmd_addr_end(addr, vma->vm_end); + max_nr = (end_addr - addr) >> PAGE_SHIFT; + + /* We only support lazyfree batching for now ... */ if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) - return false; + return 1; if (pte_unused(pte)) - return false; - if (pte_pfn(pte) != folio_pfn(folio)) - return false; + return 1; - return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL, - NULL, NULL) == max_nr; + return folio_pte_batch(folio, addr, pvmw->pte, pte, max_nr, fpb_flags, + NULL, NULL, NULL); } /* @@ -2024,9 +2033,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, if (pte_dirty(pteval)) folio_mark_dirty(folio); } else if (likely(pte_present(pteval))) { - if (folio_test_large(folio) && !(flags & TTU_HWPOISON) && - can_batch_unmap_folio_ptes(address, folio, pvmw.pte)) - nr_pages = folio_nr_pages(folio); + nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); end_addr = address + nr_pages * PAGE_SIZE; flush_cache_range(vma, address, end_addr); @@ -2206,13 +2213,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, hugetlb_remove_rmap(folio); } else { folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); - folio_ref_sub(folio, nr_pages - 1); } if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); - folio_put(folio); - /* We have already batched the entire folio */ - if (nr_pages > 1) + folio_put_refs(folio, nr_pages); + + /* + * If we are sure that we batched the entire folio and cleared + * all PTEs, we can just optimize and stop right here. 
+ */ + if (nr_pages == folio_nr_pages(folio)) goto walk_done; continue; walk_abort: From bb1b5929b4279b136816f95ce1e8f1fa689bf4a1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Sun, 29 Jun 2025 13:49:14 -0700 Subject: [PATCH 182/234] mm/damon/core: handle damon_call_control as normal under kdmond deactivation DAMON sysfs interface internally uses damon_call() to update DAMON parameters as users requested, online. However, DAMON core cancels any damon_call() requests when it is deactivated by DAMOS watermarks. As a result, users cannot change DAMON parameters online while DAMON is deactivated. Note that users can turn DAMON off and on with different watermarks to work around. Since deactivated DAMON is nearly same to stopped DAMON, the work around should have no big problem. Anyway, a bug is a bug. There is no real good reason to cancel the damon_call() request under DAMOS deactivation. Fix it by simply handling the request as normal, rather than cancelling under the situation. Link: https://lkml.kernel.org/r/20250629204914.54114-1-sj@kernel.org Fixes: 42b7491af14c ("mm/damon/core: introduce damon_call()") Signed-off-by: SeongJae Park Cc: [6.14+] Signed-off-by: Andrew Morton --- mm/damon/core.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index b217e0120e09..bc2e58c1222d 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -2355,9 +2355,8 @@ static void kdamond_usleep(unsigned long usecs) * * If there is a &struct damon_call_control request that registered via * &damon_call() on @ctx, do or cancel the invocation of the function depending - * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS - * watermarks, or the kdamond is already out of the main loop and therefore - * will be terminated. + * on @cancel. @cancel is set when the kdamond is already out of the main loop + * and therefore will be terminated. */ static void kdamond_call(struct damon_ctx *ctx, bool cancel) { @@ -2405,7 +2404,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) if (ctx->callback.after_wmarks_check && ctx->callback.after_wmarks_check(ctx)) break; - kdamond_call(ctx, true); + kdamond_call(ctx, false); damos_walk_cancel(ctx); } return -EBUSY; From 10d04c26ab2b7da31dd66973f5a09d6fd8ff2a46 Mon Sep 17 00:00:00 2001 From: Christoph Berg Date: Tue, 24 Jun 2025 16:44:27 +0200 Subject: [PATCH 183/234] mm/migrate: fix do_pages_stat in compat mode For arrays with more than 16 entries, the old code would incorrectly advance the pages pointer by 16 words instead of 16 compat_uptr_t. Fix by doing the pointer arithmetic inside get_compat_pages_array where pages32 is already a correctly-typed pointer. Discovered while working on PostgreSQL 18's new NUMA introspection code. 
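
The bug class is easy to demonstrate in isolation: pointer arithmetic scales
by the pointee size, so advancing by "16" moves a different number of bytes
for a native 64-bit pointer array than for a 32-bit compat array, which is
why the chunk offset must be applied to the correctly-typed pointer. A
standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t native[32];
	uint32_t compat[32];

	const uint64_t *n = native;
	const uint32_t *c = compat;

	printf("native + 16 advances %zu bytes\n",
	       (size_t)((const char *)(n + 16) - (const char *)n));	/* 128 */
	printf("compat + 16 advances %zu bytes\n",
	       (size_t)((const char *)(c + 16) - (const char *)c));	/* 64 */
	return 0;
}
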
Link: https://lkml.kernel.org/r/aGREU0XTB48w9CwN@msg.df7cb.de Fixes: 5b1b561ba73c ("mm: simplify compat_sys_move_pages") Signed-off-by: Christoph Berg Acked-by: David Hildenbrand Suggested-by: David Hildenbrand Reported-by: Bertrand Drouvot Reported-by: Tomas Vondra Closes: https://www.postgresql.org/message-id/flat/6342f601-77de-4ee0-8c2a-3deb50ceac5b%40vondra.me#86402e3d80c031788f5f55b42c459471 Cc: Alistair Popple Cc: Byungchul Park Cc: Gregory Price Cc: "Huang, Ying" Cc: Joshua Hahn Cc: Mathew Brost Cc: Rakie Kim Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/migrate.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 8cf0f9c9599d..2c88f3b33833 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2399,6 +2399,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, static int get_compat_pages_array(const void __user *chunk_pages[], const void __user * __user *pages, + unsigned long chunk_offset, unsigned long chunk_nr) { compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; @@ -2406,7 +2407,7 @@ static int get_compat_pages_array(const void __user *chunk_pages[], int i; for (i = 0; i < chunk_nr; i++) { - if (get_user(p, pages32 + i)) + if (get_user(p, pages32 + chunk_offset + i)) return -EFAULT; chunk_pages[i] = compat_ptr(p); } @@ -2425,27 +2426,28 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, #define DO_PAGES_STAT_CHUNK_NR 16UL const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; int chunk_status[DO_PAGES_STAT_CHUNK_NR]; + unsigned long chunk_offset = 0; while (nr_pages) { unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); if (in_compat_syscall()) { if (get_compat_pages_array(chunk_pages, pages, - chunk_nr)) + chunk_offset, chunk_nr)) break; } else { - if (copy_from_user(chunk_pages, pages, + if (copy_from_user(chunk_pages, pages + chunk_offset, chunk_nr * sizeof(*chunk_pages))) break; } do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); - if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) + if (copy_to_user(status + chunk_offset, chunk_status, + chunk_nr * sizeof(*status))) break; - pages += chunk_nr; - status += chunk_nr; + chunk_offset += chunk_nr; nr_pages -= chunk_nr; } return nr_pages ? -EFAULT : 0; From e6d3e653b084f003977bf2e33820cb84d2e4541f Mon Sep 17 00:00:00 2001 From: Illia Ostapyshyn Date: Sun, 29 Jun 2025 02:38:11 +0200 Subject: [PATCH 184/234] scripts: gdb: vfs: support external dentry names d_shortname of struct dentry only reserves D_NAME_INLINE_LEN characters and contains garbage for longer names. Use d_name instead, which always references the valid name. 
Link: https://lore.kernel.org/all/20250525213709.878287-2-illia@yshyn.com/ Link: https://lkml.kernel.org/r/20250629003811.2420418-1-illia@yshyn.com Fixes: 79300ac805b6 ("scripts/gdb: fix dentry_name() lookup") Signed-off-by: Illia Ostapyshyn Tested-by: Florian Fainelli Reviewed-by: Florian Fainelli Cc: Al Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jan Kiszka Cc: Kieran Bingham Cc: Signed-off-by: Andrew Morton --- scripts/gdb/linux/vfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gdb/linux/vfs.py b/scripts/gdb/linux/vfs.py index b5fbb18ccb77..9e921b645a68 100644 --- a/scripts/gdb/linux/vfs.py +++ b/scripts/gdb/linux/vfs.py @@ -22,7 +22,7 @@ def dentry_name(d): if parent == d or parent == 0: return "" p = dentry_name(d['d_parent']) + "/" - return p + d['d_shortname']['string'].string() + return p + d['d_name']['name'].string() class DentryName(gdb.Function): """Return string of the full path of a dentry. From 6ee9b3d84775944fb8c8a447961cd01274ac671c Mon Sep 17 00:00:00 2001 From: Yeoreum Yun Date: Thu, 3 Jul 2025 19:10:18 +0100 Subject: [PATCH 185/234] kasan: remove kasan_find_vm_area() to prevent possible deadlock find_vm_area() couldn't be called in atomic_context. If find_vm_area() is called to reports vm area information, kasan can trigger deadlock like: CPU0 CPU1 vmalloc(); alloc_vmap_area(); spin_lock(&vn->busy.lock) spin_lock_bh(&some_lock); spin_lock(&some_lock); kasan_report(); print_report(); print_address_description(); kasan_find_vm_area(); find_vm_area(); spin_lock(&vn->busy.lock) // deadlock! To prevent possible deadlock while kasan reports, remove kasan_find_vm_area(). Link: https://lkml.kernel.org/r/20250703181018.580833-1-yeoreum.yun@arm.com Fixes: c056a364e954 ("kasan: print virtual mapping info in reports") Signed-off-by: Yeoreum Yun Reported-by: Yunseong Kim Reviewed-by: Andrey Ryabinin Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Byungchul Park Cc: Dmitriy Vyukov Cc: Sebastian Andrzej Siewior Cc: Steven Rostedt Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- mm/kasan/report.c | 45 ++------------------------------------------- 1 file changed, 2 insertions(+), 43 deletions(-) diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 8357e1a33699..b0877035491f 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -370,36 +370,6 @@ static inline bool init_task_stack_addr(const void *addr) sizeof(init_thread_union.stack)); } -/* - * This function is invoked with report_lock (a raw_spinlock) held. A - * PREEMPT_RT kernel cannot call find_vm_area() as it will acquire a sleeping - * rt_spinlock. - * - * For !RT kernel, the PROVE_RAW_LOCK_NESTING config option will print a - * lockdep warning for this raw_spinlock -> spinlock dependency. This config - * option is enabled by default to ensure better test coverage to expose this - * kind of RT kernel problem. This lockdep splat, however, can be suppressed - * by using DEFINE_WAIT_OVERRIDE_MAP() if it serves a useful purpose and the - * invalid PREEMPT_RT case has been taken care of. - */ -static inline struct vm_struct *kasan_find_vm_area(void *addr) -{ - static DEFINE_WAIT_OVERRIDE_MAP(vmalloc_map, LD_WAIT_SLEEP); - struct vm_struct *va; - - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - return NULL; - - /* - * Suppress lockdep warning and fetch vmalloc area of the - * offending address. 
- */ - lock_map_acquire_try(&vmalloc_map); - va = find_vm_area(addr); - lock_map_release(&vmalloc_map); - return va; -} - static void print_address_description(void *addr, u8 tag, struct kasan_report_info *info) { @@ -429,19 +399,8 @@ static void print_address_description(void *addr, u8 tag, } if (is_vmalloc_addr(addr)) { - struct vm_struct *va = kasan_find_vm_area(addr); - - if (va) { - pr_err("The buggy address belongs to the virtual mapping at\n" - " [%px, %px) created by:\n" - " %pS\n", - va->addr, va->addr + va->size, va->caller); - pr_err("\n"); - - page = vmalloc_to_page(addr); - } else { - pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); - } + pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr); + page = vmalloc_to_page(addr); } if (page) { From d9e01c62b7a0c258a7481c083f84c766a8f5597c Mon Sep 17 00:00:00 2001 From: Honggyu Kim Date: Wed, 2 Jul 2025 09:02:01 +0900 Subject: [PATCH 186/234] samples/damon: fix damon sample prcl for start failure Patch series "mm/damon: fix divide by zero and its samples", v3. This series includes fixes against damon and its samples to make it safer when damon sample starting fails. It includes the following changes. - fix unexpected divide by zero crash for zero size regions - fix bugs for damon samples in case of start failures This patch (of 4): The damon_sample_prcl_start() can fail so we must reset the "enable" parameter to "false" again for proper rollback. In such cases, setting Y to "enable" then N triggers the following crash because damon sample start failed but the "enable" stays as Y. [ 2441.419649] damon_sample_prcl: start [ 2454.146817] damon_sample_prcl: stop [ 2454.146862] ------------[ cut here ]------------ [ 2454.146865] kernel BUG at mm/slub.c:546! [ 2454.148183] Oops: invalid opcode: 0000 [#1] SMP NOPTI ... [ 2454.167555] Call Trace: [ 2454.167822] [ 2454.168061] damon_destroy_ctx+0x78/0x140 [ 2454.168454] damon_sample_prcl_enable_store+0x8d/0xd0 [ 2454.168932] param_attr_store+0xa1/0x120 [ 2454.169315] module_attr_store+0x20/0x50 [ 2454.169695] sysfs_kf_write+0x72/0x90 [ 2454.170065] kernfs_fop_write_iter+0x150/0x1e0 [ 2454.170491] vfs_write+0x315/0x440 [ 2454.170833] ksys_write+0x69/0xf0 [ 2454.171162] __x64_sys_write+0x19/0x30 [ 2454.171525] x64_sys_call+0x18b2/0x2700 [ 2454.171900] do_syscall_64+0x7f/0x680 [ 2454.172258] ? exit_to_user_mode_loop+0xf6/0x180 [ 2454.172694] ? clear_bhb_loop+0x30/0x80 [ 2454.173067] ? 
clear_bhb_loop+0x30/0x80 [ 2454.173439] entry_SYSCALL_64_after_hwframe+0x76/0x7e Link: https://lkml.kernel.org/r/20250702000205.1921-1-honggyu.kim@sk.com Link: https://lkml.kernel.org/r/20250702000205.1921-2-honggyu.kim@sk.com Fixes: 2aca254620a8 ("samples/damon: introduce a skeleton of a smaple DAMON module for proactive reclamation") Signed-off-by: Honggyu Kim Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- samples/damon/prcl.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c index 056b1b21a0fe..5597e6a08ab2 100644 --- a/samples/damon/prcl.c +++ b/samples/damon/prcl.c @@ -122,8 +122,12 @@ static int damon_sample_prcl_enable_store( if (enable == enabled) return 0; - if (enable) - return damon_sample_prcl_start(); + if (enable) { + err = damon_sample_prcl_start(); + if (err) + enable = false; + return err; + } damon_sample_prcl_stop(); return 0; } From f1221c8442616a6927aff836327777144545cb29 Mon Sep 17 00:00:00 2001 From: Honggyu Kim Date: Wed, 2 Jul 2025 09:02:02 +0900 Subject: [PATCH 187/234] samples/damon: fix damon sample wsse for start failure The damon_sample_wsse_start() can fail so we must reset the "enable" parameter to "false" again for proper rollback. In such cases, setting Y to "enable" then N triggers the similar crash with wsse because damon sample start failed but the "enable" stays as Y. Link: https://lkml.kernel.org/r/20250702000205.1921-3-honggyu.kim@sk.com Fixes: b757c6cfc696 ("samples/damon/wsse: start and stop DAMON as the user requests") Signed-off-by: Honggyu Kim Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- samples/damon/wsse.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c index 11be25803274..e20238a249e7 100644 --- a/samples/damon/wsse.c +++ b/samples/damon/wsse.c @@ -102,8 +102,12 @@ static int damon_sample_wsse_enable_store( if (enable == enabled) return 0; - if (enable) - return damon_sample_wsse_start(); + if (enable) { + err = damon_sample_wsse_start(); + if (err) + enable = false; + return err; + } damon_sample_wsse_stop(); return 0; } From ddba1b6cf4791824f8b614ff8878353a6c6f79f5 Mon Sep 17 00:00:00 2001 From: Honggyu Kim Date: Wed, 2 Jul 2025 09:02:03 +0900 Subject: [PATCH 188/234] samples/damon: fix damon sample mtier for start failure The damon_sample_mtier_start() can fail so we must reset the "enable" parameter to "false" again for proper rollback. In such cases, setting Y to "enable" then N triggers the similar crash with mtier because damon sample start failed but the "enable" stays as Y. 
Link: https://lkml.kernel.org/r/20250702000205.1921-4-honggyu.kim@sk.com Fixes: 82a08bde3cf7 ("samples/damon: implement a DAMON module for memory tiering") Signed-off-by: Honggyu Kim Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- samples/damon/mtier.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/samples/damon/mtier.c b/samples/damon/mtier.c index 36d2cd933f5a..c94254b77fc9 100644 --- a/samples/damon/mtier.c +++ b/samples/damon/mtier.c @@ -164,8 +164,12 @@ static int damon_sample_mtier_enable_store( if (enable == enabled) return 0; - if (enable) - return damon_sample_mtier_start(); + if (enable) { + err = damon_sample_mtier_start(); + if (err) + enable = false; + return err; + } damon_sample_mtier_stop(); return 0; } From bd225b9591442065beb876da72656f4a2d627d03 Mon Sep 17 00:00:00 2001 From: Honggyu Kim Date: Wed, 2 Jul 2025 09:02:04 +0900 Subject: [PATCH 189/234] mm/damon: fix divide by zero in damon_get_intervals_score() The current implementation allows having zero size regions with no special reasons, but damon_get_intervals_score() gets crashed by divide by zero when the region size is zero. [ 29.403950] Oops: divide error: 0000 [#1] SMP NOPTI This patch fixes the bug, but does not disallow zero size regions to keep the backward compatibility since disallowing zero size regions might be a breaking change for some users. In addition, the same crash can happen when intervals_goal.access_bp is zero so this should be fixed in stable trees as well. Link: https://lkml.kernel.org/r/20250702000205.1921-5-honggyu.kim@sk.com Fixes: f04b0fedbe71 ("mm/damon/core: implement intervals auto-tuning") Signed-off-by: Honggyu Kim Reviewed-by: SeongJae Park Cc: Signed-off-by: Andrew Morton --- mm/damon/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/damon/core.c b/mm/damon/core.c index bc2e58c1222d..979b29e16ef4 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1449,6 +1449,7 @@ static unsigned long damon_get_intervals_score(struct damon_ctx *c) } } target_access_events = max_access_events * goal_bp / 10000; + target_access_events = target_access_events ? : 1; return access_events * 10000 / target_access_events; } From 82241a83cd15aaaf28200a40ad1a8b480012edaf Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 5 Jun 2025 20:58:29 +0800 Subject: [PATCH 190/234] mm: fix the inaccurate memory statistics issue for users On some large machines with a high number of CPUs running a 64K pagesize kernel, we found that the 'RES' field is always 0 displayed by the top command for some processes, which will cause a lot of confusion for users. PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 875525 root 20 0 12480 0 0 R 0.3 0.0 0:00.08 top 1 root 20 0 172800 0 0 S 0.0 0.0 0:04.52 systemd The main reason is that the batch size of the percpu counter is quite large on these machines, caching a significant percpu value, since converting mm's rss stats into percpu_counter by commit f1a7941243c1 ("mm: convert mm's rss stats into percpu_counter"). Intuitively, the batch number should be optimized, but on some paths, performance may take precedence over statistical accuracy. Therefore, introducing a new interface to add the percpu statistical count and display it to users, which can remove the confusion. In addition, this change is not expected to be on a performance-critical path, so the modification should be acceptable. 
In addition, 'mm->rss_stat' is updated via add_mm_counter() and
dec/inc_mm_counter(), which are all wrappers around
percpu_counter_add_batch(). In percpu_counter_add_batch(), there is percpu
batch caching to avoid 'fbc->lock' contention. This patch changes task_mem()
and task_statm() to get the accurate mm counters under the 'fbc->lock', but
this should not exacerbate kernel 'mm->rss_stat' lock contention thanks to
the percpu batch caching of the mm counters. The following test also
confirms the theoretical analysis. I ran stress-ng stressing anon page
faults with 32 threads on my 32-core machine, while simultaneously running a
script that starts 32 threads to busy-loop pread() each stress-ng thread's
/proc/pid/status interface. From the following data, I did not observe any
obvious impact of this patch on the stress-ng tests.

w/o patch:
stress-ng: info: [6848] 4,399,219,085,152 CPU Cycles 67.327 B/sec
stress-ng: info: [6848] 1,616,524,844,832 Instructions 24.740 B/sec (0.367 instr. per cycle)
stress-ng: info: [6848] 39,529,792 Page Faults Total 0.605 M/sec
stress-ng: info: [6848] 39,529,792 Page Faults Minor 0.605 M/sec

w/patch:
stress-ng: info: [2485] 4,462,440,381,856 CPU Cycles 68.382 B/sec
stress-ng: info: [2485] 1,615,101,503,296 Instructions 24.750 B/sec (0.362 instr. per cycle)
stress-ng: info: [2485] 39,439,232 Page Faults Total 0.604 M/sec
stress-ng: info: [2485] 39,439,232 Page Faults Minor 0.604 M/sec

Comparing a very simple app which just allocates and touches some memory
against v6.1 (which doesn't have f1a7941243c1) and the latest Linus tree
(4c06e63b9203), I can see that on the latest Linus tree the values for
VmRSS, RssAnon and RssFile from /proc/self/status are all zeroes, while they
do report values on v6.1 and on a Linus tree with this patch.

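
The effect can be reproduced with a tiny userspace model of a batched
counter (illustrative only): a cheap read of the shared total can report 0,
while an exact read, the analogue of percpu_counter_sum_positive(), folds in
the per-CPU remainders.

#include <stdio.h>

#define NTHREADS	4
#define BATCH		64

struct counter {
	long total;			/* cheap, possibly stale value */
	long pending[NTHREADS];		/* per-"CPU" cached deltas */
};

static void counter_add(struct counter *c, int t, long v)
{
	c->pending[t] += v;
	if (c->pending[t] >= BATCH || c->pending[t] <= -BATCH) {
		c->total += c->pending[t];	/* fold the batch in */
		c->pending[t] = 0;
	}
}

static long counter_read(const struct counter *c)
{
	return c->total;		/* fast but approximate */
}

static long counter_sum(const struct counter *c)
{
	long sum = c->total;

	for (int t = 0; t < NTHREADS; t++)	/* exact but more work */
		sum += c->pending[t];
	return sum;
}

int main(void)
{
	struct counter c = { 0 };

	for (int t = 0; t < NTHREADS; t++)
		counter_add(&c, t, 10);		/* 40 pages charged in total */

	printf("approximate: %ld, exact: %ld\n",
	       counter_read(&c), counter_sum(&c));	/* 0 vs 40 */
	return 0;
}
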
Link: https://lkml.kernel.org/r/f4586b17f66f97c174f7fd1f8647374fdb53de1c.1749119050.git.baolin.wang@linux.alibaba.com Fixes: f1a7941243c1 ("mm: convert mm's rss stats into percpu_counter") Signed-off-by: Baolin Wang Reviewed-by: Aboorva Devarajan Tested-by: Aboorva Devarajan Tested-by Donet Tom Acked-by: Shakeel Butt Acked-by: SeongJae Park Acked-by: Michal Hocko Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Mike Rapoport Cc: Suren Baghdasaryan Cc: Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 14 +++++++------- include/linux/mm.h | 5 +++++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4be91eb6ea5c..751479eb128f 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -36,9 +36,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) unsigned long text, lib, swap, anon, file, shmem; unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; - anon = get_mm_counter(mm, MM_ANONPAGES); - file = get_mm_counter(mm, MM_FILEPAGES); - shmem = get_mm_counter(mm, MM_SHMEMPAGES); + anon = get_mm_counter_sum(mm, MM_ANONPAGES); + file = get_mm_counter_sum(mm, MM_FILEPAGES); + shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES); /* * Note: to minimize their overhead, mm maintains hiwater_vm and @@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) text = min(text, mm->exec_vm << PAGE_SHIFT); lib = (mm->exec_vm << PAGE_SHIFT) - text; - swap = get_mm_counter(mm, MM_SWAPENTS); + swap = get_mm_counter_sum(mm, MM_SWAPENTS); SEQ_PUT_DEC("VmPeak:\t", hiwater_vm); SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm); SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm); @@ -92,12 +92,12 @@ unsigned long task_statm(struct mm_struct *mm, unsigned long *shared, unsigned long *text, unsigned long *data, unsigned long *resident) { - *shared = get_mm_counter(mm, MM_FILEPAGES) + - get_mm_counter(mm, MM_SHMEMPAGES); + *shared = get_mm_counter_sum(mm, MM_FILEPAGES) + + get_mm_counter_sum(mm, MM_SHMEMPAGES); *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> PAGE_SHIFT; *data = mm->data_vm + mm->stack_vm; - *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); + *resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES); return mm->total_vm; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 0ef2ba0c667a..fa538feaa8d9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2568,6 +2568,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) return percpu_counter_read_positive(&mm->rss_stat[member]); } +static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member) +{ + return percpu_counter_sum_positive(&mm->rss_stat[member]); +} + void mm_trace_rss_stat(struct mm_struct *mm, int member); static inline void add_mm_counter(struct mm_struct *mm, int member, long value) From db6cc3f4ac2e6cdc898fc9cbc8b32ae1bf56bdad Mon Sep 17 00:00:00 2001 From: Chen Yu Date: Fri, 4 Jul 2025 21:56:20 +0800 Subject: [PATCH 191/234] Revert "sched/numa: add statistics of numa balance task" This reverts commit ad6b26b6a0a79166b53209df2ca1cf8636296382. This commit introduces per-memcg/task NUMA balance statistics, but unfortunately it introduced a NULL pointer exception due to the following race condition: After a swap task candidate was chosen, its mm_struct pointer was set to NULL due to task exit. Later, when performing the actual task swapping, the p->mm caused the problem. CPU0 CPU1 : ... 
task_numa_migrate task_numa_find_cpu task_numa_compare # a normal task p is chosen env->best_task = p # p exit: exit_signals(p); p->flags |= PF_EXITING exit_mm p->mm = NULL; migrate_swap_stop __migrate_swap_task((arg->src_task, arg->dst_cpu) count_memcg_event_mm(p->mm, NUMA_TASK_SWAP)# p->mm is NULL task_lock() should be held and the PF_EXITING flag needs to be checked to prevent this from happening. After discussion, the conclusion was that adding a lock is not worthwhile for some statistics calculations. Revert the change and rely on the tracepoint for this purpose. Link: https://lkml.kernel.org/r/20250704135620.685752-1-yu.c.chen@intel.com Link: https://lkml.kernel.org/r/20250708064917.BBD13C4CEED@smtp.kernel.org Fixes: ad6b26b6a0a7 ("sched/numa: add statistics of numa balance task") Signed-off-by: Chen Yu Reported-by: Jirka Hladky Closes: https://lore.kernel.org/all/CAE4VaGBLJxpd=NeRJXpSCuw=REhC5LWJpC29kDy-Zh2ZDyzQZA@mail.gmail.com/ Reported-by: Srikanth Aithal Reported-by: Suneeth D Acked-by: Michal Hocko Cc: Borislav Petkov Cc: Ingo Molnar Cc: Jiri Hladky Cc: Libo Chen Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v2.rst | 6 ------ include/linux/sched.h | 4 ---- include/linux/vm_event_item.h | 2 -- kernel/sched/core.c | 9 ++------- kernel/sched/debug.c | 4 ---- mm/memcontrol.c | 2 -- mm/vmstat.c | 2 -- 7 files changed, 2 insertions(+), 27 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 0cc35a14afbe..bd98ea3175ec 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1732,12 +1732,6 @@ The following nested keys are defined. numa_hint_faults (npn) Number of NUMA hinting faults. - numa_task_migrated (npn) - Number of task migration by NUMA balancing. - - numa_task_swapped (npn) - Number of task swap by NUMA balancing. - pgdemote_kswapd Number of pages demoted by kswapd. 
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4f78a64beb52..aa9c5be7a632 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -548,10 +548,6 @@ struct sched_statistics { u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; -#ifdef CONFIG_NUMA_BALANCING - u64 numa_task_migrated; - u64 numa_task_swapped; -#endif u64 nr_wakeups; u64 nr_wakeups_sync; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 91a3ce9a2687..9e15a088ba38 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -66,8 +66,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NUMA_HINT_FAULTS, NUMA_HINT_FAULTS_LOCAL, NUMA_PAGE_MIGRATE, - NUMA_TASK_MIGRATE, - NUMA_TASK_SWAP, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ec68fc686bd7..81c6df746df1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3362,10 +3362,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) #ifdef CONFIG_NUMA_BALANCING static void __migrate_swap_task(struct task_struct *p, int cpu) { - __schedstat_inc(p->stats.numa_task_swapped); - count_vm_numa_event(NUMA_TASK_SWAP); - count_memcg_event_mm(p->mm, NUMA_TASK_SWAP); - if (task_on_rq_queued(p)) { struct rq *src_rq, *dst_rq; struct rq_flags srf, drf; @@ -7939,9 +7935,8 @@ int migrate_task_to(struct task_struct *p, int target_cpu) if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) return -EINVAL; - __schedstat_inc(p->stats.numa_task_migrated); - count_vm_numa_event(NUMA_TASK_MIGRATE); - count_memcg_event_mm(p->mm, NUMA_TASK_MIGRATE); + /* TODO: This is not properly updating schedstats */ + trace_sched_move_numa(p, curr_cpu, target_cpu); return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 9d71baf08075..557246880a7e 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -1210,10 +1210,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P_SCHEDSTAT(nr_failed_migrations_running); P_SCHEDSTAT(nr_failed_migrations_hot); P_SCHEDSTAT(nr_forced_migrations); -#ifdef CONFIG_NUMA_BALANCING - P_SCHEDSTAT(numa_task_migrated); - P_SCHEDSTAT(numa_task_swapped); -#endif P_SCHEDSTAT(nr_wakeups); P_SCHEDSTAT(nr_wakeups_sync); P_SCHEDSTAT(nr_wakeups_migrate); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 902da8a9c643..70fdeda1120b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -474,8 +474,6 @@ static const unsigned int memcg_vm_event_stat[] = { NUMA_PAGE_MIGRATE, NUMA_PTE_UPDATES, NUMA_HINT_FAULTS, - NUMA_TASK_MIGRATE, - NUMA_TASK_SWAP, #endif }; diff --git a/mm/vmstat.c b/mm/vmstat.c index 429ae5339bfe..a78d70ddeacd 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1346,8 +1346,6 @@ const char * const vmstat_text[] = { "numa_hint_faults", "numa_hint_faults_local", "numa_pages_migrated", - "numa_task_migrated", - "numa_task_swapped", #endif #ifdef CONFIG_MIGRATION "pgmigrate_success", From d53238b614e01266a3d36b417b60a502e0698504 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 7 Jul 2025 16:48:32 +0800 Subject: [PATCH 192/234] erofs: fix to add missing tracepoint in erofs_readahead() Commit 771c994ea51f ("erofs: convert all uncompressed cases to iomap") converts to use iomap interface, it removed trace_erofs_readahead() tracepoint in the meantime, let's add it back. 
Fixes: 771c994ea51f ("erofs: convert all uncompressed cases to iomap") Signed-off-by: Chao Yu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20250707084832.2725677-1-chao@kernel.org Signed-off-by: Gao Xiang --- fs/erofs/data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 6a329c329f43..534ac359976e 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -356,6 +356,9 @@ static int erofs_read_folio(struct file *file, struct folio *folio) static void erofs_readahead(struct readahead_control *rac) { + trace_erofs_readahead(rac->mapping->host, readahead_index(rac), + readahead_count(rac), true); + return iomap_readahead(rac, &erofs_iomap_ops); } From 99f7619a77a0a2e3e2bcae676d0f301769167754 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 8 Jul 2025 19:19:42 +0800 Subject: [PATCH 193/234] erofs: fix to add missing tracepoint in erofs_read_folio() Commit 771c994ea51f ("erofs: convert all uncompressed cases to iomap") converts to use iomap interface, it removed trace_erofs_readpage() tracepoint in the meantime, let's add it back. Fixes: 771c994ea51f ("erofs: convert all uncompressed cases to iomap") Signed-off-by: Chao Yu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20250708111942.3120926-1-chao@kernel.org Signed-off-by: Gao Xiang --- fs/erofs/data.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 534ac359976e..221e0ff1ed0d 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -351,6 +351,8 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, */ static int erofs_read_folio(struct file *file, struct folio *folio) { + trace_erofs_read_folio(folio, true); + return iomap_read_folio(folio, &erofs_iomap_ops); } From f5443d0d1ad775927f1473b1c256ef68f2515b9c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 9 Jul 2025 11:46:13 +0800 Subject: [PATCH 194/234] erofs: use memcpy_to_folio() to replace copy_to_iter() Using copy_to_iter() here is overkill and even messy. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20250709034614.2780117-1-hsiangkao@linux.alibaba.com --- fs/erofs/fileio.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index df5cc63f2c01..fe2cd2982b4b 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -96,8 +96,6 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) struct erofs_map_blocks *map = &io->map; unsigned int cur = 0, end = folio_size(folio), len, attached = 0; loff_t pos = folio_pos(folio), ofs; - struct iov_iter iter; - struct bio_vec bv; int err = 0; erofs_onlinefolio_init(folio); @@ -122,13 +120,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) err = PTR_ERR(src); break; } - bvec_set_folio(&bv, folio, len, cur); - iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len); - if (copy_to_iter(src, len, &iter) != len) { - erofs_put_metabuf(&buf); - err = -EIO; - break; - } + memcpy_to_folio(folio, cur, src, len); erofs_put_metabuf(&buf); } else if (!(map->m_flags & EROFS_MAP_MAPPED)) { folio_zero_segment(folio, cur, cur + len); From 27917e8194f91dffd8b4825350c63cb68e98ce58 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 9 Jul 2025 11:46:14 +0800 Subject: [PATCH 195/234] erofs: address D-cache aliasing Flush the D-cache before unlocking folios for compressed inodes, as they are dirtied during decompression. 
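As a rough illustration of the completion tracking the diff below introduces (plain userspace C; the bit positions mirror the patch, but the names and helpers here are made up and are not the erofs API), the idea is to pack an error bit, a sticky dirty bit and a remaining-parts count into one atomic word, and to flush the D-cache only once, when the last part of the folio completes:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ONLINE_EIO   30   /* bit 30: an I/O error was seen              */
#define ONLINE_DIRTY 29   /* bit 29: CPU wrote into the folio (D-cache) */
                          /* bits 0-28: parts still outstanding         */

static atomic_uint word;

static void part_done(int err, bool dirty)
{
	unsigned int orig, v;

	do {
		orig = atomic_load(&word);
		v = (unsigned int)dirty << ONLINE_DIRTY;
		v |= (orig - 1) | ((unsigned int)!!err << ONLINE_EIO);
	} while (!atomic_compare_exchange_weak(&word, &orig, v));

	if (v & ((1u << ONLINE_DIRTY) - 1))
		return;                 /* other parts still pending */
	if (v & (1u << ONLINE_DIRTY))
		puts("flush D-cache once, now that the folio is complete");
	puts(v & (1u << ONLINE_EIO) ? "end read: error" : "end read: ok");
}

int main(void)
{
	atomic_store(&word, 2);         /* this folio is made of two parts   */
	part_done(0, true);             /* decompressed (CPU-dirtied) part   */
	part_done(0, false);            /* direct part; completes the folio  */
	return 0;
}

In this model the dirty bit set by the decompressed part survives until the final completion, which is the one place the flush happens.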
Avoid calling flush_dcache_folio() on every CPU write, since it's more like playing whack-a-mole without real benefit. It has no impact on x86 and arm64/risc-v: on x86, flush_dcache_folio() is a no-op, and on arm64/risc-v, PG_dcache_clean (PG_arch_1) is clear for new page cache folios. However, certain ARM boards are affected, as reported. Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support") Closes: https://lore.kernel.org/r/c1e51e16-6cc6-49d0-a63e-4e9ff6c4dd53@pengutronix.de Closes: https://lore.kernel.org/r/38d43fae-1182-4155-9c5b-ffc7382d9917@siemens.com Tested-by: Jan Kiszka Tested-by: Stefan Kerkmann Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20250709034614.2780117-2-hsiangkao@linux.alibaba.com --- fs/erofs/data.c | 16 +++++++++++----- fs/erofs/decompressor.c | 12 ++++-------- fs/erofs/fileio.c | 4 ++-- fs/erofs/internal.h | 2 +- fs/erofs/zdata.c | 6 +++--- 5 files changed, 21 insertions(+), 19 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 221e0ff1ed0d..16e4a6bd9b97 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -214,9 +214,11 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) /* * bit 30: I/O error occurred on this folio + * bit 29: CPU has dirty data in D-cache (needs aliasing handling); * bit 0 - 29: remaining parts to complete this folio */ -#define EROFS_ONLINEFOLIO_EIO (1 << 30) +#define EROFS_ONLINEFOLIO_EIO 30 +#define EROFS_ONLINEFOLIO_DIRTY 29 void erofs_onlinefolio_init(struct folio *folio) { @@ -233,19 +235,23 @@ void erofs_onlinefolio_split(struct folio *folio) atomic_inc((atomic_t *)&folio->private); } -void erofs_onlinefolio_end(struct folio *folio, int err) +void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty) { int orig, v; do { orig = atomic_read((atomic_t *)&folio->private); - v = (orig - 1) | (err ? 
EROFS_ONLINEFOLIO_EIO : 0); + DBG_BUGON(orig <= 0); + v = dirty << EROFS_ONLINEFOLIO_DIRTY; + v |= (orig - 1) | (!!err << EROFS_ONLINEFOLIO_EIO); } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); - if (v & ~EROFS_ONLINEFOLIO_EIO) + if (v & (BIT(EROFS_ONLINEFOLIO_DIRTY) - 1)) return; folio->private = 0; - folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO)); + if (v & BIT(EROFS_ONLINEFOLIO_DIRTY)) + flush_dcache_folio(folio); + folio_end_read(folio, !(v & BIT(EROFS_ONLINEFOLIO_EIO))); } static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index bf62e2836b60..358061d7b660 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -301,13 +301,11 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, cur = min(cur, rq->outputsize); if (cur && rq->out[0]) { kin = kmap_local_page(rq->in[nrpages_in - 1]); - if (rq->out[0] == rq->in[nrpages_in - 1]) { + if (rq->out[0] == rq->in[nrpages_in - 1]) memmove(kin + rq->pageofs_out, kin + pi, cur); - flush_dcache_page(rq->out[0]); - } else { + else memcpy_to_page(rq->out[0], rq->pageofs_out, kin + pi, cur); - } kunmap_local(kin); } rq->outputsize -= cur; @@ -325,14 +323,12 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; DBG_BUGON(no >= nrpages_out); cnt = min(insz - pi, PAGE_SIZE - po); - if (rq->out[no] == rq->in[ni]) { + if (rq->out[no] == rq->in[ni]) memmove(kin + po, kin + rq->pageofs_in + pi, cnt); - flush_dcache_page(rq->out[no]); - } else if (rq->out[no]) { + else if (rq->out[no]) memcpy_to_page(rq->out[no], po, kin + rq->pageofs_in + pi, cnt); - } pi += cnt; } while (pi < insz); kunmap_local(kin); diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index fe2cd2982b4b..91781718199e 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -38,7 +38,7 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) } else { bio_for_each_folio_all(fi, &rq->bio) { DBG_BUGON(folio_test_uptodate(fi.folio)); - erofs_onlinefolio_end(fi.folio, ret); + erofs_onlinefolio_end(fi.folio, ret, false); } } bio_uninit(&rq->bio); @@ -154,7 +154,7 @@ static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) } cur += len; } - erofs_onlinefolio_end(folio, err); + erofs_onlinefolio_end(folio, err, false); return err; } diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index a32c03a80c70..0d19bde8c094 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -390,7 +390,7 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map); void erofs_onlinefolio_init(struct folio *folio); void erofs_onlinefolio_split(struct folio *folio); -void erofs_onlinefolio_end(struct folio *folio, int err); +void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty); struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid); int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index fe8071844724..32e3905e75fe 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1091,7 +1091,7 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f, tight = (bs == PAGE_SIZE); } } while ((end = cur) > 0); - erofs_onlinefolio_end(folio, err); + erofs_onlinefolio_end(folio, err, false); return err; } @@ -1196,7 +1196,7 @@ static void 
z_erofs_fill_other_copies(struct z_erofs_backend *be, int err) cur += len; } kunmap_local(dst); - erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); + erofs_onlinefolio_end(page_folio(bvi->bvec.page), err, true); list_del(p); kfree(bvi); } @@ -1355,7 +1355,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) DBG_BUGON(z_erofs_page_is_invalidated(page)); if (!z_erofs_is_shortlived_page(page)) { - erofs_onlinefolio_end(page_folio(page), err); + erofs_onlinefolio_end(page_folio(page), err, true); continue; } if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { From d31fbdc4c7252846ea80235db8c1a8c932da9d39 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 10 Jul 2025 15:36:18 +0800 Subject: [PATCH 196/234] erofs: allow readdir() to be interrupted In a quick slow device, readdir() may loop for long time in large directory, let's give a chance to allow it to be interrupted by userspace. Signed-off-by: Chao Yu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20250710073619.4083422-1-chao@kernel.org [ Gao Xiang: move cond_resched() to the end of the while loop. ] Signed-off-by: Gao Xiang --- fs/erofs/dir.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index 2fae209d0274..3e4b38bec0aa 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -58,6 +58,11 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) struct erofs_dirent *de; unsigned int nameoff, maxsize; + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + break; + } + de = erofs_bread(&buf, dbstart, true); if (IS_ERR(de)) { erofs_err(sb, "failed to readdir of logical block %llu of nid %llu", @@ -88,6 +93,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) break; ctx->pos = dbstart + maxsize; ofs = 0; + cond_resched(); } erofs_put_metabuf(&buf); if (EROFS_I(dir)->dot_omitted && ctx->pos == dir->i_size) { From dd831ac8221e691e9e918585b1003c7071df0379 Mon Sep 17 00:00:00 2001 From: Xiang Mei Date: Sat, 5 Jul 2025 14:21:43 -0700 Subject: [PATCH 197/234] net/sched: sch_qfq: Fix null-deref in agg_dequeue To prevent a potential crash in agg_dequeue (net/sched/sch_qfq.c) when cl->qdisc->ops->peek(cl->qdisc) returns NULL, we check the return value before using it, similar to the existing approach in sch_hfsc.c. To avoid code duplication, the following changes are made: 1. Changed qdisc_warn_nonwc(include/net/pkt_sched.h) into a static inline function. 2. Moved qdisc_peek_len from net/sched/sch_hfsc.c to include/net/pkt_sched.h so that sch_qfq can reuse it. 3. Applied qdisc_peek_len in agg_dequeue to avoid crashing. 
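A minimal userspace sketch of the rule the shared helper enforces (illustrative stand-ins only, not the pkt_sched API): peek may legitimately return NULL for a non-work-conserving child qdisc, so the packet length must never be taken without checking first.

#include <stdio.h>
#include <stddef.h>

struct pkt { unsigned int len; };

/* stand-in for sch->ops->peek(): may return NULL even when a packet
 * was expected (the non-work-conserving case) */
static struct pkt *peek(struct pkt *head) { return head; }

static unsigned int peek_len(struct pkt *head)
{
	struct pkt *skb = peek(head);

	if (skb == NULL) {
		fprintf(stderr, "peek_len: child is non-work-conserving?\n");
		return 0;               /* instead of dereferencing NULL */
	}
	return skb->len;
}

int main(void)
{
	struct pkt p = { .len = 1500 };

	printf("%u\n", peek_len(&p));   /* 1500                         */
	printf("%u\n", peek_len(NULL)); /* 0 plus a warning, no crash   */
	return 0;
}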
Signed-off-by: Xiang Mei Reviewed-by: Cong Wang Link: https://patch.msgid.link/20250705212143.3982664-1-xmei5@asu.edu Signed-off-by: Paolo Abeni --- include/net/pkt_sched.h | 25 ++++++++++++++++++++++++- net/sched/sch_api.c | 10 ---------- net/sched/sch_hfsc.c | 16 ---------------- net/sched/sch_qfq.c | 2 +- 4 files changed, 25 insertions(+), 28 deletions(-) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index d7b7b6cd4aa1..8a75c73fc555 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -114,7 +114,6 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct netlink_ext_ack *extack); void qdisc_put_rtab(struct qdisc_rate_table *tab); void qdisc_put_stab(struct qdisc_size_table *tab); -void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, struct net_device *dev, struct netdev_queue *txq, spinlock_t *root_lock, bool validate); @@ -290,4 +289,28 @@ static inline bool tc_qdisc_stats_dump(struct Qdisc *sch, return true; } +static inline void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) +{ + if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { + pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", + txt, qdisc->ops->id, qdisc->handle >> 16); + qdisc->flags |= TCQ_F_WARN_NONWC; + } +} + +static inline unsigned int qdisc_peek_len(struct Qdisc *sch) +{ + struct sk_buff *skb; + unsigned int len; + + skb = sch->ops->peek(sch); + if (unlikely(skb == NULL)) { + qdisc_warn_nonwc("qdisc_peek_len", sch); + return 0; + } + len = qdisc_pkt_len(skb); + + return len; +} + #endif diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 241e86cec9c5..d7c767b861a4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -601,16 +601,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, qdisc_skb_cb(skb)->pkt_len = pkt_len; } -void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc) -{ - if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { - pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", - txt, qdisc->ops->id, qdisc->handle >> 16); - qdisc->flags |= TCQ_F_WARN_NONWC; - } -} -EXPORT_SYMBOL(qdisc_warn_nonwc); - static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) { struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 5a7745170e84..d8fd35da32a7 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -835,22 +835,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) } } -static unsigned int -qdisc_peek_len(struct Qdisc *sch) -{ - struct sk_buff *skb; - unsigned int len; - - skb = sch->ops->peek(sch); - if (unlikely(skb == NULL)) { - qdisc_warn_nonwc("qdisc_peek_len", sch); - return 0; - } - len = qdisc_pkt_len(skb); - - return len; -} - static void hfsc_adjust_levels(struct hfsc_class *cl) { diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index bf1282cb22eb..bcce36608871 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -989,7 +989,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg, if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ list_del_init(&cl->alist); - else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) { + else if (cl->deficit < qdisc_peek_len(cl->qdisc)) { cl->deficit += agg->lmax; list_move_tail(&cl->alist, &agg->active); } From d7a54d02db41f72f0581a3c77c75b0993ed3f6e2 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Wed, 9 Jul 2025 23:34:10 +0300 Subject: [PATCH 198/234] wifi: mac80211: always 
initialize sdata::key_list This is currently not initialized for a virtual monitor, leading to a NULL pointer dereference when - for example - iterating over all the keys of all the vifs. Reviewed-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://patch.msgid.link/20250709233400.8dcefe578497.I4c90a00ae3256520e063199d7f6f2580d5451acf@changeid Signed-off-by: Johannes Berg --- net/mac80211/iface.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7c27f3cd841c..c01634fdba78 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1150,6 +1150,8 @@ static void ieee80211_sdata_init(struct ieee80211_local *local, { sdata->local = local; + INIT_LIST_HEAD(&sdata->key_list); + /* * Initialize the default link, so we can use link_id 0 for non-MLD, * and that continues to work for non-MLD-aware drivers that use just @@ -2210,8 +2212,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ieee80211_init_frag_cache(&sdata->frags); - INIT_LIST_HEAD(&sdata->key_list); - wiphy_delayed_work_init(&sdata->dec_tailroom_needed_wk, ieee80211_delayed_tailroom_dec); From c07981af55d3ba3ec3be880cfe4a0cc10f1f7138 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Wed, 9 Jul 2025 23:34:56 +0300 Subject: [PATCH 199/234] wifi: mac80211: add the virtual monitor after reconfig complete In reconfig we add the virtual monitor in 2 cases: 1. If we are resuming (it was deleted on suspend) 2. If it was added after an error but before the reconfig (due to the last non-monitor interface removal). In the second case, the removal of the non-monitor interface will succeed but the addition of the virtual monitor will fail, so we add it in the reconfig. The problem is that we mislead the driver to think that this is an existing interface that is getting re-added - while it is actually a completely new interface from the drivers' point of view. Some drivers act differently when a interface is re-added. For example, it might not initialize things because they were already initialized. Such drivers will - in this case - be left with a partialy initialized vif. To fix it, add the virtual monitor after reconfig_complete, so the driver will know that this is a completely new interface. Fixes: 3c3e21e7443b ("mac80211: destroy virtual monitor interface across suspend") Reviewed-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://patch.msgid.link/20250709233451.648d39b041e8.I2e37b68375278987e303d6c00cc5f3d8334d2f96@changeid Signed-off-by: Johannes Berg --- net/mac80211/util.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a125995ed252..e66da651678a 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2144,11 +2144,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); wake_up: - - if (local->virt_monitors > 0 && - local->virt_monitors == local->open_count) - ieee80211_add_virtual_monitor(local); - /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. 
@@ -2202,6 +2197,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } + if (local->virt_monitors > 0 && + local->virt_monitors == local->open_count) + ieee80211_add_virtual_monitor(local); + if (!suspended) return 0; From e778689390c71462a099b5d6e56d71c316486184 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 7 Jul 2025 23:14:12 +0200 Subject: [PATCH 200/234] drm/i915/bios: Apply vlv_fixup_mipi_sequences() to v2 mipi-sequences too It turns out that the fixup from vlv_fixup_mipi_sequences() is necessary for some DSI panel's with version 2 mipi-sequences too. Specifically the Acer Iconia One 8 A1-840 (not to be confused with the A1-840FHD which is different) has the following sequences: BDB block 53 (1284 bytes) - MIPI sequence block: Sequence block version v2 Panel 0 * Sequence 2 - MIPI_SEQ_INIT_OTP GPIO index 9, source 0, set 0 (0x00) Delay: 50000 us GPIO index 9, source 0, set 1 (0x01) Delay: 6000 us GPIO index 9, source 0, set 0 (0x00) Delay: 6000 us GPIO index 9, source 0, set 1 (0x01) Delay: 25000 us Send DCS: Port A, VC 0, LP, Type 39, Length 5, Data ff aa 55 a5 80 Send DCS: Port A, VC 0, LP, Type 39, Length 3, Data 6f 11 00 ... Send DCS: Port A, VC 0, LP, Type 05, Length 1, Data 29 Delay: 120000 us Sequence 4 - MIPI_SEQ_DISPLAY_OFF Send DCS: Port A, VC 0, LP, Type 05, Length 1, Data 28 Delay: 105000 us Send DCS: Port A, VC 0, LP, Type 05, Length 2, Data 10 00 Delay: 10000 us Sequence 5 - MIPI_SEQ_ASSERT_RESET Delay: 10000 us GPIO index 9, source 0, set 0 (0x00) Notice how there is no MIPI_SEQ_DEASSERT_RESET, instead the deassert is done at the beginning of MIPI_SEQ_INIT_OTP, which is exactly what the fixup from vlv_fixup_mipi_sequences() fixes up. Extend it to also apply to v2 sequences, this fixes the panel not working on the Acer Iconia One 8 A1-840. Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14605 Signed-off-by: Hans de Goede Acked-by: Jani Nikula Link: https://lore.kernel.org/r/20250703143824.7121-1-hansg@kernel.org Signed-off-by: Rodrigo Vivi (cherry picked from commit 11895f375939d60efe7ed5dddc1cffe2e79f976c) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/display/intel_bios.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index ba7b8938b17c..166ee11831ab 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1938,7 +1938,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, int index, len; if (drm_WARN_ON(display->drm, - !data || panel->vbt.dsi.seq_version != 1)) + !data || panel->vbt.dsi.seq_version >= 3)) return 0; /* index = 1 to skip sequence byte */ @@ -1961,7 +1961,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, } /* - * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. + * Some v1/v2 VBT MIPI sequences do the deassert in the init OTP sequence. * The deassert must be done before calling intel_dsi_device_ready, so for * these devices we split the init OTP sequence into a deassert sequence and * the actual init OTP part. 
@@ -1972,9 +1972,9 @@ static void vlv_fixup_mipi_sequences(struct intel_display *display, u8 *init_otp; int len; - /* Limit this to v1 vid-mode sequences */ + /* Limit this to v1/v2 vid-mode sequences */ if (panel->vbt.dsi.config->is_cmd_mode || - panel->vbt.dsi.seq_version != 1) + panel->vbt.dsi.seq_version >= 3) return; /* Only do this if there are otp and assert seqs and no deassert seq */ From 68ea85df15d111d82fc474cbe104174791169355 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Tue, 8 Jul 2025 22:25:30 +0000 Subject: [PATCH 201/234] PCI/MSI: Prevent recursive locking in pci_msix_write_tph_tag() pci_msix_write_tph_tag() takes the per device MSI descriptor mutex and then invokes msi_domain_get_virq(), which takes the same mutex again. That obviously results in a system hang which is exposed by a softlockup or lockdep warning. Move the lock guard after the invocation of msi_domain_get_virq() to fix this. [ tglx: Massage changelog by adding a proper explanation and removing the not really useful stacktrace ] Fixes: d5124a9957b2 ("PCI/MSI: Provide a sane mechanism for TPH") Reported-by: Jorge Lopez Suggested-by: Thomas Gleixner Signed-off-by: Himanshu Madhani Signed-off-by: Thomas Gleixner Tested-by: Jorge Lopez Link: https://lore.kernel.org/all/20250708222530.1041477-1-himanshu.madhani@oracle.com --- drivers/pci/msi/msi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 6ede55a7c5e6..d686488f4111 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -934,10 +934,12 @@ int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) if (!pdev->msix_enabled) return -ENXIO; - guard(msi_descs_lock)(&pdev->dev); virq = msi_get_virq(&pdev->dev, index); if (!virq) return -ENXIO; + + guard(msi_descs_lock)(&pdev->dev); + /* * This is a horrible hack, but short of implementing a PCI * specific interrupt chip callback and a huge pile of From a8b289f0f2dcbadd8c207ad8f33cf7ba2b4eb088 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 10 Jul 2025 10:00:12 +0200 Subject: [PATCH 202/234] irqchip/irq-msi-lib: Fix build with PCI disabled The armada-370-xp irqchip fails in some randconfig builds because of a missing declaration: In file included from drivers/irqchip/irq-armada-370-xp.c:23: include/linux/irqchip/irq-msi-lib.h:25:39: error: 'struct msi_domain_info' declared inside parameter list will not be visible outside of this definition or declaration [-Werror] Add a forward declaration for the msi_domain_info structure. [ tglx: Fixed up the subsystem prefix. Is it really that hard to get right? 
] Fixes: e51b27438a10 ("irqchip: Make irq-msi-lib.h globally available") Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Acked-by: Marc Zyngier Link: https://lore.kernel.org/all/20250710080021.2303640-1-arnd@kernel.org --- include/linux/irqchip/irq-msi-lib.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h index dd8d1d138544..224ac28e88d7 100644 --- a/include/linux/irqchip/irq-msi-lib.h +++ b/include/linux/irqchip/irq-msi-lib.h @@ -17,6 +17,7 @@ #define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI) +struct msi_domain_info; int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); From 18cdb3d982da8976b28d57691eb256ec5688fad2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 7 Jul 2025 12:45:17 +0000 Subject: [PATCH 203/234] netfilter: flowtable: account for Ethernet header in nf_flow_pppoe_proto() syzbot found a potential access to uninit-value in nf_flow_pppoe_proto() Blamed commit forgot the Ethernet header. BUG: KMSAN: uninit-value in nf_flow_offload_inet_hook+0x7e4/0x940 net/netfilter/nf_flow_table_inet.c:27 nf_flow_offload_inet_hook+0x7e4/0x940 net/netfilter/nf_flow_table_inet.c:27 nf_hook_entry_hookfn include/linux/netfilter.h:157 [inline] nf_hook_slow+0xe1/0x3d0 net/netfilter/core.c:623 nf_hook_ingress include/linux/netfilter_netdev.h:34 [inline] nf_ingress net/core/dev.c:5742 [inline] __netif_receive_skb_core+0x4aff/0x70c0 net/core/dev.c:5837 __netif_receive_skb_one_core net/core/dev.c:5975 [inline] __netif_receive_skb+0xcc/0xac0 net/core/dev.c:6090 netif_receive_skb_internal net/core/dev.c:6176 [inline] netif_receive_skb+0x57/0x630 net/core/dev.c:6235 tun_rx_batched+0x1df/0x980 drivers/net/tun.c:1485 tun_get_user+0x4ee0/0x6b40 drivers/net/tun.c:1938 tun_chr_write_iter+0x3e9/0x5c0 drivers/net/tun.c:1984 new_sync_write fs/read_write.c:593 [inline] vfs_write+0xb4b/0x1580 fs/read_write.c:686 ksys_write fs/read_write.c:738 [inline] __do_sys_write fs/read_write.c:749 [inline] Reported-by: syzbot+bf6ed459397e307c3ad2@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/686bc073.a00a0220.c7b3.0086.GAE@google.com/T/#u Fixes: 87b3593bed18 ("netfilter: flowtable: validate pppoe header") Signed-off-by: Eric Dumazet Reviewed-by: Pablo Neira Ayuso Link: https://patch.msgid.link/20250707124517.614489-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/net/netfilter/nf_flow_table.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index d711642e78b5..c003cd194fa2 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -370,7 +370,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb) static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto) { - if (!pskb_may_pull(skb, PPPOE_SES_HLEN)) + if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN)) return false; *inner_proto = __nf_flow_pppoe_proto(skb); From 711c80f7d8b163d3ecd463cd96f07230f488e750 Mon Sep 17 00:00:00 2001 From: Kito Xu Date: Wed, 9 Jul 2025 03:52:51 +0000 Subject: [PATCH 204/234] net: appletalk: Fix device refcount leak in atrtr_create() When updating an existing route entry in atrtr_create(), the old device reference was not being released before assigning the new device, leading to a device refcount leak. 
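In miniature, the faulty pattern looks like this (toy reference counter in plain C, not the real netdev refcount API): the route's old device keeps the reference it was given because nothing drops it before the pointer is overwritten.

#include <stdio.h>

struct dev { const char *name; int refs; };

static void hold(struct dev *d) { d->refs++; }
static void put(struct dev *d)  { d->refs--; }

int main(void)
{
	struct dev a = { "old", 1 };    /* route already holds "old" */
	struct dev b = { "new", 0 };
	struct dev *rt_dev = &a;

	/* buggy update: the reference on "old" is never dropped */
	hold(&b);
	rt_dev = &b;

	/* the fix is: put(rt_dev); hold(&b); rt_dev = &b; */
	printf("%s refs=%d (leaked), %s refs=%d\n",
	       a.name, a.refs, rt_dev->name, rt_dev->refs);
	return 0;
}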
Fix this by calling dev_put() to release the old device reference before holding the new one. Fixes: c7f905f0f6d4 ("[ATALK]: Add missing dev_hold() to atrtr_create().") Signed-off-by: Kito Xu Link: https://patch.msgid.link/tencent_E1A26771CDAB389A0396D1681A90A49E5D09@qq.com Signed-off-by: Jakub Kicinski --- net/appletalk/ddp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 73ea7e67f05a..30242fe10341 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -576,6 +576,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint) /* Fill in the routing entry */ rt->target = ta->sat_addr; + dev_put(rt->dev); /* Release old device */ dev_hold(devhint); rt->dev = devhint; rt->flags = r->rt_flags; From 01b8114b432d7baaa5e51ab229c12c4f36b8e2c6 Mon Sep 17 00:00:00 2001 From: Mingming Cao Date: Wed, 9 Jul 2025 08:33:32 -0700 Subject: [PATCH 205/234] ibmvnic: Fix hardcoded NUM_RX_STATS/NUM_TX_STATS with dynamic sizeof The previous hardcoded definitions of NUM_RX_STATS and NUM_TX_STATS were not updated when new fields were added to the ibmvnic_{rx,tx}_queue_stats structures. Specifically, commit 2ee73c54a615 ("ibmvnic: Add stat for tx direct vs tx batched") added a fourth TX stat, but NUM_TX_STATS remained 3, leading to a mismatch. This patch replaces the static defines with dynamic sizeof-based calculations to ensure the stat arrays are correctly sized. This fixes incorrect indexing and prevents incomplete stat reporting in tools like ethtool. Fixes: 2ee73c54a615 ("ibmvnic: Add stat for tx direct vs tx batched") Signed-off-by: Mingming Cao Reviewed-by: Dave Marquardt Reviewed-by: Haren Myneni Reviewed-by: Simon Horman Link: https://patch.msgid.link/20250709153332.73892-1-mmc@linux.ibm.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index a189038d88df..246ddce753f9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -211,7 +211,6 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); -#define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { u64 batched_packets; u64 direct_packets; @@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats { u64 dropped_packets; }; -#define NUM_RX_STATS 3 +#define NUM_TX_STATS \ + (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64)) + struct ibmvnic_rx_queue_stats { u64 packets; u64 bytes; u64 interrupts; }; +#define NUM_RX_STATS \ + (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64)) + struct ibmvnic_acl_buffer { __be32 len; __be32 version; From b4517c363e0e005c7f81ae3be199eec68e87f122 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Wed, 9 Jul 2025 15:07:52 +0200 Subject: [PATCH 206/234] net: phy: microchip: Use genphy_soft_reset() to purge stale LPA bits Enable .soft_reset for the LAN88xx PHY driver by assigning genphy_soft_reset() to ensure that the phylib core performs a proper soft reset during reconfiguration. Previously, the driver left .soft_reset unimplemented, so calls to phy_init_hw() (e.g., from lan88xx_link_change_notify()) did not fully reset the PHY. As a result, stale contents in the Link Partner Ability (LPA) register could persist, causing the PHY to incorrectly report that the link partner advertised autonegotiation even when it did not. Using genphy_soft_reset() guarantees a clean reset of the PHY and corrects the false autoneg reporting in these scenarios. 
Fixes: ccb989e4d1ef ("net: phy: microchip: Reset LAN88xx PHY to ensure clean link state on LAN7800/7850") Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20250709130753.3994461-2-o.rempel@pengutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 13570f628aa5..5e590b0a75e5 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -488,6 +488,7 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, .link_change_notify = lan88xx_link_change_notify, + .soft_reset = genphy_soft_reset, /* Interrupt handling is broken, do not define related * functions to force polling. From dd4360c0e8504f2f7639c7f5d07c93cfd6a98333 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Wed, 9 Jul 2025 15:07:53 +0200 Subject: [PATCH 207/234] net: phy: microchip: limit 100M workaround to link-down events on LAN88xx Restrict the 100Mbit forced-mode workaround to link-down transitions only, to prevent repeated link reset cycles in certain configurations. The workaround was originally introduced to improve signal reliability when switching cables between long and short distances. It temporarily forces the PHY into 10 Mbps before returning to 100 Mbps. However, when used with autonegotiating link partners (e.g., Intel i350), executing this workaround on every link change can confuse the partner and cause constant renegotiation loops. This results in repeated link down/up transitions and the PHY never reaching a stable state. Limit the workaround to only run during the PHY_NOLINK state. This ensures it is triggered only once per link drop, avoiding disruptive toggling while still preserving its intended effect. Note: I am not able to reproduce the original issue that this workaround addresses. I can only confirm that 100 Mbit mode works correctly in my test setup. Based on code inspection, I assume the workaround aims to reset some internal state machine or signal block by toggling speeds. However, a PHY reset is already performed earlier in the function via phy_init_hw(), which may achieve a similar effect. Without a reproducer, I conservatively keep the workaround but restrict its conditions. Fixes: e57cf3639c32 ("net: lan78xx: fix accessing the LAN7800's internal phy specific registers from the MAC driver") Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Link: https://patch.msgid.link/20250709130753.3994461-3-o.rempel@pengutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 5e590b0a75e5..dc8634e7bcbe 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -332,7 +332,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) * As workaround, set to 10 before setting to 100 * at forced 100 F/H mode. */ - if (!phydev->autoneg && phydev->speed == 100) { + if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) { /* disable phy interrupt */ temp = phy_read(phydev, LAN88XX_INT_MASK); temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; From 4e914ef063de40397e25a025c70d9737a9e45a8c Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Wed, 9 Jul 2025 16:30:10 +0200 Subject: [PATCH 208/234] gre: Fix IPv6 multicast route creation. 
Use addrconf_add_dev() instead of ipv6_find_idev() in addrconf_gre_config() so that we don't just get the inet6_dev, but also install the default ff00::/8 multicast route. Before commit 3e6a0243ff00 ("gre: Fix again IPv6 link-local address generation."), the multicast route was created at the end of the function by addrconf_add_mroute(). But this code path is now only taken in one particular case (gre devices not bound to a local IP address and in EUI64 mode). For all other cases, the function exits early and addrconf_add_mroute() is not called anymore. Using addrconf_add_dev() instead of ipv6_find_idev() in addrconf_gre_config(), fixes the problem as it will create the default multicast route for all gre devices. This also brings addrconf_gre_config() a bit closer to the normal netdevice IPv6 configuration code (addrconf_dev_config()). Cc: stable@vger.kernel.org Fixes: 3e6a0243ff00 ("gre: Fix again IPv6 link-local address generation.") Reported-by: Aiden Yang Closes: https://lore.kernel.org/netdev/CANR=AhRM7YHHXVxJ4DmrTNMeuEOY87K2mLmo9KMed1JMr20p6g@mail.gmail.com/ Reviewed-by: Gary Guo Tested-by: Gary Guo Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Link: https://patch.msgid.link/027a923dcb550ad115e6d93ee8bb7d310378bd01.1752070620.git.gnault@redhat.com Signed-off-by: Jakub Kicinski --- net/ipv6/addrconf.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ba2ec7c870cc..870a0bd6c2ba 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3525,11 +3525,9 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); - idev = ipv6_find_idev(dev); - if (IS_ERR(idev)) { - pr_debug("%s: add_dev failed\n", __func__); + idev = addrconf_add_dev(dev); + if (IS_ERR(idev)) return; - } /* Generate the IPv6 link-local address using addrconf_addr_gen(), * unless we have an IPv4 GRE device not bound to an IP address and @@ -3543,9 +3541,6 @@ static void addrconf_gre_config(struct net_device *dev) } add_v4_addrs(idev); - - if (dev->flags & IFF_POINTOPOINT) - addrconf_add_mroute(dev); } #endif From 4d61a8a7334399f457867442445a4f916b40cddb Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Wed, 9 Jul 2025 16:30:17 +0200 Subject: [PATCH 209/234] selftests: Add IPv6 multicast route generation tests for GRE devices. The previous patch fixes a bug that prevented the creation of the default IPv6 multicast route (ff00::/8) for some GRE devices. Now let's extend the GRE IPv6 selftests to cover this case. Also, rename check_ipv6_ll_addr() to check_ipv6_device_config() and adapt comments and script output to take into account the fact that we're not limited to link-local address generation. Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Link: https://patch.msgid.link/65a89583bde3bf866a1922c2e5158e4d72c520e2.1752070620.git.gnault@redhat.com Signed-off-by: Jakub Kicinski --- .../testing/selftests/net/gre_ipv6_lladdr.sh | 27 ++++++++++++------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/net/gre_ipv6_lladdr.sh b/tools/testing/selftests/net/gre_ipv6_lladdr.sh index 5b34f6e1f831..48eb999a3120 100755 --- a/tools/testing/selftests/net/gre_ipv6_lladdr.sh +++ b/tools/testing/selftests/net/gre_ipv6_lladdr.sh @@ -24,7 +24,10 @@ setup_basenet() ip -netns "${NS0}" address add dev lo 2001:db8::10/64 nodad } -# Check if network device has an IPv6 link-local address assigned. +# Check the IPv6 configuration of a network device. 
+# +# We currently check the generation of the link-local IPv6 address and the +# creation of the ff00::/8 multicast route. # # Parameters: # @@ -35,7 +38,7 @@ setup_basenet() # a link-local address) # * $4: The user visible name for the scenario being tested # -check_ipv6_ll_addr() +check_ipv6_device_config() { local DEV="$1" local EXTRA_MATCH="$2" @@ -45,7 +48,11 @@ check_ipv6_ll_addr() RET=0 set +e ip -netns "${NS0}" -6 address show dev "${DEV}" scope link | grep "fe80::" | grep -q "${EXTRA_MATCH}" - check_err_fail "${XRET}" $? "" + check_err_fail "${XRET}" $? "IPv6 link-local address generation" + + ip -netns "${NS0}" -6 route show table local type multicast ff00::/8 proto kernel | grep -q "${DEV}" + check_err_fail 0 $? "IPv6 multicast route creation" + log_test "${MSG}" set -e } @@ -102,20 +109,20 @@ test_gre_device() ;; esac - # Check that IPv6 link-local address is generated when device goes up + # Check the IPv6 device configuration when it goes up ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}" ip -netns "${NS0}" link set dev gretest up - check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}" + check_ipv6_device_config gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}" # Now disable link-local address generation ip -netns "${NS0}" link set dev gretest down ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode=1 ip -netns "${NS0}" link set dev gretest up - # Check that link-local address generation works when re-enabled while - # the device is already up + # Check the IPv6 device configuration when link-local address + # generation is re-enabled while the device is already up ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}" - check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}" + check_ipv6_device_config gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}" ip -netns "${NS0}" link del dev gretest } @@ -126,7 +133,7 @@ test_gre4() local MODE for GRE_TYPE in "gre" "gretap"; do - printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n" + printf "\n####\nTesting IPv6 configuration of ${GRE_TYPE} devices\n####\n\n" for MODE in "eui64" "none" "stable-privacy" "random"; do test_gre_device "${GRE_TYPE}" 192.0.2.10 192.0.2.11 "${MODE}" @@ -142,7 +149,7 @@ test_gre6() local MODE for GRE_TYPE in "ip6gre" "ip6gretap"; do - printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n" + printf "\n####\nTesting IPv6 configuration of ${GRE_TYPE} devices\n####\n\n" for MODE in "eui64" "none" "stable-privacy" "random"; do test_gre_device "${GRE_TYPE}" 2001:db8::10 2001:db8::11 "${MODE}" From 47c84997c686b4d43b225521b732492552b84758 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 9 Jul 2025 09:12:44 +0000 Subject: [PATCH 210/234] selftests: net: lib: fix shift count out of range I got the following warning when writing other tests: + handle_test_result_pass 'bond 802.3ad' '(lacp_active off)' + local 'test_name=bond 802.3ad' + shift + local 'opt_str=(lacp_active off)' + shift + log_test_result 'bond 802.3ad' '(lacp_active off)' ' OK ' + local 'test_name=bond 802.3ad' + shift + local 'opt_str=(lacp_active off)' + shift + local 'result= OK ' + shift + local retmsg= + shift /net/tools/testing/selftests/net/forwarding/../lib.sh: line 315: shift: shift count out of range This happens because an extra shift is executed even after all arguments have been consumed. 
Remove the last shift in log_test_result() to avoid this warning. Fixes: a923af1ceee7 ("selftests: forwarding: Convert log_test() to recognize RET values") Signed-off-by: Hangbin Liu Link: https://patch.msgid.link/20250709091244.88395-1-liuhangbin@gmail.com Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/lib.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index 006fdadcc4b9..86a216e9aca8 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -312,7 +312,7 @@ log_test_result() local test_name=$1; shift local opt_str=$1; shift local result=$1; shift - local retmsg=$1; shift + local retmsg=$1 printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result" if [[ $retmsg ]]; then From 6d33df611a39a1b4ad9f2b609ded5d6efa04d97e Mon Sep 17 00:00:00 2001 From: Shuicheng Lin Date: Tue, 8 Jul 2025 03:54:25 +0000 Subject: [PATCH 211/234] drm/xe/pm: Restore display pm if there is error after display suspend xe_bo_evict_all() is called after xe_display_pm_suspend(). So if there is error with xe_bo_evict_all(), display pm should be restored. Fixes: 51462211f4a9 ("drm/xe/pxp: add PXP PM support") Fixes: cb8f81c17531 ("drm/xe/display: Make display suspend/resume work on discrete") Cc: Maarten Lankhorst Cc: Daniele Ceraolo Spurio Cc: John Harrison Signed-off-by: Shuicheng Lin Reviewed-by: Daniele Ceraolo Spurio Link: https://lore.kernel.org/r/20250708035424.3608190-2-shuicheng.lin@intel.com Signed-off-by: Rodrigo Vivi (cherry picked from commit 83dcee17855c4e5af037ae3262809036de127903) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_pm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index ff749edc005b..bcfda545e74f 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -134,7 +134,7 @@ int xe_pm_suspend(struct xe_device *xe) /* FIXME: Super racey... */ err = xe_bo_evict_all(xe); if (err) - goto err_pxp; + goto err_display; for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); @@ -151,7 +151,6 @@ int xe_pm_suspend(struct xe_device *xe) err_display: xe_display_pm_resume(xe); -err_pxp: xe_pxp_pm_resume(xe->pxp); err: drm_dbg(&xe->drm, "Device suspend failed %d\n", err); From 253a174c06f8c37aa71521623205b890022b6987 Mon Sep 17 00:00:00 2001 From: Shuicheng Lin Date: Mon, 7 Jul 2025 00:49:14 +0000 Subject: [PATCH 212/234] drm/xe: Release runtime pm for error path of xe_devcoredump_read() xe_pm_runtime_put() is missed to be called for the error path in xe_devcoredump_read(). Add function description comments for xe_devcoredump_read() to help understand it. 
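The shape of the fix, reduced to a sketch (plain C stand-ins for the runtime-PM get/put, not the xe API): whatever the function acquires up front has to be released on every exit, so the early returns become jumps to a single cleanup tail.

#include <stdio.h>

static int pm_usage;                     /* pretend runtime-PM usage count */
static void pm_get(void) { pm_usage++; }
static void pm_put(void) { pm_usage--; }

static long read_dump(int have_buffer)
{
	long ret = 0;

	pm_get();
	if (!have_buffer) {
		ret = -19;               /* -ENODEV                      */
		goto out;                /* error path still reaches put */
	}
	ret = 42;                        /* pretend bytes copied         */
out:
	pm_put();
	return ret;
}

int main(void)
{
	printf("ok=%ld err=%ld usage=%d\n",
	       read_dump(1), read_dump(0), pm_usage);
	return 0;
}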
v2: more detail function comments and refine goto logic (Matt) Fixes: c4a2e5f865b7 ("drm/xe: Add devcoredump chunking") Cc: stable@vger.kernel.org Reviewed-by: Matthew Brost Signed-off-by: Shuicheng Lin Signed-off-by: Matthew Brost Link: https://lore.kernel.org/r/20250707004911.3502904-6-shuicheng.lin@intel.com (cherry picked from commit 017ef1228d735965419ff118fe1b89089e772c42) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_devcoredump.c | 38 +++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 7a8af2311318..11e60d687572 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -171,14 +171,32 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) #define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G) +/** + * xe_devcoredump_read() - Read data from the Xe device coredump snapshot + * @buffer: Destination buffer to copy the coredump data into + * @offset: Offset in the coredump data to start reading from + * @count: Number of bytes to read + * @data: Pointer to the xe_devcoredump structure + * @datalen: Length of the data (unused) + * + * Reads a chunk of the coredump snapshot data into the provided buffer. + * If the devcoredump is smaller than 1.5 GB (XE_DEVCOREDUMP_CHUNK_MAX), + * it is read directly from a pre-written buffer. For larger devcoredumps, + * the pre-written buffer must be periodically repopulated from the snapshot + * state due to kmalloc size limitations. + * + * Return: Number of bytes copied on success, or a negative error code on failure. + */ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { struct xe_devcoredump *coredump = data; struct xe_devcoredump_snapshot *ss; - ssize_t byte_copied; + ssize_t byte_copied = 0; u32 chunk_offset; ssize_t new_chunk_position; + bool pm_needed = false; + int ret = 0; if (!coredump) return -ENODEV; @@ -188,20 +206,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, /* Ensure delayed work is captured before continuing */ flush_work(&ss->work); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + pm_needed = ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX; + if (pm_needed) xe_pm_runtime_get(gt_to_xe(ss->gt)); mutex_lock(&coredump->lock); if (!ss->read.buffer) { - mutex_unlock(&coredump->lock); - return -ENODEV; + ret = -ENODEV; + goto unlock; } - if (offset >= ss->read.size) { - mutex_unlock(&coredump->lock); - return 0; - } + if (offset >= ss->read.size) + goto unlock; new_chunk_position = div_u64_rem(offset, XE_DEVCOREDUMP_CHUNK_MAX, @@ -221,12 +238,13 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, ss->read.size - offset; memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied); +unlock: mutex_unlock(&coredump->lock); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + if (pm_needed) xe_pm_runtime_put(gt_to_xe(ss->gt)); - return byte_copied; + return byte_copied ? byte_copied : ret; } static void xe_devcoredump_free(void *data) From 0539c5eaf81f3f844213bf6b3137a53e5b04b083 Mon Sep 17 00:00:00 2001 From: Shuicheng Lin Date: Tue, 8 Jul 2025 02:14:51 +0000 Subject: [PATCH 213/234] drm/xe/pm: Correct comment of xe_pm_set_vram_threshold() The parameter threshold is with size in MiB, not in bits. Correct it to avoid any confusion. v2: s/mb/MiB, s/vram/VRAM, fix return section. 
(Michal) Fixes: 30c399529f4c ("drm/xe: Document Xe PM component") Cc: Michal Wajdeczko Cc: Rodrigo Vivi Signed-off-by: Shuicheng Lin Link: https://lore.kernel.org/r/20250708021450.3602087-2-shuicheng.lin@intel.com Reviewed-by: Stuart Summers Signed-off-by: Rodrigo Vivi (cherry picked from commit 0efec0500117947f924e5ac83be40f96378af85a) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_pm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index bcfda545e74f..ad263de44111 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -752,11 +752,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe) } /** - * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold + * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold * @xe: xe device instance - * @threshold: VRAM size in bites for the D3cold threshold + * @threshold: VRAM size in MiB for the D3cold threshold * - * Returns 0 for success, negative error code otherwise. + * Return: + * * 0 - success + * * -EINVAL - invalid argument */ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) { From 8c018805097f52d0d35fbc273b4e6dd154811638 Mon Sep 17 00:00:00 2001 From: Julia Filipchuk Date: Thu, 26 Jun 2025 11:28:10 -0700 Subject: [PATCH 214/234] drm/xe/guc: Recommend GuC v70.46.2 for BMG, LNL, DG2 UAPI compatibility version 1.22.2 Resolves various bugs. Recommend newer version. Signed-off-by: Julia Filipchuk Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Lucas De Marchi Link: https://lore.kernel.org/r/20250626182805.1701096-13-daniele.ceraolospurio@intel.com (cherry picked from commit 0b64addcae7f04745bc5f62d41e27268052f812e) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_uc_fw.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 2741849bbf4d..a6612105201a 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -114,10 +114,10 @@ struct fw_blobs_by_type { #define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \ - fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \ - fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \ + fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \ From 7a10175a4220b3f77d74a61ef767f6c43bf39a3e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 10 Jul 2025 10:30:39 +0000 Subject: [PATCH 215/234] drm/xe/bmg: Don't use WA 16023588340 and 22019338487 on VF These workarounds are not applicable for use by the VFs. 
Signed-off-by: Michal Wajdeczko Tested-by: Jakub Kolakowski Reviewed-by: Satyanarayana K V P Signed-off-by: Jakub Kolakowski Link: https://lore.kernel.org/r/20250710103040.375610-2-jakub1.kolakowski@intel.com Signed-off-by: Lucas De Marchi (cherry picked from commit 1d2e2503e506ddc499cbb7afdc8b70bcf6fe241f) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_wa_oob.rules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 69c1d7fc695e..6d70109fcc43 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -38,10 +38,10 @@ GRAPHICS_VERSION(2004) GRAPHICS_VERSION_RANGE(3000, 3001) 22019338487 MEDIA_VERSION(2000) - GRAPHICS_VERSION(2001) + GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf) 22019338487_display PLATFORM(LUNARLAKE) -16023588340 GRAPHICS_VERSION(2001) +16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf) 14019789679 GRAPHICS_VERSION(1255) GRAPHICS_VERSION_RANGE(1270, 2004) no_media_l3 MEDIA_VERSION(3000) From 74806f69b8668ea0e98cd9d461b7803ffa1fcdf3 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 13 Jun 2025 13:00:37 -0700 Subject: [PATCH 216/234] drm/xe/guc: Default log level to non-verbose Currently xe sets the guc log level to a verbose level since it's useful to debug hangs and general development. However the verbose level may already be too much and affect performance. Michal Mrozek did some tests with the L0 compute stack for submission latency with ULLS disabled. Below are the normalized numbers with log level 3 (the current default) as baseline for each test: Test \ Log Level 3 0 1 2 ----------------------------------------------------------- ------ ------ ------ ------ BestWalkerNthCommandListSubmission(CmdListCount=2) 1.00 0.63 0.63 0.96 BestWalkerNthSubmission(KernelCount=2) 1.00 0.62 0.63 0.96 BestWalkerNthSubmissionImmediate(KernelCount=2) 1.00 0.58 0.58 0.85 BestWalkerSubmission 1.00 0.62 0.62 0.96 BestWalkerSubmissionImmediate 1.00 0.63 0.62 0.96 BestWalkerSubmissionImmediateMultiCmdlists(cmdlistCount=2) 1.00 0.58 0.58 0.86 BestWalkerSubmissionImmediateMultiCmdlists(cmdlistCount=4) 1.00 0.70 0.70 0.83 BestWalkerSubmissionImmediateMultiCmdlists(cmdlistCount=8) 1.00 0.53 0.52 0.78 Log level 2 is the first "verbose level" for GuC, where the biggest difference happens. Keep log level 3 for CONFIG_DRM_XE_DEBUG, but switch to 1, i.e. GUC_LOG_LEVEL_NON_VERBOSE, for "normal" builds. Cc: Michal Mrozek Cc: John Harrison Reviewed-by: John Harrison Link: https://lore.kernel.org/r/20250613-guc-log-level-v2-1-cb84a63e49fe@intel.com Signed-off-by: Lucas De Marchi (cherry picked from commit a37128ba613ad6a5f81f382fa3cfe5c4a6527310) Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index e4742e27e2cd..da6793c2f991 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -20,7 +20,7 @@ struct xe_modparam xe_modparam = { .probe_display = true, - .guc_log_level = 3, + .guc_log_level = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? 
3 : 1, .force_probe = CONFIG_DRM_XE_FORCE_PROBE, .wedged_mode = 1, .svm_notifier_size = 512, From edb471108cf1477c44b95e87e8cec261825eb079 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Wed, 9 Jul 2025 09:18:24 +0200 Subject: [PATCH 217/234] MAINTAINERS: remove bouncing address for Nandor Han Nandor's address has been bouncing for some time now. Remove it from MAINTAINERS. The affected driver falls under the wider umbrella of GPIO modules. Link: https://lore.kernel.org/r/20250709071825.16212-1-brgl@bgdev.pl Signed-off-by: Bartosz Golaszewski --- MAINTAINERS | 7 ------- 1 file changed, 7 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 4bac4ea21b64..ac983cb1663d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -27309,13 +27309,6 @@ S: Supported W: http://www.marvell.com F: drivers/i2c/busses/i2c-xlp9xx.c -XRA1403 GPIO EXPANDER -M: Nandor Han -L: linux-gpio@vger.kernel.org -S: Maintained -F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt -F: drivers/gpio/gpio-xra1403.c - XTENSA XTFPGA PLATFORM SUPPORT M: Max Filippov S: Maintained From 58805e9cbc6f6a28f35d90e740956e983a0e036e Mon Sep 17 00:00:00 2001 From: Sean Nyekjaer Date: Fri, 11 Jul 2025 12:12:02 +0200 Subject: [PATCH 218/234] can: m_can: m_can_handle_lost_msg(): downgrade msg lost in rx message to debug level Downgrade the "msg lost in rx" message to debug level, to prevent flooding the kernel log with error messages. Fixes: e0d1f4816f2a ("can: m_can: add Bosch M_CAN controller support") Reviewed-by: Vincent Mailhol Signed-off-by: Sean Nyekjaer Link: https://patch.msgid.link/20250711-mcan_ratelimit-v3-1-7413e8e21b84@geanix.com [mkl: enhance commit message] Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 6c656bfdb323..fe74dbd2c966 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -665,7 +665,7 @@ static int m_can_handle_lost_msg(struct net_device *dev) struct can_frame *frame; u32 timestamp = 0; - netdev_err(dev, "msg lost in rxf0\n"); + netdev_dbg(dev, "msg lost in rxf0\n"); stats->rx_errors++; stats->rx_over_errors++; From cb73e53f7c0700285d743e7afbe37cba9f7df8f3 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 8 Jul 2025 13:19:22 +0300 Subject: [PATCH 219/234] MAINTAINERS: Update Kirill Shutemov's email address for TDX Update MAINTAINERS to use my @kernel.org email address. Signed-off-by: Kirill A. Shutemov Signed-off-by: Dave Hansen Link: https://lore.kernel.org/all/20250708101922.50560-4-kirill.shutemov%40linux.intel.com --- .mailmap | 1 + MAINTAINERS | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index b0ace71968ab..85ad46d20220 100644 --- a/.mailmap +++ b/.mailmap @@ -416,6 +416,7 @@ Kenneth W Chen Kenneth Westfield Kiran Gunda Kirill Tkhai +Kirill A. Shutemov Kishon Vijay Abraham I Konrad Dybcio Konrad Dybcio diff --git a/MAINTAINERS b/MAINTAINERS index fad6cb025a19..adaf349fa4b1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -26944,7 +26944,7 @@ F: arch/x86/kernel/stacktrace.c F: arch/x86/kernel/unwind_*.c X86 TRUST DOMAIN EXTENSIONS (TDX) -M: Kirill A. Shutemov +M: Kirill A. 
Shutemov R: Dave Hansen L: x86@kernel.org L: linux-coco@lists.linux.dev From f7b76466894083c8f518cf29fef75fcd3ec670e5 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 10 Jul 2025 16:53:42 +0300 Subject: [PATCH 220/234] net/mlx5: Reset bw_share field when changing a node's parent When changing a node's parent, its scheduling element is destroyed and re-created with bw_share 0. However, the node's bw_share field was not updated accordingly. Set the node's bw_share to 0 after re-creation to keep the software state in sync with the firmware configuration. Fixes: 9c7bbf4c3304 ("net/mlx5: Add support for setting parent of nodes") Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Reviewed-by: Dragos Tatulea Signed-off-by: Tariq Toukan Reviewed-by: Jacob Keller Link: https://patch.msgid.link/1752155624-24095-2-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index b6ae384396b3..ad9f6fca9b6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -1076,6 +1076,7 @@ static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node, return err; } esw_qos_node_set_parent(node, parent); + node->bw_share = 0; return 0; } From eb41a264a3a576dc040ee37c3d9d6b7e2d9be968 Mon Sep 17 00:00:00 2001 From: Carolina Jubran Date: Thu, 10 Jul 2025 16:53:43 +0300 Subject: [PATCH 221/234] net/mlx5e: Fix race between DIM disable and net_dim() There's a race between disabling DIM and NAPI callbacks using the dim pointer on the RQ or SQ. If NAPI checks the DIM state bit and sees it still set, it assumes `rq->dim` or `sq->dim` is valid. But if DIM gets disabled right after that check, the pointer might already be set to NULL, leading to a NULL pointer dereference in net_dim(). Fix this by calling `synchronize_net()` before freeing the DIM context. This ensures all in-progress NAPI callbacks are finished before the pointer is cleared. Kernel log: BUG: kernel NULL pointer dereference, address: 0000000000000000 ... RIP: 0010:net_dim+0x23/0x190 ... Call Trace: ? __die+0x20/0x60 ? page_fault_oops+0x150/0x3e0 ? common_interrupt+0xf/0xa0 ? sysvec_call_function_single+0xb/0x90 ? exc_page_fault+0x74/0x130 ? asm_exc_page_fault+0x22/0x30 ? net_dim+0x23/0x190 ? mlx5e_poll_ico_cq+0x41/0x6f0 [mlx5_core] ? sysvec_apic_timer_interrupt+0xb/0x90 mlx5e_handle_rx_dim+0x92/0xd0 [mlx5_core] mlx5e_napi_poll+0x2cd/0xac0 [mlx5_core] ? mlx5e_poll_ico_cq+0xe5/0x6f0 [mlx5_core] busy_poll_stop+0xa2/0x200 ? mlx5e_napi_poll+0x1d9/0xac0 [mlx5_core] ? mlx5e_trigger_irq+0x130/0x130 [mlx5_core] __napi_busy_loop+0x345/0x3b0 ? sysvec_call_function_single+0xb/0x90 ? asm_sysvec_call_function_single+0x16/0x20 ? sysvec_apic_timer_interrupt+0xb/0x90 ? pcpu_free_area+0x1e4/0x2e0 napi_busy_loop+0x11/0x20 xsk_recvmsg+0x10c/0x130 sock_recvmsg+0x44/0x70 __sys_recvfrom+0xbc/0x130 ? __schedule+0x398/0x890 __x64_sys_recvfrom+0x20/0x30 do_syscall_64+0x4c/0x100 entry_SYSCALL_64_after_hwframe+0x4b/0x53 ... ---[ end trace 0000000000000000 ]--- ... 
---[ end Kernel panic - not syncing: Fatal exception in interrupt ]--- Fixes: 445a25f6e1a2 ("net/mlx5e: Support updating coalescing configuration without resetting channels") Signed-off-by: Carolina Jubran Reviewed-by: Cosmin Ratiu Signed-off-by: Tariq Toukan Reviewed-by: Jacob Keller Link: https://patch.msgid.link/1752155624-24095-3-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 298bb74ec5e9..d1d629697e28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -113,7 +113,7 @@ int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); } else { __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state); - + synchronize_net(); mlx5e_dim_disable(rq->dim); rq->dim = NULL; } @@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); } else { __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); - + synchronize_net(); mlx5e_dim_disable(sq->dim); sq->dim = NULL; } From 4c9fce56fa702059bbc5ab737265b68f79cbaac4 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Thu, 10 Jul 2025 16:53:44 +0300 Subject: [PATCH 222/234] net/mlx5e: Add new prio for promiscuous mode An optimization for promiscuous mode adds a high-priority steering table with a single catch-all rule to steer all traffic directly to the TTC table. However, a gap exists between the creation of this table and the insertion of the catch-all rule. Packets arriving in this brief window would miss as no rule was inserted yet, unnecessarily incrementing the 'rx_steer_missed_packets' counter and dropped. This patch resolves the issue by introducing a new prio for this table, placing it between MLX5E_TC_PRIO and MLX5E_NIC_PRIO. By doing so, packets arriving during the window now fall through to the next prio (at MLX5E_NIC_PRIO) instead of being dropped. 
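The fallthrough behaviour this relies on can be pictured with a tiny standalone model (the table names and the first-match walk below are purely illustrative, not the mlx5 steering API): a prio whose table has no rule installed yet simply does not terminate the lookup, so traffic continues to the next prio.

        #include <stdbool.h>
        #include <stdio.h>

        /* Hypothetical model: one table per prio, each either has its
         * catch-all rule installed or is still empty. */
        struct prio { const char *name; bool rule_installed; };

        static const char *steer(const struct prio *p, int n)
        {
                for (int i = 0; i < n; i++)
                        if (p[i].rule_installed)   /* first prio with a rule wins */
                                return p[i].name;
                return "dropped";                  /* ran out of prios */
        }

        int main(void)
        {
                /* Promisc table created, catch-all rule not inserted yet. */
                struct prio prios[] = {
                        { "tc",      false },
                        { "promisc", false },      /* own prio: a miss here is harmless */
                        { "nic",     true  },
                };

                printf("steered to: %s\n", steer(prios, 3));   /* "nic" */
                return 0;
        }

In the old layout the promisc table shared MLX5E_NIC_PRIO, so a miss during that window was final, which is exactly the 'rx_steer_missed_packets' increment described above.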
Fixes: 1c46d7409f30 ("net/mlx5e: Optimize promiscuous mode") Signed-off-by: Jianbo Liu Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Reviewed-by: Jacob Keller Link: https://patch.msgid.link/1752155624-24095-4-git-send-email-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 9 +++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 13 +++++++++---- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index b5c3a2a9d2a5..9560fcba643f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -18,7 +18,8 @@ enum { enum { MLX5E_TC_PRIO = 0, - MLX5E_NIC_PRIO + MLX5E_PROMISC_PRIO, + MLX5E_NIC_PRIO, }; struct mlx5e_flow_table { @@ -68,9 +69,13 @@ struct mlx5e_l2_table { MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) -/* NIC prio FTS */ +/* NIC promisc FT level */ enum { MLX5E_PROMISC_FT_LEVEL, +}; + +/* NIC prio FTS */ +enum { MLX5E_VLAN_FT_LEVEL, MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 04a969128161..265c4ca85f7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -780,7 +780,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs) ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE; ft_attr.autogroup.max_num_groups = 1; ft_attr.level = MLX5E_PROMISC_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.prio = MLX5E_PROMISC_PRIO; ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr); if (IS_ERR(ft->t)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a8046200d376..3dd9a6f40709 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -113,13 +113,16 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, +/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 11 +#define KERNEL_NIC_PRIO_NUM_LEVELS 10 #define KERNEL_NIC_NUM_PRIOS 1 -/* One more level for tc */ -#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) +/* One more level for tc, and one more for promisc */ +#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2) + +#define KERNEL_NIC_PROMISC_NUM_PRIOS 1 +#define KERNEL_NIC_PROMISC_NUM_LEVELS 1 #define KERNEL_NIC_TC_NUM_PRIOS 1 #define KERNEL_NIC_TC_NUM_LEVELS 3 @@ -187,6 +190,8 @@ static struct init_tree_node { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), + ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS, + KERNEL_NIC_PROMISC_NUM_LEVELS), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, From e81750b4e3826fedce7362dad839cb40384d60ae Mon Sep 17 00:00:00 2001 From: Alok Tiwari Date: Thu, 10 Jul 2025 11:06:17 -0700 Subject: [PATCH 223/234] net: ll_temac: Fix missing tx_pending check in ethtools_set_ringparam() The function 
ll_temac_ethtools_set_ringparam() incorrectly checked rx_pending twice, once correctly for RX and once mistakenly in place of tx_pending. This caused tx_pending to be left unchecked against TX_BD_NUM_MAX. As a result, invalid TX ring sizes may have been accepted or valid ones wrongly rejected based on the RX limit, leading to potential misconfiguration or unexpected results. This patch corrects the condition to properly validate tx_pending. Fixes: f7b261bfc35e ("net: ll_temac: Make RX/TX ring sizes configurable") Signed-off-by: Alok Tiwari Link: https://patch.msgid.link/20250710180621.2383000-1-alok.a.tiwari@oracle.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/xilinx/ll_temac_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index edb36ff07a0c..6f82203a414c 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) From b74c2a2e9cc471e847abd87e50a2354c07e02040 Mon Sep 17 00:00:00 2001 From: Shravya KN Date: Thu, 10 Jul 2025 14:39:36 -0700 Subject: [PATCH 224/234] bnxt_en: Fix DCB ETS validation In bnxt_ets_validate(), the code incorrectly loops over all possible traffic classes to check and add the ETS settings. Fix it to loop over the configured traffic classes only. The unconfigured traffic classes will default to TSA_ETS with 0 bandwidth. Looping over these unconfigured traffic classes may cause the validation to fail and trigger this error message: "rejecting ETS config starving a TC\n" The .ieee_setets() will then fail. Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.") Reviewed-by: Sreekanth Reddy Signed-off-by: Shravya KN Signed-off-by: Michael Chan Link: https://patch.msgid.link/20250710213938.1959625-2-michael.chan@broadcom.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 0dbb880a7aa0..71e14be2507e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc) return -EINVAL; + } + for (i = 0; i < max_tc; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: break; From 100c08c89d173b7fdf953e7d9f9ca8f69f80d1c5 Mon Sep 17 00:00:00 2001 From: Shruti Parab Date: Thu, 10 Jul 2025 14:39:37 -0700 Subject: [PATCH 225/234] bnxt_en: Flush FW trace before copying to the coredump bnxt_fill_drv_seg_record() calls bnxt_dbg_hwrm_log_buffer_flush() to flush the FW trace buffer. This needs to be done before we call bnxt_copy_ctx_mem() to copy the trace data. Without this fix, the coredump may not contain all the FW traces. 
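The ordering requirement is the whole fix, and it reduces to a few lines of standalone C (the names below are made up for illustration; this is not the bnxt_en API): whatever is still sitting in the pending buffer has to be folded into the backing store before the snapshot is taken, or the snapshot silently loses it.

        #include <stdio.h>
        #include <string.h>

        struct trace_buf {
                char mem[64];           /* data already committed to host memory */
                char pending[16];       /* data still held on the firmware side */
        };

        /* Analogue of the log-buffer flush: fold pending data into mem. */
        static void flush(struct trace_buf *t)
        {
                strncat(t->mem, t->pending, sizeof(t->mem) - strlen(t->mem) - 1);
                t->pending[0] = '\0';
        }

        /* Analogue of copying the context memory into the coredump. */
        static void snapshot(const struct trace_buf *t, char *dst, size_t len)
        {
                snprintf(dst, len, "%s", t->mem);
        }

        int main(void)
        {
                struct trace_buf t = { .mem = "early ", .pending = "latest" };
                char dump[64];

                flush(&t);                        /* must happen first ... */
                snapshot(&t, dump, sizeof(dump)); /* ... then copy */
                printf("%s\n", dump);             /* "early latest" */
                return 0;
        }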
Fixes: 3c2179e66355 ("bnxt_en: Add FW trace coredump segments to the coredump") Reviewed-by: Kalesh AP Signed-off-by: Shruti Parab Signed-off-by: Michael Chan Link: https://patch.msgid.link/20250710213938.1959625-3-michael.chan@broadcom.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/broadcom/bnxt/bnxt_coredump.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index ce97befd3cb3..67e70d3d0980 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -368,23 +368,27 @@ static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset, if (!ctxm->mem_valid || !seg_id) continue; - if (trace) + if (trace) { extra_hlen = BNXT_SEG_RCD_LEN; + if (buf) { + u16 trace_type = bnxt_bstore_to_trace[type]; + + bnxt_fill_drv_seg_record(bp, &record, ctxm, + trace_type); + } + } + if (buf) data = buf + BNXT_SEG_HDR_LEN + extra_hlen; + seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen; if (buf) { bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len, 0, 0, 0, comp_id, seg_id); memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN); buf += BNXT_SEG_HDR_LEN; - if (trace) { - u16 trace_type = bnxt_bstore_to_trace[type]; - - bnxt_fill_drv_seg_record(bp, &record, ctxm, - trace_type); + if (trace) memcpy(buf, &record, BNXT_SEG_RCD_LEN); - } buf += seg_len; } len += BNXT_SEG_HDR_LEN + seg_len; From 3cdf199d4755d477972ee87110b2aebc88b3cfad Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Thu, 10 Jul 2025 14:39:38 -0700 Subject: [PATCH 226/234] bnxt_en: Set DMA unmap len correctly for XDP_REDIRECT When transmitting an XDP_REDIRECT packet, call dma_unmap_len_set() with the proper length instead of 0. This bug triggers this warning on a system with IOMMU enabled: WARNING: CPU: 36 PID: 0 at drivers/iommu/dma-iommu.c:842 __iommu_dma_unmap+0x159/0x170 RIP: 0010:__iommu_dma_unmap+0x159/0x170 Code: a8 00 00 00 00 48 c7 45 b0 00 00 00 00 48 c7 45 c8 00 00 00 00 48 c7 45 a0 ff ff ff ff 4c 89 45 b8 4c 89 45 c0 e9 77 ff ff ff <0f> 0b e9 60 ff ff ff e8 8b bf 6a 00 66 66 2e 0f 1f 84 00 00 00 00 RSP: 0018:ff22d31181150c88 EFLAGS: 00010206 RAX: 0000000000002000 RBX: 00000000e13a0000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ff22d31181150cf0 R08: ff22d31181150ca8 R09: 0000000000000000 R10: 0000000000000000 R11: ff22d311d36c9d80 R12: 0000000000001000 R13: ff13544d10645010 R14: ff22d31181150c90 R15: ff13544d0b2bac00 FS: 0000000000000000(0000) GS:ff13550908a00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00005be909dacff8 CR3: 0008000173408003 CR4: 0000000000f71ef0 PKRU: 55555554 Call Trace: ? show_regs+0x6d/0x80 ? __warn+0x89/0x160 ? __iommu_dma_unmap+0x159/0x170 ? report_bug+0x17e/0x1b0 ? handle_bug+0x46/0x90 ? exc_invalid_op+0x18/0x80 ? asm_exc_invalid_op+0x1b/0x20 ? __iommu_dma_unmap+0x159/0x170 ? __iommu_dma_unmap+0xb3/0x170 iommu_dma_unmap_page+0x4f/0x100 dma_unmap_page_attrs+0x52/0x220 ? srso_alias_return_thunk+0x5/0xfbef5 ? 
xdp_return_frame+0x2e/0xd0 bnxt_tx_int_xdp+0xdf/0x440 [bnxt_en] __bnxt_poll_work_done+0x81/0x1e0 [bnxt_en] bnxt_poll+0xd3/0x1e0 [bnxt_en] Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support") Signed-off-by: Somnath Kotur Signed-off-by: Michael Chan Link: https://patch.msgid.link/20250710213938.1959625-4-michael.chan@broadcom.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 4a6d8cb9f970..09e7e8efa6fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, tx_buf->action = XDP_REDIRECT; tx_buf->xdpf = xdpf; dma_unmap_addr_set(tx_buf, mapping, mapping); - dma_unmap_len_set(tx_buf, len, 0); + dma_unmap_len_set(tx_buf, len, len); } void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) From a3c4a125ec725cefb40047eb05ff9eafd57830b4 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 11 Jul 2025 05:32:07 +0000 Subject: [PATCH 227/234] netlink: Fix rmem check in netlink_broadcast_deliver(). We need to allow queuing at least one skb even when skb is larger than sk->sk_rcvbuf. The cited commit made a mistake while converting a condition in netlink_broadcast_deliver(). Let's correct the rmem check for the allow-one-skb rule. Fixes: ae8f160e7eb24 ("netlink: Fix wraparounds of sk->sk_rmem_alloc.") Signed-off-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250711053208.2965945-1-kuniyu@google.com Signed-off-by: Jakub Kicinski --- net/netlink/af_netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 79fbaf7333ce..2d107a8a75d9 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1395,7 +1395,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); rcvbuf = READ_ONCE(sk->sk_rcvbuf); - if ((rmem != skb->truesize || rmem <= rcvbuf) && + if ((rmem == skb->truesize || rmem <= rcvbuf) && !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { netlink_skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); From a215b5723922f8099078478122f02100e489cb80 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 10 Jul 2025 17:11:21 -0700 Subject: [PATCH 228/234] netlink: make sure we allow at least one dump skb Commit under Fixes tightened up the memory accounting for Netlink sockets. Looks like the accounting is too strict for some existing use cases, Marek reported issues with nl80211 / WiFi iw CLI. To reduce number of iterations Netlink dumps try to allocate messages based on the size of the buffer passed to previous recvmsg() calls. If user space uses a larger buffer in recvmsg() than sk_rcvbuf we will allocate an skb we won't be able to queue. Make sure we always allow at least one skb to be queued. Same workaround is already present in netlink_attachskb(). Alternative would be to cap the allocation size to rcvbuf - rmem_alloc but as I said, the workaround is already present in other places. 
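Both this change and the previous one apply the same accounting rule, which is easy to state as standalone code (sock_like/may_queue below are illustrative stand-ins, not kernel symbols): a buffer is accepted if it is the only one charged to the socket, or if the total stays within the receive-buffer limit.

        #include <stdbool.h>
        #include <stdio.h>

        struct sock_like {
                unsigned int rmem_alloc;   /* bytes currently charged */
                unsigned int rcvbuf;       /* configured limit */
        };

        /* Allow-one-skb rule: the first buffer is always accepted, even if it
         * alone exceeds rcvbuf; after that the limit is enforced normally. */
        static bool may_queue(struct sock_like *sk, unsigned int truesize)
        {
                unsigned int rmem = sk->rmem_alloc + truesize;

                if (rmem == truesize || rmem <= sk->rcvbuf) {
                        sk->rmem_alloc = rmem;
                        return true;
                }
                return false;
        }

        int main(void)
        {
                struct sock_like sk = { .rmem_alloc = 0, .rcvbuf = 4096 };

                printf("%d\n", may_queue(&sk, 16384)); /* 1: only buffer, accepted */
                printf("%d\n", may_queue(&sk, 512));   /* 0: over the limit now */
                return 0;
        }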
Reported-by: Marek Szyprowski Link: https://lore.kernel.org/9794af18-4905-46c6-b12c-365ea2f05858@samsung.com Fixes: ae8f160e7eb2 ("netlink: Fix wraparounds of sk->sk_rmem_alloc.") Tested-by: Marek Szyprowski Reviewed-by: Kuniyuki Iwashima Link: https://patch.msgid.link/20250711001121.3649033-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- net/netlink/af_netlink.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2d107a8a75d9..6332a0e06596 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2258,11 +2258,11 @@ static int netlink_dump(struct sock *sk, bool lock_taken) struct netlink_ext_ack extack = {}; struct netlink_callback *cb; struct sk_buff *skb = NULL; + unsigned int rmem, rcvbuf; size_t max_recvmsg_len; struct module *module; int err = -ENOBUFS; int alloc_min_size; - unsigned int rmem; int alloc_size; if (!lock_taken) @@ -2294,8 +2294,9 @@ static int netlink_dump(struct sock *sk, bool lock_taken) if (!skb) goto errout_skb; + rcvbuf = READ_ONCE(sk->sk_rcvbuf); rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc); - if (rmem >= READ_ONCE(sk->sk_rcvbuf)) { + if (rmem != skb->truesize && rmem >= rcvbuf) { atomic_sub(skb->truesize, &sk->sk_rmem_alloc); goto errout_skb; } From b44686c8391b427fb1c85a31c35077e6947c6d90 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sat, 12 Jul 2025 03:58:26 +0800 Subject: [PATCH 229/234] erofs: fix large fragment handling Fragments aren't limited by Z_EROFS_PCLUSTER_MAX_DSIZE. However, if a fragment's logical length is larger than Z_EROFS_PCLUSTER_MAX_DSIZE but the fragment is not the whole inode, it currently returns -EOPNOTSUPP because m_flags has the wrong EROFS_MAP_ENCODED flag set. It is not intended by design but should be rare, as it can only be reproduced by mkfs with `-Eall-fragments` in a specific case. Let's normalize fragment m_flags using the new EROFS_MAP_FRAGMENT. 
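The effect of normalizing (assigning rather than OR-ing) is easy to see in isolation; the flag values below are made up for the sketch and only the relationship MAP_FRAGMENT == MAPPED | __FRAGMENT mirrors the patch.

        #include <stdio.h>

        #define MAP_MAPPED      0x0001   /* illustrative values, not erofs' */
        #define MAP_ENCODED     0x0004   /* the stale bit behind the -EOPNOTSUPP */
        #define __MAP_FRAGMENT  0x0010
        #define MAP_FRAGMENT    (MAP_MAPPED | __MAP_FRAGMENT)

        int main(void)
        {
                unsigned int m_flags = MAP_MAPPED | MAP_ENCODED; /* state on entry */

                unsigned int ored     = m_flags | MAP_FRAGMENT;  /* old: ENCODED leaks through */
                unsigned int assigned = MAP_FRAGMENT;            /* new: clean MAPPED | FRAGMENT */

                printf("or=0x%x assign=0x%x\n", ored, assigned);
                return 0;
        }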
Reported-by: Axel Fontaine Closes: https://github.com/erofs/erofs-utils/issues/23 Fixes: 7c3ca1838a78 ("erofs: restrict pcluster size limitations") Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20250711195826.3601157-1-hsiangkao@linux.alibaba.com --- fs/erofs/internal.h | 4 +++- fs/erofs/zdata.c | 2 +- fs/erofs/zmap.c | 9 ++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 0d19bde8c094..06b867d2fc3b 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -315,10 +315,12 @@ static inline struct folio *erofs_grab_folio_nowait(struct address_space *as, /* The length of extent is full */ #define EROFS_MAP_FULL_MAPPED 0x0008 /* Located in the special packed inode */ -#define EROFS_MAP_FRAGMENT 0x0010 +#define __EROFS_MAP_FRAGMENT 0x0010 /* The extent refers to partial decompressed data */ #define EROFS_MAP_PARTIAL_REF 0x0020 +#define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT) + struct erofs_map_blocks { struct erofs_buf buf; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 32e3905e75fe..e3f28a1bb945 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1034,7 +1034,7 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f, if (!(map->m_flags & EROFS_MAP_MAPPED)) { folio_zero_segment(folio, cur, end); tight = false; - } else if (map->m_flags & EROFS_MAP_FRAGMENT) { + } else if (map->m_flags & __EROFS_MAP_FRAGMENT) { erofs_off_t fpos = offset + cur - map->m_la; err = z_erofs_read_fragment(inode->i_sb, folio, cur, diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 0bebc6e3a4d7..f1a15ff22147 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -413,8 +413,7 @@ static int z_erofs_map_blocks_fo(struct inode *inode, !vi->z_tailextent_headlcn) { map->m_la = 0; map->m_llen = inode->i_size; - map->m_flags = EROFS_MAP_MAPPED | - EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT; + map->m_flags = EROFS_MAP_FRAGMENT; return 0; } initial_lcn = ofs >> lclusterbits; @@ -489,7 +488,7 @@ static int z_erofs_map_blocks_fo(struct inode *inode, goto unmap_out; } } else if (fragment && m.lcn == vi->z_tailextent_headlcn) { - map->m_flags |= EROFS_MAP_FRAGMENT; + map->m_flags = EROFS_MAP_FRAGMENT; } else { map->m_pa = erofs_pos(sb, m.pblk); err = z_erofs_get_extent_compressedlen(&m, initial_lcn); @@ -617,7 +616,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode, if (lstart < lend) { map->m_la = lstart; if (last && (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER)) { - map->m_flags |= EROFS_MAP_MAPPED | EROFS_MAP_FRAGMENT; + map->m_flags = EROFS_MAP_FRAGMENT; vi->z_fragmentoff = map->m_plen; if (recsz > offsetof(struct z_erofs_extent, pstart_lo)) vi->z_fragmentoff |= map->m_pa << 32; @@ -797,7 +796,7 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset, iomap->length = map.m_llen; if (map.m_flags & EROFS_MAP_MAPPED) { iomap->type = IOMAP_MAPPED; - iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ? + iomap->addr = map.m_flags & __EROFS_MAP_FRAGMENT ? IOMAP_NULL_ADDR : map.m_pa; } else { iomap->type = IOMAP_HOLE; From 5f02b80c21e1511c32a37f642497751041069076 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 11 Jul 2025 17:10:32 -0700 Subject: [PATCH 230/234] Revert "eventpoll: Fix priority inversion problem" This reverts commit 8c44dac8add7503c345c0f6c7962e4863b88ba42. I haven't figured out what the actual bug in this commit is, but I did spend a lot of time chasing it down and eventually succeeded in bisecting it down to this. 
For some reason, this eventpoll commit ends up causing delays and stuck user space processes, but it only happens on one of my machines, and only during early boot or during the flurry of initial activity when logging in. I must be triggering some very subtle timing issue, but once I figured out the behavior pattern that made it reasonably reliable to trigger, it did bisect right to this, and reverting the commit fixes the problem. Of course, that was only after I had failed at bisecting it several times, and had flailed around blaming both the drm people and the netlink people for the odd problems. The most obvious of which happened at the time of the first graphical login (the most common symptom being that some gnome app aborted due to a 30s timeout, often leading to the whole session then failing if it was some critical component like gnome-shell or similar). Acked-by: Nam Cao Cc: Frederic Weisbecker Cc: Valentin Schneider Cc: Christian Brauner Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 458 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 324 insertions(+), 134 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 895256cd2786..0fbf5dfedb24 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -137,7 +137,13 @@ struct epitem { }; /* List header used to link this structure to the eventpoll ready list */ - struct llist_node rdllink; + struct list_head rdllink; + + /* + * Works together "struct eventpoll"->ovflist in keeping the + * single linked chain of items. + */ + struct epitem *next; /* The file descriptor information this item refers to */ struct epoll_filefd ffd; @@ -185,15 +191,22 @@ struct eventpoll { /* Wait queue used by file->poll() */ wait_queue_head_t poll_wait; - /* - * List of ready file descriptors. Adding to this list is lockless. Items can be removed - * only with eventpoll::mtx - */ - struct llist_head rdllist; + /* List of ready file descriptors */ + struct list_head rdllist; + + /* Lock which protects rdllist and ovflist */ + rwlock_t lock; /* RB tree root used to store monitored fd structs */ struct rb_root_cached rbr; + /* + * This is a single linked list that chains all the "struct epitem" that + * happened while transferring ready events to userspace w/out + * holding ->lock. + */ + struct epitem *ovflist; + /* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */ struct wakeup_source *ws; @@ -348,14 +361,10 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1, (p1->file < p2->file ? -1 : p1->fd - p2->fd)); } -/* - * Add the item to its container eventpoll's rdllist; do nothing if the item is already on rdllist. - */ -static void epitem_ready(struct epitem *epi) +/* Tells us if the item is currently linked */ +static inline int ep_is_linked(struct epitem *epi) { - if (&epi->rdllink == cmpxchg(&epi->rdllink.next, &epi->rdllink, NULL)) - llist_add(&epi->rdllink, &epi->ep->rdllist); - + return !list_empty(&epi->rdllink); } static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p) @@ -374,26 +383,13 @@ static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p) * * @ep: Pointer to the eventpoll context. * - * Return: true if ready events might be available, false otherwise. + * Return: a value different than %zero if ready events are available, + * or %zero otherwise. 
*/ -static inline bool ep_events_available(struct eventpoll *ep) +static inline int ep_events_available(struct eventpoll *ep) { - bool available; - int locked; - - locked = mutex_trylock(&ep->mtx); - if (!locked) { - /* - * The lock held and someone might have removed all items while inspecting it. The - * llist_empty() check in this case is futile. Assume that something is enqueued and - * let ep_try_send_events() figure it out. - */ - return true; - } - - available = !llist_empty(&ep->rdllist); - mutex_unlock(&ep->mtx); - return available; + return !list_empty_careful(&ep->rdllist) || + READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR; } #ifdef CONFIG_NET_RX_BUSY_POLL @@ -728,6 +724,77 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi) rcu_read_unlock(); } + +/* + * ep->mutex needs to be held because we could be hit by + * eventpoll_release_file() and epoll_ctl(). + */ +static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist) +{ + /* + * Steal the ready list, and re-init the original one to the + * empty list. Also, set ep->ovflist to NULL so that events + * happening while looping w/out locks, are not lost. We cannot + * have the poll callback to queue directly on ep->rdllist, + * because we want the "sproc" callback to be able to do it + * in a lockless way. + */ + lockdep_assert_irqs_enabled(); + write_lock_irq(&ep->lock); + list_splice_init(&ep->rdllist, txlist); + WRITE_ONCE(ep->ovflist, NULL); + write_unlock_irq(&ep->lock); +} + +static void ep_done_scan(struct eventpoll *ep, + struct list_head *txlist) +{ + struct epitem *epi, *nepi; + + write_lock_irq(&ep->lock); + /* + * During the time we spent inside the "sproc" callback, some + * other events might have been queued by the poll callback. + * We re-insert them inside the main ready-list here. + */ + for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL; + nepi = epi->next, epi->next = EP_UNACTIVE_PTR) { + /* + * We need to check if the item is already in the list. + * During the "sproc" callback execution time, items are + * queued into ->ovflist but the "txlist" might already + * contain them, and the list_splice() below takes care of them. + */ + if (!ep_is_linked(epi)) { + /* + * ->ovflist is LIFO, so we have to reverse it in order + * to keep in FIFO. + */ + list_add(&epi->rdllink, &ep->rdllist); + ep_pm_stay_awake(epi); + } + } + /* + * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after + * releasing the lock, events will be queued in the normal way inside + * ep->rdllist. + */ + WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR); + + /* + * Quickly re-inject items left on "txlist". + */ + list_splice(txlist, &ep->rdllist); + __pm_relax(ep->ws); + + if (!list_empty(&ep->rdllist)) { + if (waitqueue_active(&ep->wq)) + wake_up(&ep->wq); + } + + write_unlock_irq(&ep->lock); +} + static void ep_get(struct eventpoll *ep) { refcount_inc(&ep->refcount); @@ -765,12 +832,10 @@ static void ep_free(struct eventpoll *ep) static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) { struct file *file = epi->ffd.file; - struct llist_node *put_back_last; struct epitems_head *to_free; struct hlist_head *head; - LLIST_HEAD(put_back); - lockdep_assert_held(&ep->mtx); + lockdep_assert_irqs_enabled(); /* * Removes poll wait queue hooks. 
@@ -802,20 +867,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) rb_erase_cached(&epi->rbn, &ep->rbr); - if (llist_on_list(&epi->rdllink)) { - put_back_last = NULL; - while (true) { - struct llist_node *n = llist_del_first(&ep->rdllist); - - if (&epi->rdllink == n || WARN_ON(!n)) - break; - if (!put_back_last) - put_back_last = n; - __llist_add(n, &put_back); - } - if (put_back_last) - llist_add_batch(put_back.first, put_back_last, &ep->rdllist); - } + write_lock_irq(&ep->lock); + if (ep_is_linked(epi)) + list_del_init(&epi->rdllink); + write_unlock_irq(&ep->lock); wakeup_source_unregister(ep_wakeup_source(epi)); /* @@ -917,9 +972,8 @@ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth) { struct eventpoll *ep = file->private_data; - struct wakeup_source *ws; - struct llist_node *n; - struct epitem *epi; + LIST_HEAD(txlist); + struct epitem *epi, *tmp; poll_table pt; __poll_t res = 0; @@ -933,39 +987,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep * the ready list. */ mutex_lock_nested(&ep->mtx, depth); - while (true) { - n = llist_del_first_init(&ep->rdllist); - if (!n) - break; - - epi = llist_entry(n, struct epitem, rdllink); - + ep_start_scan(ep, &txlist); + list_for_each_entry_safe(epi, tmp, &txlist, rdllink) { if (ep_item_poll(epi, &pt, depth + 1)) { res = EPOLLIN | EPOLLRDNORM; - epitem_ready(epi); break; } else { /* - * We need to activate ep before deactivating epi, to prevent autosuspend - * just in case epi becomes active after ep_item_poll() above. - * - * This is similar to ep_send_events(). + * Item has been dropped into the ready list by the poll + * callback, but it's not actually ready, as far as + * caller requested events goes. We can remove it here. */ - ws = ep_wakeup_source(epi); - if (ws) { - if (ws->active) - __pm_stay_awake(ep->ws); - __pm_relax(ws); - } __pm_relax(ep_wakeup_source(epi)); - - /* Just in case epi becomes active right before __pm_relax() */ - if (unlikely(ep_item_poll(epi, &pt, depth + 1))) - ep_pm_stay_awake(epi); - - __pm_relax(ep->ws); + list_del_init(&epi->rdllink); } } + ep_done_scan(ep, &txlist); mutex_unlock(&ep->mtx); return res; } @@ -1114,10 +1151,12 @@ static int ep_alloc(struct eventpoll **pep) return -ENOMEM; mutex_init(&ep->mtx); + rwlock_init(&ep->lock); init_waitqueue_head(&ep->wq); init_waitqueue_head(&ep->poll_wait); - init_llist_head(&ep->rdllist); + INIT_LIST_HEAD(&ep->rdllist); ep->rbr = RB_ROOT_CACHED; + ep->ovflist = EP_UNACTIVE_PTR; ep->user = get_current_user(); refcount_set(&ep->refcount, 1); @@ -1199,11 +1238,94 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, } #endif /* CONFIG_KCMP */ +/* + * Adds a new entry to the tail of the list in a lockless way, i.e. + * multiple CPUs are allowed to call this function concurrently. + * + * Beware: it is necessary to prevent any other modifications of the + * existing list until all changes are completed, in other words + * concurrent list_add_tail_lockless() calls should be protected + * with a read lock, where write lock acts as a barrier which + * makes sure all list_add_tail_lockless() calls are fully + * completed. + * + * Also an element can be locklessly added to the list only in one + * direction i.e. either to the tail or to the head, otherwise + * concurrent access will corrupt the list. 
+ * + * Return: %false if element has been already added to the list, %true + * otherwise. + */ +static inline bool list_add_tail_lockless(struct list_head *new, + struct list_head *head) +{ + struct list_head *prev; + + /* + * This is simple 'new->next = head' operation, but cmpxchg() + * is used in order to detect that same element has been just + * added to the list from another CPU: the winner observes + * new->next == new. + */ + if (!try_cmpxchg(&new->next, &new, head)) + return false; + + /* + * Initially ->next of a new element must be updated with the head + * (we are inserting to the tail) and only then pointers are atomically + * exchanged. XCHG guarantees memory ordering, thus ->next should be + * updated before pointers are actually swapped and pointers are + * swapped before prev->next is updated. + */ + + prev = xchg(&head->prev, new); + + /* + * It is safe to modify prev->next and new->prev, because a new element + * is added only to the tail and new->next is updated before XCHG. + */ + + prev->next = new; + new->prev = prev; + + return true; +} + +/* + * Chains a new epi entry to the tail of the ep->ovflist in a lockless way, + * i.e. multiple CPUs are allowed to call this function concurrently. + * + * Return: %false if epi element has been already chained, %true otherwise. + */ +static inline bool chain_epi_lockless(struct epitem *epi) +{ + struct eventpoll *ep = epi->ep; + + /* Fast preliminary check */ + if (epi->next != EP_UNACTIVE_PTR) + return false; + + /* Check that the same epi has not been just chained from another CPU */ + if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) + return false; + + /* Atomically exchange tail */ + epi->next = xchg(&ep->ovflist, epi); + + return true; +} + /* * This is the callback that is passed to the wait queue wakeup * mechanism. It is called by the stored file descriptors when they * have events to report. * + * This callback takes a read lock in order not to contend with concurrent + * events from another file descriptor, thus all modifications to ->rdllist + * or ->ovflist are lockless. Read lock is paired with the write lock from + * ep_start/done_scan(), which stops all list modifications and guarantees + * that lists state is seen correctly. + * * Another thing worth to mention is that ep_poll_callback() can be called * concurrently for the same @epi from different CPUs if poll table was inited * with several wait queues entries. Plural wakeup from different CPUs of a @@ -1213,11 +1335,15 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, */ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { + int pwake = 0; struct epitem *epi = ep_item_from_wait(wait); struct eventpoll *ep = epi->ep; __poll_t pollflags = key_to_poll(key); + unsigned long flags; int ewake = 0; + read_lock_irqsave(&ep->lock, flags); + ep_set_busy_poll_napi_id(epi); /* @@ -1227,7 +1353,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v * until the next EPOLL_CTL_MOD will be issued. */ if (!(epi->event.events & ~EP_PRIVATE_BITS)) - goto out; + goto out_unlock; /* * Check the events coming with the callback. At this stage, not @@ -1236,10 +1362,22 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v * test for "key" != NULL before the event match test. 
*/ if (pollflags && !(pollflags & epi->event.events)) - goto out; + goto out_unlock; - ep_pm_stay_awake_rcu(epi); - epitem_ready(epi); + /* + * If we are transferring events to userspace, we can hold no locks + * (because we're accessing user memory, and because of linux f_op->poll() + * semantics). All the events that happen during that period of time are + * chained in ep->ovflist and requeued later on. + */ + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { + if (chain_epi_lockless(epi)) + ep_pm_stay_awake_rcu(epi); + } else if (!ep_is_linked(epi)) { + /* In the usual case, add event to ready list. */ + if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) + ep_pm_stay_awake_rcu(epi); + } /* * Wake up ( if active ) both the eventpoll wait list and the ->poll() @@ -1268,9 +1406,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v wake_up(&ep->wq); } if (waitqueue_active(&ep->poll_wait)) + pwake++; + +out_unlock: + read_unlock_irqrestore(&ep->lock, flags); + + /* We have to call this outside the lock */ + if (pwake) ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE); -out: if (!(epi->event.events & EPOLLEXCLUSIVE)) ewake = 1; @@ -1515,6 +1659,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, if (is_file_epoll(tfile)) tep = tfile->private_data; + lockdep_assert_irqs_enabled(); + if (unlikely(percpu_counter_compare(&ep->user->epoll_watches, max_user_watches) >= 0)) return -ENOSPC; @@ -1526,10 +1672,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, } /* Item initialization follow here ... */ - init_llist_node(&epi->rdllink); + INIT_LIST_HEAD(&epi->rdllink); epi->ep = ep; ep_set_ffd(&epi->ffd, tfile, fd); epi->event = *event; + epi->next = EP_UNACTIVE_PTR; if (tep) mutex_lock_nested(&tep->mtx, 1); @@ -1596,13 +1743,16 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, return -ENOMEM; } + /* We have to drop the new item inside our item list to keep track of it */ + write_lock_irq(&ep->lock); + /* record NAPI ID of new item if present */ ep_set_busy_poll_napi_id(epi); /* If the file is already "ready" we drop it inside the ready list */ - if (revents) { + if (revents && !ep_is_linked(epi)) { + list_add_tail(&epi->rdllink, &ep->rdllist); ep_pm_stay_awake(epi); - epitem_ready(epi); /* Notify waiting tasks that events are available */ if (waitqueue_active(&ep->wq)) @@ -1611,6 +1761,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, pwake++; } + write_unlock_irq(&ep->lock); + /* We have to call this outside the lock */ if (pwake) ep_poll_safewake(ep, NULL, 0); @@ -1625,8 +1777,11 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, static int ep_modify(struct eventpoll *ep, struct epitem *epi, const struct epoll_event *event) { + int pwake = 0; poll_table pt; + lockdep_assert_irqs_enabled(); + init_poll_funcptr(&pt, NULL); /* @@ -1670,16 +1825,24 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, * list, push it inside. 
*/ if (ep_item_poll(epi, &pt, 1)) { - ep_pm_stay_awake(epi); - epitem_ready(epi); + write_lock_irq(&ep->lock); + if (!ep_is_linked(epi)) { + list_add_tail(&epi->rdllink, &ep->rdllist); + ep_pm_stay_awake(epi); - /* Notify waiting tasks that events are available */ - if (waitqueue_active(&ep->wq)) - wake_up(&ep->wq); - if (waitqueue_active(&ep->poll_wait)) - ep_poll_safewake(ep, NULL, 0); + /* Notify waiting tasks that events are available */ + if (waitqueue_active(&ep->wq)) + wake_up(&ep->wq); + if (waitqueue_active(&ep->poll_wait)) + pwake++; + } + write_unlock_irq(&ep->lock); } + /* We have to call this outside the lock */ + if (pwake) + ep_poll_safewake(ep, NULL, 0); + return 0; } @@ -1687,7 +1850,7 @@ static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events, int maxevents) { struct epitem *epi, *tmp; - LLIST_HEAD(txlist); + LIST_HEAD(txlist); poll_table pt; int res = 0; @@ -1702,18 +1865,19 @@ static int ep_send_events(struct eventpoll *ep, init_poll_funcptr(&pt, NULL); mutex_lock(&ep->mtx); + ep_start_scan(ep, &txlist); - while (res < maxevents) { + /* + * We can loop without lock because we are passed a task private list. + * Items cannot vanish during the loop we are holding ep->mtx. + */ + list_for_each_entry_safe(epi, tmp, &txlist, rdllink) { struct wakeup_source *ws; - struct llist_node *n; __poll_t revents; - n = llist_del_first(&ep->rdllist); - if (!n) + if (res >= maxevents) break; - epi = llist_entry(n, struct epitem, rdllink); - /* * Activate ep->ws before deactivating epi->ws to prevent * triggering auto-suspend here (in case we reactive epi->ws @@ -1730,30 +1894,21 @@ static int ep_send_events(struct eventpoll *ep, __pm_relax(ws); } + list_del_init(&epi->rdllink); + /* * If the event mask intersect the caller-requested one, * deliver the event to userspace. Again, we are holding ep->mtx, * so no operations coming from userspace can change the item. */ revents = ep_item_poll(epi, &pt, 1); - if (!revents) { - init_llist_node(n); - - /* - * Just in case epi becomes ready after ep_item_poll() above, but before - * init_llist_node(). Make sure to add it to the ready list, otherwise an - * event may be lost. - */ - if (unlikely(ep_item_poll(epi, &pt, 1))) { - ep_pm_stay_awake(epi); - epitem_ready(epi); - } + if (!revents) continue; - } events = epoll_put_uevent(revents, epi->event.data, events); if (!events) { - llist_add(&epi->rdllink, &ep->rdllist); + list_add(&epi->rdllink, &txlist); + ep_pm_stay_awake(epi); if (!res) res = -EFAULT; break; @@ -1761,31 +1916,25 @@ static int ep_send_events(struct eventpoll *ep, res++; if (epi->event.events & EPOLLONESHOT) epi->event.events &= EP_PRIVATE_BITS; - __llist_add(n, &txlist); - } - - llist_for_each_entry_safe(epi, tmp, txlist.first, rdllink) { - init_llist_node(&epi->rdllink); - - if (!(epi->event.events & EPOLLET)) { + else if (!(epi->event.events & EPOLLET)) { /* - * If this file has been added with Level Trigger mode, we need to insert - * back inside the ready list, so that the next call to epoll_wait() will - * check again the events availability. + * If this file has been added with Level + * Trigger mode, we need to insert back inside + * the ready list, so that the next call to + * epoll_wait() will check again the events + * availability. At this point, no one can insert + * into ep->rdllist besides us. The epoll_ctl() + * callers are locked out by + * ep_send_events() holding "mtx" and the + * poll callback will queue them in ep->ovflist. 
*/ + list_add_tail(&epi->rdllink, &ep->rdllist); ep_pm_stay_awake(epi); - epitem_ready(epi); } } - - __pm_relax(ep->ws); + ep_done_scan(ep, &txlist); mutex_unlock(&ep->mtx); - if (!llist_empty(&ep->rdllist)) { - if (waitqueue_active(&ep->wq)) - wake_up(&ep->wq); - } - return res; } @@ -1878,6 +2027,8 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, wait_queue_entry_t wait; ktime_t expires, *to = NULL; + lockdep_assert_irqs_enabled(); + if (timeout && (timeout->tv_sec | timeout->tv_nsec)) { slack = select_estimate_accuracy(timeout); to = &expires; @@ -1937,15 +2088,54 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, init_wait(&wait); wait.func = ep_autoremove_wake_function; - prepare_to_wait_exclusive(&ep->wq, &wait, TASK_INTERRUPTIBLE); + write_lock_irq(&ep->lock); + /* + * Barrierless variant, waitqueue_active() is called under + * the same lock on wakeup ep_poll_callback() side, so it + * is safe to avoid an explicit barrier. + */ + __set_current_state(TASK_INTERRUPTIBLE); - if (!ep_events_available(ep)) + /* + * Do the final check under the lock. ep_start/done_scan() + * plays with two lists (->rdllist and ->ovflist) and there + * is always a race when both lists are empty for short + * period of time although events are pending, so lock is + * important. + */ + eavail = ep_events_available(ep); + if (!eavail) + __add_wait_queue_exclusive(&ep->wq, &wait); + + write_unlock_irq(&ep->lock); + + if (!eavail) timed_out = !ep_schedule_timeout(to) || !schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS); + __set_current_state(TASK_RUNNING); - finish_wait(&ep->wq, &wait); - eavail = ep_events_available(ep); + /* + * We were woken up, thus go and try to harvest some events. + * If timed out and still on the wait queue, recheck eavail + * carefully under lock, below. + */ + eavail = 1; + + if (!list_empty_careful(&wait.entry)) { + write_lock_irq(&ep->lock); + /* + * If the thread timed out and is not on the wait queue, + * it means that the thread was woken up after its + * timeout expired before it could reacquire the lock. + * Thus, when wait.entry is empty, it needs to harvest + * events. + */ + if (timed_out) + eavail = list_empty(&wait.entry); + __remove_wait_queue(&ep->wq, &wait); + write_unlock_irq(&ep->lock); + } } } From 347e9f5043c89695b01e66b3ed111755afcf1911 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 13 Jul 2025 14:25:58 -0700 Subject: [PATCH 231/234] Linux 6.16-rc6 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7eea2a41c905..c09766beb7ef 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Baby Opossum Posse # *DOCUMENTATION* From 69d5b62c4bded309332add0fac6760239ff47a68 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 10 Jul 2025 20:40:01 +0800 Subject: [PATCH 232/234] ASoC: codec: tlv320aic32x4: Drop aic32x4_pdata usage There is no machine is using aic32x4_pdata as platform_data, so remove the dead code. 
Cc: Markus Niebel Cc: Alexander Stein Reviewed-by: Alexander Stein Signed-off-by: Peng Fan Reviewed-by: Linus Walleij Link: https://patch.msgid.link/20250710-asoc-gpio-1-v2-1-2233b272a1a6@nxp.com Signed-off-by: Mark Brown --- include/sound/tlv320aic32x4.h | 9 --------- sound/soc/codecs/tlv320aic32x4.c | 9 +-------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/include/sound/tlv320aic32x4.h b/include/sound/tlv320aic32x4.h index 0abf74d7edbd..b779d671a995 100644 --- a/include/sound/tlv320aic32x4.h +++ b/include/sound/tlv320aic32x4.h @@ -40,13 +40,4 @@ struct aic32x4_setup_data { unsigned int gpio_func[5]; }; - -struct aic32x4_pdata { - struct aic32x4_setup_data *setup; - u32 power_cfg; - u32 micpga_routing; - bool swapdacs; - int rstn_gpio; -}; - #endif diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index 54ea4bc58c27..7dbcf7f7130b 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -1346,7 +1346,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap, enum aic32x4_type type) { struct aic32x4_priv *aic32x4; - struct aic32x4_pdata *pdata = dev->platform_data; struct device_node *np = dev->of_node; int ret; @@ -1363,13 +1362,7 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap, dev_set_drvdata(dev, aic32x4); - if (pdata) { - aic32x4->power_cfg = pdata->power_cfg; - aic32x4->swapdacs = pdata->swapdacs; - aic32x4->micpga_routing = pdata->micpga_routing; - aic32x4->rstn_gpio = pdata->rstn_gpio; - aic32x4->mclk_name = "mclk"; - } else if (np) { + if (np) { ret = aic32x4_parse_dt(aic32x4, np); if (ret) { dev_err(dev, "Failed to parse DT node\n"); From b709c1aef5e15db3aff5749fc7ed9c61b8d0a322 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 10 Jul 2025 20:40:02 +0800 Subject: [PATCH 233/234] ASoC: codec: tlv320aic32x4: Sort headers alphabetically Sort headers alphabetically to easily insert new ones and drop unused ones. Signed-off-by: Peng Fan Reviewed-by: Alexander Stein Link: https://patch.msgid.link/20250710-asoc-gpio-1-v2-2-2233b272a1a6@nxp.com Signed-off-by: Mark Brown --- sound/soc/codecs/tlv320aic32x4.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index 7dbcf7f7130b..2f4147387c4f 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -9,27 +9,27 @@ * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27. */ +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include #include +#include -#include #include +#include #include #include #include #include -#include #include +#include #include "tlv320aic32x4.h" From 790d5f8ee6f2a27686d042abbce16b4e03ac1608 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 10 Jul 2025 20:40:03 +0800 Subject: [PATCH 234/234] ASoC: codec: tlv320aic32x4: Convert to GPIO descriptors of_gpio.h is deprecated, update the driver to use GPIO descriptors. - Use devm_gpiod_get_optional to get GPIO descriptor, and set consumer name. - Use gpiod_set_value to configure output value. While at here, reorder the included headers. Checking the DTS that use the device, all are using GPIOD_ACTIVE_LOW polarity for reset-gpios, so all should work as expected with this patch. 
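For reference, a minimal sketch of the descriptor-based pattern the driver moves to (device, consumer label and function names are illustrative; error handling is trimmed to the essentials):

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/gpio/consumer.h>

        /* Request the optional, active-low reset line already asserted:
         * GPIOD_OUT_HIGH means "logically asserted"; gpiolib applies the
         * GPIO_ACTIVE_LOW polarity taken from the DT. */
        static int example_get_reset(struct device *dev, struct gpio_desc **out)
        {
                struct gpio_desc *rst;

                rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
                if (IS_ERR(rst))
                        return dev_err_probe(dev, PTR_ERR(rst),
                                             "Failed to get reset gpio\n");

                gpiod_set_consumer_name(rst, "example-rstn");
                *out = rst;
                return 0;
        }

        /* Later: release the part from reset. Logical 0 means "deasserted",
         * and a NULL descriptor (line not wired) is a harmless no-op. */
        static void example_release_reset(struct gpio_desc *rst)
        {
                gpiod_set_value_cansleep(rst, 0);
        }

The devm_gpio_request_one()/gpio_set_value_cansleep() calls removed by the patch worked on raw line values, so the driver had to know the polarity itself; with descriptors that detail stays in the device tree.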
Cc: Markus Niebel Cc: Alexander Stein Reviewed-by: Linus Walleij Tested-by: Alexander Stein Signed-off-by: Peng Fan Link: https://patch.msgid.link/20250710-asoc-gpio-1-v2-3-2233b272a1a6@nxp.com Signed-off-by: Mark Brown --- sound/soc/codecs/tlv320aic32x4.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c index 2f4147387c4f..3b89980e9bcf 100644 --- a/sound/soc/codecs/tlv320aic32x4.c +++ b/sound/soc/codecs/tlv320aic32x4.c @@ -12,12 +12,11 @@ #include #include #include -#include +#include #include #include #include #include -#include #include #include #include @@ -38,7 +37,7 @@ struct aic32x4_priv { u32 power_cfg; u32 micpga_routing; bool swapdacs; - int rstn_gpio; + struct gpio_desc *rstn_gpio; const char *mclk_name; struct regulator *supply_ldo; @@ -1236,7 +1235,14 @@ static int aic32x4_parse_dt(struct aic32x4_priv *aic32x4, aic32x4->swapdacs = false; aic32x4->micpga_routing = 0; - aic32x4->rstn_gpio = of_get_named_gpio(np, "reset-gpios", 0); + /* Assert reset using GPIOD_OUT_HIGH, because reset is GPIO_ACTIVE_LOW */ + aic32x4->rstn_gpio = devm_gpiod_get_optional(aic32x4->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(aic32x4->rstn_gpio)) { + return dev_err_probe(aic32x4->dev, PTR_ERR(aic32x4->rstn_gpio), + "Failed to get reset gpio\n"); + } else { + gpiod_set_consumer_name(aic32x4->rstn_gpio, "tlv320aic32x4_rstn"); + } if (of_property_read_u32_array(np, "aic32x4-gpio-func", aic32x4_setup->gpio_func, 5) >= 0) @@ -1372,26 +1378,20 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap, aic32x4->power_cfg = 0; aic32x4->swapdacs = false; aic32x4->micpga_routing = 0; - aic32x4->rstn_gpio = -1; + aic32x4->rstn_gpio = NULL; aic32x4->mclk_name = "mclk"; } - if (gpio_is_valid(aic32x4->rstn_gpio)) { - ret = devm_gpio_request_one(dev, aic32x4->rstn_gpio, - GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn"); - if (ret != 0) - return ret; - } - ret = aic32x4_setup_regulators(dev, aic32x4); if (ret) { dev_err(dev, "Failed to setup regulators\n"); return ret; } - if (gpio_is_valid(aic32x4->rstn_gpio)) { + if (!aic32x4->rstn_gpio) { ndelay(10); - gpio_set_value_cansleep(aic32x4->rstn_gpio, 1); + /* deassert reset */ + gpiod_set_value_cansleep(aic32x4->rstn_gpio, 0); mdelay(1); }