mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 16:01:44 -04:00
watchdog: update saved interrupts during check
Currently, arch_touch_nmi_watchdog() causes an early return that skips updating hrtimer_interrupts_saved. This leads to stale comparisons and delayed lockup detection. I found this issue because in our system the serial console is fairly chatty. For example, the 8250 console driver frequently calls touch_nmi_watchdog() via console_write(). If a CPU locks up after a timer interrupt but before next watchdog check, we see the following sequence: * watchdog_hardlockup_check() saves counter (e.g., 1000) * Timer runs and updates the counter (1001) * touch_nmi_watchdog() is called * CPU locks up * 10s pass: check() notices touch, returns early, skips update * 10s pass: check() saves counter (1001) * 10s pass: check() finally detects lockup This delays detection to 30 seconds. With this fix, we detect the lockup in 20 seconds. Link: https://lkml.kernel.org/r/20260312-hardlockup-watchdog-fixes-v2-2-45bd8a0cc7ed@google.com Signed-off-by: Mayank Rungta <mrungta@google.com> Reviewed-by: Douglas Anderson <dianders@chromium.org> Reviewed-by: Petr Mladek <pmladek@suse.com> Cc: Ian Rogers <irogers@google.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Li Huafei <lihuafei1@huawei.com> Cc: Max Kellermann <max.kellermann@ionos.com> Cc: Shuah Khan <skhan@linuxfoundation.org> Cc: Stephane Erainan <eranian@google.com> Cc: Wang Jinchao <wangjinchao600@gmail.com> Cc: Yunhui Cui <cuiyunhui@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
3e811cae32
commit
746bb7fa91
@@ -159,21 +159,28 @@ void watchdog_hardlockup_touch_cpu(unsigned int cpu)
 	per_cpu(watchdog_hardlockup_touched, cpu) = true;
 }
 
-static bool is_hardlockup(unsigned int cpu)
+static void watchdog_hardlockup_update(unsigned int cpu)
 {
 	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
 
-	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
-		return true;
-
 	/*
 	 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
 	 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
 	 * written/read by a single CPU.
 	 */
 	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+}
 
-	return false;
+static bool is_hardlockup(unsigned int cpu)
+{
+	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
+
+	if (per_cpu(hrtimer_interrupts_saved, cpu) != hrint) {
+		watchdog_hardlockup_update(cpu);
+		return false;
+	}
+
+	return true;
 }
 
 static void watchdog_hardlockup_kick(void)
@@ -191,6 +198,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
 	unsigned long flags;
 
 	if (per_cpu(watchdog_hardlockup_touched, cpu)) {
+		watchdog_hardlockup_update(cpu);
 		per_cpu(watchdog_hardlockup_touched, cpu) = false;
 		return;
 	}
Reference in New Issue
Block a user