From b27801189f7fc97a960a96a63b78dcabbb67a52f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 24 Feb 2026 17:36:24 +0100
Subject: [PATCH] x86: Inline TSC reads in timekeeping

Avoid the overhead of the indirect call for a single instruction to
read the TSC.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260224163429.741886362@kernel.org
---
 arch/x86/Kconfig                     |  1 +
 arch/x86/include/asm/clock_inlined.h | 14 ++++++++++++++
 arch/x86/kernel/tsc.c                |  1 +
 3 files changed, 16 insertions(+)
 create mode 100644 arch/x86/include/asm/clock_inlined.h

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e2df1b147184..d337d8dced86 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -141,6 +141,7 @@ config X86
 	select ARCH_USE_SYM_ANNOTATIONS
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_DEFAULT_BPF_JIT	if X86_64
+	select ARCH_WANTS_CLOCKSOURCE_READ_INLINE	if X86_64
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANT_GENERAL_HUGETLB
diff --git a/arch/x86/include/asm/clock_inlined.h b/arch/x86/include/asm/clock_inlined.h
new file mode 100644
index 000000000000..29902c5bcc5c
--- /dev/null
+++ b/arch/x86/include/asm/clock_inlined.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_CLOCK_INLINED_H
+#define _ASM_X86_CLOCK_INLINED_H
+
+#include <asm/msr.h>
+
+struct clocksource;
+
+static __always_inline u64 arch_inlined_clocksource_read(struct clocksource *cs)
+{
+	return (u64)rdtsc_ordered();
+}
+
+#endif
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d9aa694e43f3..74a26fb4417c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1201,6 +1201,7 @@ static struct clocksource clocksource_tsc = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_VALID_FOR_HRES |
+				  CLOCK_SOURCE_CAN_INLINE_READ |
 				  CLOCK_SOURCE_MUST_VERIFY |
 				  CLOCK_SOURCE_VERIFY_PERCPU,
 	.id			= CSID_X86_TSC,