sched: Move clock-related paravirt code to kernel/sched

Paravirt clock-related functions are available in multiple archs.

In order to share the common parts, move the common static keys
to kernel/sched/ and remove them from the arch specific files.

Make a common paravirt_steal_clock() implementation available in
kernel/sched/cputime.c, guarding it with a new config option
CONFIG_HAVE_PV_STEAL_CLOCK_GEN, which can be selected by an arch
in case it wants to use that common variant.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260105110520.21356-7-jgross@suse.com
This commit is contained in:
Juergen Gross
2026-01-05 12:05:05 +01:00
committed by Borislav Petkov (AMD)
parent 68b10fd40d
commit e6b2aa6d40
20 changed files with 47 additions and 40 deletions

View File

@@ -1056,6 +1056,9 @@ config HAVE_IRQ_TIME_ACCOUNTING
Archs need to ensure they use a high enough resolution clock to
support irq time accounting and then call enable_sched_clock_irqtime().
config HAVE_PV_STEAL_CLOCK_GEN
bool
config HAVE_MOVE_PUD
bool
help

View File

@@ -5,10 +5,6 @@
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);

View File

@@ -12,9 +12,6 @@
#include <linux/static_call.h>
#include <asm/paravirt.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;

View File

@@ -5,10 +5,6 @@
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);

View File

@@ -19,14 +19,12 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
#include <asm/smp_plat.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;

View File

@@ -5,9 +5,6 @@
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);

View File

@@ -6,11 +6,10 @@
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

View File

@@ -23,9 +23,6 @@ static inline bool is_shared_processor(void)
}
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 pseries_paravirt_steal_clock(int cpu);
static inline u64 paravirt_steal_clock(int cpu)

View File

@@ -42,6 +42,7 @@
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/seq_buf.h>
#include <linux/sched/cputime.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -83,9 +84,6 @@ DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL(shared_processor);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
{

View File

@@ -5,10 +5,6 @@
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);

View File

@@ -16,15 +16,13 @@
#include <linux/printk.h>
#include <linux/static_call.h>
#include <linux/types.h>
#include <linux/sched/cputime.h>
#include <asm/barrier.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/sbi.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;

View File

@@ -30,10 +30,6 @@ static __always_inline u64 paravirt_sched_clock(void)
return static_call(pv_sched_clock)();
}
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);

View File

@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>

View File

@@ -30,6 +30,7 @@
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/kvm_types.h>
#include <linux/sched/cputime.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>

View File

@@ -60,9 +60,6 @@ void __init native_pv_lock_init(void)
static_branch_enable(&virt_spin_lock_key);
}
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;

View File

@@ -8,6 +8,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/static_call.h>
#include <linux/sched/cputime.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>

View File

@@ -2,6 +2,7 @@
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
#include <linux/static_call_types.h>
#include <linux/sched/signal.h>
/*
@@ -180,4 +181,21 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
extern unsigned long long
task_sched_runtime(struct task_struct *task);
#ifdef CONFIG_PARAVIRT
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
return static_call(pv_steal_clock)(cpu);
}
#endif
#endif
#endif /* _LINUX_SCHED_CPUTIME_H */

View File

@@ -770,6 +770,11 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
* RQ-clock updating methods:
*/
/* Use CONFIG_PARAVIRT as this will avoid more #ifdef in arch code. */
#ifdef CONFIG_PARAVIRT
struct static_key paravirt_steal_rq_enabled;
#endif
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*

View File

@@ -251,6 +251,19 @@ void __account_forceidle_time(struct task_struct *p, u64 delta)
* ticks are not redelivered later. Due to that, this function may on
* occasion account more time than the calling functions think elapsed.
*/
#ifdef CONFIG_PARAVIRT
struct static_key paravirt_steal_enabled;
#ifdef CONFIG_HAVE_PV_STEAL_CLOCK_GEN
static u64 native_steal_clock(int cpu)
{
return 0;
}
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
#endif
#endif
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT

View File

@@ -82,7 +82,7 @@ struct rt_rq;
struct sched_group;
struct cpuidle_state;
#ifdef CONFIG_PARAVIRT
#if defined(CONFIG_PARAVIRT) && !defined(CONFIG_HAVE_PV_STEAL_CLOCK_GEN)
# include <asm/paravirt.h>
#endif