From 4138787408aa47e9e107f28876cb59b42d78bb99 Mon Sep 17 00:00:00 2001 From: Steve Wahl Date: Mon, 27 Oct 2025 13:34:56 -0500 Subject: [PATCH 01/19] tick/sched: Limit non-timekeeper CPUs calling jiffies update On large NUMA systems, while running a test program that saturates the inter-processor and inter-NUMA links, acquiring the jiffies_lock can be very expensive. If the cpu designated to do jiffies updates (tick_do_timer_cpu) gets delayed and other cpus decide to do the jiffies update themselves, a large number of them decide to do so at the same time. The inexpensive check against tick_next_period is far quicker than actually acquiring the lock, so most of these get in line to obtain the lock. If obtaining the lock is slow enough, this spirals into the vast majority of CPUs continuously being stuck waiting for this lock, just to obtain it and find out that time has already been updated by another cpu. For example, on one random entry to kdb by manually-injected NMI, 2912 of 3840 CPUs were observed to be stuck there. To avoid this, allow only one non-timekeeper CPU to call tick_do_update_jiffies64() at any given time, resetting ts->stalled jiffies only if the jiffies update function is actually called. With this change, manually interrupting the test at most two CPUs are observed to invoke tick_do_update_jiffies64() - the timekeeper and one other. Signed-off-by: Steve Wahl Signed-off-by: Thomas Gleixner Acked-by: Shrikanth Hegde Link: https://patch.msgid.link/20251027183456.343407-1-steve.wahl@hpe.com --- kernel/time/tick-sched.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c527b421c865..3ff3eb1f90d0 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -201,6 +201,27 @@ static inline void tick_sched_flag_clear(struct tick_sched *ts, ts->flags &= ~flag; } +/* + * Allow only one non-timekeeper CPU at a time update jiffies from + * the timer tick. + * + * Returns true if update was run. + */ +static bool tick_limited_update_jiffies64(struct tick_sched *ts, ktime_t now) +{ + static atomic_t in_progress; + int inp; + + inp = atomic_read(&in_progress); + if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1)) + return false; + + if (ts->last_tick_jiffies == jiffies) + tick_do_update_jiffies64(now); + atomic_set(&in_progress, 0); + return true; +} + #define MAX_STALLED_JIFFIES 5 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) @@ -239,10 +260,11 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) ts->stalled_jiffies = 0; ts->last_tick_jiffies = READ_ONCE(jiffies); } else { - if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { - tick_do_update_jiffies64(now); - ts->stalled_jiffies = 0; - ts->last_tick_jiffies = READ_ONCE(jiffies); + if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) { + if (tick_limited_update_jiffies64(ts, now)) { + ts->stalled_jiffies = 0; + ts->last_tick_jiffies = READ_ONCE(jiffies); + } } } From 6c181b5667eea3e6564d334443536a5974190e15 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:31 +0200 Subject: [PATCH 02/19] timers/migration: Convert "while" loops to use "for" Both the "do while" and "while" loops in tmigr_setup_groups() eventually mimic the behaviour of "for" loops. Simplify accordingly. 
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-2-frederic@kernel.org --- kernel/time/timer_migration.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index c0c54dc5314c..1e371f1fdc86 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1642,22 +1642,23 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, static int tmigr_setup_groups(unsigned int cpu, unsigned int node) { struct tmigr_group *group, *child, **stack; - int top = 0, err = 0, i = 0; + int i, top = 0, err = 0; struct list_head *lvllist; stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL); if (!stack) return -ENOMEM; - do { + for (i = 0; i < tmigr_hierarchy_levels; i++) { group = tmigr_get_group(cpu, node, i); if (IS_ERR(group)) { err = PTR_ERR(group); + i--; break; } top = i; - stack[i++] = group; + stack[i] = group; /* * When booting only less CPUs of a system than CPUs are @@ -1667,16 +1668,18 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) * be different from tmigr_hierarchy_levels, contains only a * single group. */ - if (group->parent || list_is_singular(&tmigr_level_list[i - 1])) + if (group->parent || list_is_singular(&tmigr_level_list[i])) break; + } - } while (i < tmigr_hierarchy_levels); + /* Assert single root without parent */ + if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels)) + return -EINVAL; + if (WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]))) + return -EINVAL; - /* Assert single root */ - WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top])); - - while (i > 0) { - group = stack[--i]; + for (; i >= 0; i--) { + group = stack[i]; if (err < 0) { list_del(&group->list); From fa9620355d4192200f15cb3d97c6eb9c02442249 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:32 +0200 Subject: [PATCH 03/19] timers/migration: Remove locking on group connection Initializing the tmc's group, the group's number of children and the group's parent can all be done without locking because: 1) Reading the group's parent and its group mask is done locklessly. 2) The connections prepared for a given CPU hierarchy are visible to the target CPU once online, thanks to the CPU hotplug enforced memory ordering. 3) In case of a newly created upper level, the new root and its connections and initialization are made visible by the CPU which made the connections. When that CPUs goes idle in the future, the new link is published by tmigr_inactive_up() through the atomic RmW on ->migr_state. 4) If CPUs were still walking up the active hierarchy, they could observe the new root earlier. In this case the ordering is enforced by an early initialization of the group mask and by barriers that maintain address dependency as explained in: b729cc1ec21a ("timers/migration: Fix another race between hotplug and idle entry/exit") de3ced72a792 ("timers/migration: Enforce group initialization visibility to tree walkers") 5) Timers are propagated by a chain of group locking from the bottom to the top. And while doing so, the tree also propagates groups links and initialization. Therefore remote expiration, which also relies on group locking, will observe those links and initialization while holding the root lock before walking the tree remotely and update remote timers. 
This is especially important for migrators in the active hierarchy that may observe the new root early. Therefore the locking is unnecessary at initialization. If anything, it just brings confusion. Remove it. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-3-frederic@kernel.org --- kernel/time/timer_migration.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 1e371f1fdc86..5f8aef94ca0f 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1573,9 +1573,6 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, { struct tmigr_walk data; - raw_spin_lock_irq(&child->lock); - raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); - if (activate) { /* * @child is the old top and @parent the new one. In this @@ -1596,9 +1593,6 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, */ smp_store_release(&child->parent, parent); - raw_spin_unlock(&parent->lock); - raw_spin_unlock_irq(&child->lock); - trace_tmigr_connect_child_parent(child); if (!activate) @@ -1695,13 +1689,9 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) if (i == 0) { struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu); - raw_spin_lock_irq(&group->lock); - tmc->tmgroup = group; tmc->groupmask = BIT(group->num_children++); - raw_spin_unlock_irq(&group->lock); - trace_tmigr_connect_cpu_parent(tmc); /* There are no children that need to be connected */ From 5eb579dfd46b4949117ecb0f1ba2f12d3dc9a6f2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:33 +0200 Subject: [PATCH 04/19] timers/migration: Fix imbalanced NUMA trees When a CPU from a new node boots, the old root may happen to be connected to the new root even if their node mismatch, as depicted in the following scenario: 1) CPU 0 boots and creates the first group for node 0. [GRP0:0] node 0 | CPU 0 2) CPU 1 from node 1 boots and creates a new top that corresponds to node 1, but it also connects the old root from node 0 to the new root from node 1 by mistake. [GRP1:0] node 1 / \ / \ [GRP0:0] [GRP0:1] node 0 node 1 | | CPU 0 CPU 1 3) This eventually leads to an imbalanced tree where some node 0 CPUs migrate node 1 timers (and vice versa) way before reaching the crossnode groups, resulting in more frequent remote memory accesses than expected. [GRP2:0] NUMA_NO_NODE / \ [GRP1:0] [GRP1:1] node 1 node 0 / \ | / \ [...] [GRP0:0] [GRP0:1] node 0 node 1 | | CPU 0... CPU 1... A balanced tree should only contain groups having children that belong to the same node: [GRP2:0] NUMA_NO_NODE / \ [GRP1:0] [GRP1:0] node 0 node 1 / \ / \ / \ / \ [GRP0:0] [...] [...] [GRP0:1] node 0 node 1 | | CPU 0... CPU 1... In order to fix this, the hierarchy must be unfolded up to the crossnode level as soon as a node mismatch is detected. For example the stage 2 above should lead to this layout: [GRP2:0] NUMA_NO_NODE / \ [GRP1:0] [GRP1:1] node 0 node 1 / \ / \ [GRP0:0] [GRP0:1] node 0 node 1 | | CPU 0 CPU 1 This means that not only GRP1:0 must be created but also GRP1:1 and GRP2:0 in order to prepare a balanced tree for next CPUs to boot. 
Fixes: 7ee988770326 ("timers: Implement the hierarchical pull model") Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-4-frederic@kernel.org --- kernel/time/timer_migration.c | 245 +++++++++++++++++++--------------- 1 file changed, 134 insertions(+), 111 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 5f8aef94ca0f..49635a2b7ee2 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -420,6 +420,8 @@ static struct list_head *tmigr_level_list __read_mostly; static unsigned int tmigr_hierarchy_levels __read_mostly; static unsigned int tmigr_crossnode_level __read_mostly; +static struct tmigr_group *tmigr_root; + static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); #define TMIGR_NONE 0xFF @@ -522,11 +524,9 @@ struct tmigr_walk { typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *); -static void __walk_groups(up_f up, struct tmigr_walk *data, - struct tmigr_cpu *tmc) +static void __walk_groups_from(up_f up, struct tmigr_walk *data, + struct tmigr_group *child, struct tmigr_group *group) { - struct tmigr_group *child = NULL, *group = tmc->tmgroup; - do { WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels); @@ -544,6 +544,12 @@ static void __walk_groups(up_f up, struct tmigr_walk *data, } while (group); } +static void __walk_groups(up_f up, struct tmigr_walk *data, + struct tmigr_cpu *tmc) +{ + __walk_groups_from(up, data, NULL, tmc->tmgroup); +} + static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc) { lockdep_assert_held(&tmc->lock); @@ -1498,21 +1504,6 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl, s.seq = 0; atomic_set(&group->migr_state, s.state); - /* - * If this is a new top-level, prepare its groupmask in advance. - * This avoids accidents where yet another new top-level is - * created in the future and made visible before the current groupmask. - */ - if (list_empty(&tmigr_level_list[lvl])) { - group->groupmask = BIT(0); - /* - * The previous top level has prepared its groupmask already, - * simply account it as the first child. - */ - if (lvl > 0) - group->num_children = 1; - } - timerqueue_init_head(&group->events); timerqueue_init(&group->groupevt.nextevt); group->groupevt.nextevt.expires = KTIME_MAX; @@ -1567,22 +1558,51 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node, return group; } +static bool tmigr_init_root(struct tmigr_group *group, bool activate) +{ + if (!group->parent && group != tmigr_root) { + /* + * This is the new top-level, prepare its groupmask in advance + * to avoid accidents where yet another new top-level is + * created in the future and made visible before this groupmask. + */ + group->groupmask = BIT(0); + WARN_ON_ONCE(activate); + + return true; + } + + return false; + +} + static void tmigr_connect_child_parent(struct tmigr_group *child, struct tmigr_group *parent, bool activate) { - struct tmigr_walk data; - - if (activate) { + if (tmigr_init_root(parent, activate)) { /* - * @child is the old top and @parent the new one. In this - * case groupmask is pre-initialized and @child already - * accounted, along with its new sibling corresponding to the - * CPU going up. + * The previous top level had prepared its groupmask already, + * simply account it in advance as the first child. 
If some groups + * have been created between the old and new root due to node + * mismatch, the new root's child will be intialized accordingly. */ - WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2); + parent->num_children = 1; + } + + /* Connecting old root to new root ? */ + if (!parent->parent && activate) { + /* + * @child is the old top, or in case of node mismatch, some + * intermediate group between the old top and the new one in + * @parent. In this case the @child must be pre-accounted above + * as the first child. Its new inactive sibling corresponding + * to the CPU going up has been accounted as the second child. + */ + WARN_ON_ONCE(parent->num_children != 2); + child->groupmask = BIT(0); } else { - /* Adding @child for the CPU going up to @parent. */ + /* Common case adding @child for the CPU going up to @parent. */ child->groupmask = BIT(parent->num_children++); } @@ -1594,56 +1614,28 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, smp_store_release(&child->parent, parent); trace_tmigr_connect_child_parent(child); - - if (!activate) - return; - - /* - * To prevent inconsistent states, active children need to be active in - * the new parent as well. Inactive children are already marked inactive - * in the parent group: - * - * * When new groups were created by tmigr_setup_groups() starting from - * the lowest level (and not higher then one level below the current - * top level), then they are not active. They will be set active when - * the new online CPU comes active. - * - * * But if a new group above the current top level is required, it is - * mandatory to propagate the active state of the already existing - * child to the new parent. So tmigr_connect_child_parent() is - * executed with the formerly top level group (child) and the newly - * created group (parent). - * - * * It is ensured that the child is active, as this setup path is - * executed in hotplug prepare callback. This is exectued by an - * already connected and !idle CPU. Even if all other CPUs go idle, - * the CPU executing the setup will be responsible up to current top - * level group. And the next time it goes inactive, it will release - * the new childmask and parent to subsequent walkers through this - * @child. Therefore propagate active state unconditionally. - */ - data.childmask = child->groupmask; - - /* - * There is only one new level per time (which is protected by - * tmigr_mutex). When connecting the child and the parent and set the - * child active when the parent is inactive, the parent needs to be the - * uppermost level. Otherwise there went something wrong! 
- */ - WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent); } -static int tmigr_setup_groups(unsigned int cpu, unsigned int node) +static int tmigr_setup_groups(unsigned int cpu, unsigned int node, + struct tmigr_group *start, bool activate) { struct tmigr_group *group, *child, **stack; - int i, top = 0, err = 0; - struct list_head *lvllist; + int i, top = 0, err = 0, start_lvl = 0; + bool root_mismatch = false; stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL); if (!stack) return -ENOMEM; - for (i = 0; i < tmigr_hierarchy_levels; i++) { + if (start) { + stack[start->level] = start; + start_lvl = start->level + 1; + } + + if (tmigr_root) + root_mismatch = tmigr_root->numa_node != node; + + for (i = start_lvl; i < tmigr_hierarchy_levels; i++) { group = tmigr_get_group(cpu, node, i); if (IS_ERR(group)) { err = PTR_ERR(group); @@ -1656,23 +1648,25 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) /* * When booting only less CPUs of a system than CPUs are - * available, not all calculated hierarchy levels are required. + * available, not all calculated hierarchy levels are required, + * unless a node mismatch is detected. * * The loop is aborted as soon as the highest level, which might * be different from tmigr_hierarchy_levels, contains only a - * single group. + * single group, unless the nodes mismatch below tmigr_crossnode_level */ - if (group->parent || list_is_singular(&tmigr_level_list[i])) + if (group->parent) + break; + if ((!root_mismatch || i >= tmigr_crossnode_level) && + list_is_singular(&tmigr_level_list[i])) break; } /* Assert single root without parent */ if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels)) return -EINVAL; - if (WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]))) - return -EINVAL; - for (; i >= 0; i--) { + for (; i >= start_lvl; i--) { group = stack[i]; if (err < 0) { @@ -1692,48 +1686,63 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) tmc->tmgroup = group; tmc->groupmask = BIT(group->num_children++); + tmigr_init_root(group, activate); + trace_tmigr_connect_cpu_parent(tmc); /* There are no children that need to be connected */ continue; } else { child = stack[i - 1]; - /* Will be activated at online time */ - tmigr_connect_child_parent(child, group, false); - } - - /* check if uppermost level was newly created */ - if (top != i) - continue; - - WARN_ON_ONCE(top == 0); - - lvllist = &tmigr_level_list[top]; - - /* - * Newly created root level should have accounted the upcoming - * CPU's child group and pre-accounted the old root. - */ - if (group->num_children == 2 && list_is_singular(lvllist)) { - /* - * The target CPU must never do the prepare work, except - * on early boot when the boot CPU is the target. Otherwise - * it may spuriously activate the old top level group inside - * the new one (nevertheless whether old top level group is - * active or not) and/or release an uninitialized childmask. - */ - WARN_ON_ONCE(cpu == raw_smp_processor_id()); - - lvllist = &tmigr_level_list[top - 1]; - list_for_each_entry(child, lvllist, list) { - if (child->parent) - continue; - - tmigr_connect_child_parent(child, group, true); - } + tmigr_connect_child_parent(child, group, activate); } } + if (err < 0) + goto out; + + if (activate) { + struct tmigr_walk data; + + /* + * To prevent inconsistent states, active children need to be active in + * the new parent as well. 
Inactive children are already marked inactive + * in the parent group: + * + * * When new groups were created by tmigr_setup_groups() starting from + * the lowest level, then they are not active. They will be set active + * when the new online CPU comes active. + * + * * But if new groups above the current top level are required, it is + * mandatory to propagate the active state of the already existing + * child to the new parents. So tmigr_active_up() activates the + * new parents while walking up from the old root to the new. + * + * * It is ensured that @start is active, as this setup path is + * executed in hotplug prepare callback. This is executed by an + * already connected and !idle CPU. Even if all other CPUs go idle, + * the CPU executing the setup will be responsible up to current top + * level group. And the next time it goes inactive, it will release + * the new childmask and parent to subsequent walkers through this + * @child. Therefore propagate active state unconditionally. + */ + WARN_ON_ONCE(!start->parent); + data.childmask = start->groupmask; + __walk_groups_from(tmigr_active_up, &data, start, start->parent); + } + + /* Root update */ + if (list_is_singular(&tmigr_level_list[top])) { + group = list_first_entry(&tmigr_level_list[top], + typeof(*group), list); + WARN_ON_ONCE(group->parent); + if (tmigr_root) { + /* Old root should be the same or below */ + WARN_ON_ONCE(tmigr_root->level > top); + } + tmigr_root = group; + } +out: kfree(stack); return err; @@ -1741,12 +1750,26 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) static int tmigr_add_cpu(unsigned int cpu) { + struct tmigr_group *old_root = tmigr_root; int node = cpu_to_node(cpu); int ret; - mutex_lock(&tmigr_mutex); - ret = tmigr_setup_groups(cpu, node); - mutex_unlock(&tmigr_mutex); + guard(mutex)(&tmigr_mutex); + + ret = tmigr_setup_groups(cpu, node, NULL, false); + + /* Root has changed? Connect the old one to the new */ + if (ret >= 0 && old_root && old_root != tmigr_root) { + /* + * The target CPU must never do the prepare work, except + * on early boot when the boot CPU is the target. Otherwise + * it may spuriously activate the old top level group inside + * the new one (nevertheless whether old top level group is + * active or not) and/or release an uninitialized childmask. + */ + WARN_ON_ONCE(cpu == raw_smp_processor_id()); + ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true); + } return ret; } From 3c8eb36e2a46786d50dbef417ef782ff37b372ca Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:34 +0200 Subject: [PATCH 05/19] timers/migration: Assert that hotplug preparing CPU is part of stable active hierarchy The CPU doing the prepare work for a remote target must be online from the tree point of view and its hierarchy must be active, otherwise propagating its active state up to the new root branch would be either incorrect or racy. Assert those conditions with more sanity checks. 
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-5-frederic@kernel.org --- kernel/time/timer_migration.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 49635a2b7ee2..bddd816faaeb 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1703,6 +1703,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node, if (activate) { struct tmigr_walk data; + union tmigr_state state; /* * To prevent inconsistent states, active children need to be active in @@ -1726,6 +1727,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node, * the new childmask and parent to subsequent walkers through this * @child. Therefore propagate active state unconditionally. */ + state.state = atomic_read(&start->migr_state); + WARN_ON_ONCE(!state.active); WARN_ON_ONCE(!start->parent); data.childmask = start->groupmask; __walk_groups_from(tmigr_active_up, &data, start, start->parent); @@ -1768,6 +1771,11 @@ static int tmigr_add_cpu(unsigned int cpu) * active or not) and/or release an uninitialized childmask. */ WARN_ON_ONCE(cpu == raw_smp_processor_id()); + /* + * The (likely) current CPU is expected to be online in the hierarchy, + * otherwise the old root may not be active as expected. + */ + WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->online); ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true); } From 93643b90d6c141cb90dca7c24eabee800f51f908 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:35 +0200 Subject: [PATCH 06/19] timers/migration: Remove unused "cpu" parameter from tmigr_get_group() Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-6-frederic@kernel.org --- kernel/time/timer_migration.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index bddd816faaeb..73d9b0648116 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1511,8 +1511,7 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl, group->groupevt.ignore = true; } -static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node, - unsigned int lvl) +static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl) { struct tmigr_group *tmp, *group = NULL; @@ -1636,7 +1635,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node, root_mismatch = tmigr_root->numa_node != node; for (i = start_lvl; i < tmigr_hierarchy_levels; i++) { - group = tmigr_get_group(cpu, node, i); + group = tmigr_get_group(node, i); if (IS_ERR(group)) { err = PTR_ERR(group); i--; From ba14500e4bfcab5e841fbf8d7fcbbc80e98d6b9e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 24 Oct 2025 15:25:36 +0200 Subject: [PATCH 07/19] timers/migration: Remove dead code handling idle CPU checking for remote timers Idle migrators don't walk the whole tree in order to find out if there are timers to migrate because they recorded the next deadline to be verified within a single check in tmigr_requires_handle_remote(). Remove the related dead code and data. 
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251024132536.39841-7-frederic@kernel.org --- kernel/time/timer_migration.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 73d9b0648116..19ddfa96b9df 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -504,11 +504,6 @@ static bool tmigr_check_lonely(struct tmigr_group *group) * @now: timer base monotonic * @check: is set if there is the need to handle remote timers; * required in tmigr_requires_handle_remote() only - * @tmc_active: this flag indicates, whether the CPU which triggers - * the hierarchy walk is !idle in the timer migration - * hierarchy. When the CPU is idle and the whole hierarchy is - * idle, only the first event of the top level has to be - * considered. */ struct tmigr_walk { u64 nextexp; @@ -519,7 +514,6 @@ struct tmigr_walk { unsigned long basej; u64 now; bool check; - bool tmc_active; }; typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *); @@ -1119,15 +1113,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group, */ if (!tmigr_check_migrator(group, childmask)) return true; - - /* - * When there is a parent group and the CPU which triggered the - * hierarchy walk is not active, proceed the walk to reach the top level - * group before reading the next_expiry value. - */ - if (group->parent && !data->tmc_active) - return false; - /* * The lock is required on 32bit architectures to read the variable * consistently with a concurrent writer. On 64bit the lock is not @@ -1172,7 +1157,6 @@ bool tmigr_requires_handle_remote(void) data.now = get_jiffies_update(&jif); data.childmask = tmc->groupmask; data.firstexp = KTIME_MAX; - data.tmc_active = !tmc->idle; data.check = false; /* From 4702f4eceb639b6af199151e352e570943619d98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Mon, 10 Nov 2025 10:38:53 +0100 Subject: [PATCH 08/19] hrtimer: Store time as ktime_t in restart block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The hrtimer core uses ktime_t to represent times, use that also for the restart block. CPU timers internally use nanoseconds instead of ktime_t but use the same restart block, so use the correct accessors for those. 
Signed-off-by: Thomas Weißschuh Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251110-restart-block-expiration-v1-3-5d39cc93df4f@linutronix.de --- include/linux/restart_block.h | 2 +- kernel/time/hrtimer.c | 4 ++-- kernel/time/posix-cpu-timers.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 7e50bbc94e47..36ddfa1ec301 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -43,7 +43,7 @@ struct restart_block { struct __kernel_timespec __user *rmtp; struct old_timespec32 __user *compat_rmtp; }; - u64 expires; + ktime_t expires; } nanosleep; /* For poll */ struct { diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 7e7b2b471bae..9c77e5c72556 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2145,7 +2145,7 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) int ret; hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS); - hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); + hrtimer_set_expires(&t.timer, restart->nanosleep.expires); ret = do_nanosleep(&t, HRTIMER_MODE_ABS); destroy_hrtimer_on_stack(&t.timer); return ret; @@ -2172,7 +2172,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, restart = ¤t->restart_block; restart->nanosleep.clockid = t.timer.base->clockid; - restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); + restart->nanosleep.expires = hrtimer_get_expires(&t.timer); set_restart_fn(restart, hrtimer_nanosleep_restart); out: destroy_hrtimer_on_stack(&t.timer); diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 2e5b89d7d866..0de2bb7cbec0 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1557,7 +1557,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, * Report back to the user the time still remaining. */ restart = ¤t->restart_block; - restart->nanosleep.expires = expires; + restart->nanosleep.expires = ns_to_ktime(expires); if (restart->nanosleep.type != TT_NONE) error = nanosleep_copyout(restart, &it.it_value); } @@ -1599,7 +1599,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block) clockid_t which_clock = restart_block->nanosleep.clockid; struct timespec64 t; - t = ns_to_timespec64(restart_block->nanosleep.expires); + t = ktime_to_timespec64(restart_block->nanosleep.expires); return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t); } From e54dd0474c281ebcd32ada4bdbe1e0ada2e1c22b Mon Sep 17 00:00:00 2001 From: Sunday Adelodun Date: Thu, 6 Nov 2025 12:39:38 +0100 Subject: [PATCH 09/19] time: tick-oneshot: Add missing Return and parameter descriptions to kernel-doc Several functions in kernel/time/tick-oneshot.c are missing parameter and return value descriptions in their kernel-doc comments. This causes warnings during doc generation. Update the kernel-doc blocks to include detailed @param and Return: descriptions for better clarity and to fix kernel-doc warnings. No functional code changes are made. 
Signed-off-by: Sunday Adelodun Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251106113938.34693-3-adelodunolaoluwa@yahoo.com --- kernel/time/tick-oneshot.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 5e2c2c26b3cc..ffee943d796d 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -19,6 +19,10 @@ /** * tick_program_event - program the CPU local timer device for the next event + * @expires: the time at which the next timer event should occur + * @force: flag to force reprograming even if the event time hasn't changed + * + * Return: 0 on success, negative error code on failure */ int tick_program_event(ktime_t expires, int force) { @@ -57,6 +61,13 @@ void tick_resume_oneshot(void) /** * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz) + * @newdev: Pointer to the clock event device to configure + * @handler: Function to be called when the event device triggers an interrupt + * @next_event: Initial expiry time for the next event (in ktime) + * + * Configures the specified clock event device for onshot mode, + * assigns the given handler as its event callback, and programs + * the device to trigger at the specified next event time. */ void tick_setup_oneshot(struct clock_event_device *newdev, void (*handler)(struct clock_event_device *), @@ -69,6 +80,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev, /** * tick_switch_to_oneshot - switch to oneshot mode + * @handler: function to call when an event occurs on the tick device + * + * Return: 0 on success, -EINVAL if the tick device is not present, + * not functional, or does not support oneshot mode. */ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) { @@ -101,7 +116,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) /** * tick_oneshot_mode_active - check whether the system is in oneshot mode * - * returns 1 when either nohz or highres are enabled. otherwise 0. + * Return: 1 when either nohz or highres are enabled, otherwise 0. */ int tick_oneshot_mode_active(void) { @@ -120,6 +135,9 @@ int tick_oneshot_mode_active(void) * tick_init_highres - switch to high resolution mode * * Called with interrupts disabled. + * + * Return: 0 on success, -EINVAL if the tick device cannot switch + * to oneshot/high-resolution mode. */ int tick_init_highres(void) { From 4518767be9089ea4f54754ad27364d6134fc46e2 Mon Sep 17 00:00:00 2001 From: Jianyun Gao Date: Sat, 27 Sep 2025 17:34:10 +0800 Subject: [PATCH 10/19] time: Fix a few typos in time[r] related code comments Signed-off-by: Jianyun Gao Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20250927093411.1509275-1-jianyungao89@gmail.com --- include/linux/delay.h | 8 ++++---- kernel/time/posix-timers.c | 2 +- kernel/time/timer_migration.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/delay.h b/include/linux/delay.h index 89866bab100d..46412c00033a 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -68,7 +68,7 @@ void usleep_range_state(unsigned long min, unsigned long max, * @min: Minimum time in microseconds to sleep * @max: Maximum time in microseconds to sleep * - * For basic information please refere to usleep_range_state(). + * For basic information please refer to usleep_range_state(). * * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep. 
*/ @@ -82,10 +82,10 @@ static inline void usleep_range(unsigned long min, unsigned long max) * @min: Minimum time in microseconds to sleep * @max: Maximum time in microseconds to sleep * - * For basic information please refere to usleep_range_state(). + * For basic information please refer to usleep_range_state(). * * The sleeping task has the state TASK_IDLE during the sleep to prevent - * contribution to the load avarage. + * contribution to the load average. */ static inline void usleep_range_idle(unsigned long min, unsigned long max) { @@ -96,7 +96,7 @@ static inline void usleep_range_idle(unsigned long min, unsigned long max) * ssleep - wrapper for seconds around msleep * @seconds: Requested sleep duration in seconds * - * Please refere to msleep() for detailed information. + * Please refer to msleep() for detailed information. */ static inline void ssleep(unsigned int seconds) { diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index aa3120104a51..36dbb8146517 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -1242,7 +1242,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock, * sys_clock_settime(). The kernel internal timekeeping is always using * nanoseconds precision independent of the clocksource device which is * used to read the time from. The resolution of that device only - * affects the presicion of the time returned by sys_clock_gettime(). + * affects the precision of the time returned by sys_clock_gettime(). * * Returns: * 0 Success. @tp contains the resolution diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 19ddfa96b9df..57e38674e56e 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -708,7 +708,7 @@ void tmigr_cpu_activate(void) /* * Returns true, if there is nothing to be propagated to the next level * - * @data->firstexp is set to expiry of first gobal event of the (top level of + * @data->firstexp is set to expiry of first global event of the (top level of * the) hierarchy, but only when hierarchy is completely idle. * * The child and group states need to be read under the lock, to prevent a race From 05d89fe7e46a52bb5dde7ac3f07b383895fab528 Mon Sep 17 00:00:00 2001 From: Wake Liu Date: Mon, 3 Nov 2025 19:45:02 +0800 Subject: [PATCH 11/19] selftests/timers: Clean up kernel version check in posix_timers Several tests in the posix_timers selftest which test timer behavior related to SIG_IGN fail on kernels older than 6.13. This is due to a refactoring of signal handling in commit caf77435dd8a ("signal: Handle ignored signals in do_sigaction(action != SIG_IGN)"). A previous attempt to fix this by adding a kernel version check to each of the nine affected tests was suboptimal, as it resulted in emitting the same skip message nine times. Following the suggestion from Thomas Gleixner, this is refactored to perform a single version check in main(). To satisfy the kselftest framework's requirement for the test count to match the declared plan, the plan is now conditionally set to 10 (for older kernels) or 19. While setting the plan conditionally may seem complex, it is the better approach to avoid the alternatives: either running tests on unsupported kernels that are known to fail, or emitting a noisy series of nine identical skip messages. A single informational message is now printed instead when the tests are skipped. 
Signed-off-by: Wake Liu Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/all/20250807085042.1690931-1-wakel@google.com/ Link: https://patch.msgid.link/20251103114502.584940-1-wakel@google.com --- tools/testing/selftests/timers/posix_timers.c | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c index f0eceb0faf34..a563c438ac79 100644 --- a/tools/testing/selftests/timers/posix_timers.c +++ b/tools/testing/selftests/timers/posix_timers.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "../kselftest.h" @@ -670,8 +671,14 @@ static void check_timer_create_exact(void) int main(int argc, char **argv) { + bool run_sig_ign_tests = ksft_min_kernel_version(6, 13); + ksft_print_header(); - ksft_set_plan(19); + if (run_sig_ign_tests) { + ksft_set_plan(19); + } else { + ksft_set_plan(10); + } ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n"); ksft_print_msg("based timers if other threads run on the CPU...\n"); @@ -695,15 +702,20 @@ int main(int argc, char **argv) check_timer_create(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); check_timer_distribution(); - check_sig_ign(0); - check_sig_ign(1); - check_rearm(); - check_delete(); - check_sigev_none(CLOCK_MONOTONIC, "CLOCK_MONOTONIC"); - check_sigev_none(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); - check_gettime(CLOCK_MONOTONIC, "CLOCK_MONOTONIC"); - check_gettime(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); - check_gettime(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID"); + if (run_sig_ign_tests) { + check_sig_ign(0); + check_sig_ign(1); + check_rearm(); + check_delete(); + check_sigev_none(CLOCK_MONOTONIC, "CLOCK_MONOTONIC"); + check_sigev_none(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); + check_gettime(CLOCK_MONOTONIC, "CLOCK_MONOTONIC"); + check_gettime(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); + check_gettime(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID"); + } else { + ksft_print_msg("Skipping SIG_IGN tests on kernel < 6.13\n"); + } + check_overrun(CLOCK_MONOTONIC, "CLOCK_MONOTONIC"); check_overrun(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID"); check_overrun(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID"); From 308bc2e33885df9288d3f1ed946a2b212e37db62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Thu, 6 Nov 2025 16:15:24 +0100 Subject: [PATCH 12/19] selftests/timers/nanosleep: Add tests for return of remaining time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If interrupted by a signal clock_nanosleep() returns the remaining time into the structure pointed to by the rmtp parameter. So far this functionality was not tested by the timer selftests. Extend the nanosleep selftest to cover this feature. 
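For reference, a minimal userspace sketch of the pattern the remaining time is meant for (illustrative only, not part of the patch; the helper name sleep_one_second is made up):

	#include <errno.h>
	#include <time.h>

	static void sleep_one_second(void)
	{
		struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;

		/* clock_nanosleep() returns the error number directly, not -1/errno */
		while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR)
			req = rem;	/* resume with the time still remaining */
	}

The selftest added below relies on exactly this contract: after an interruption, the reported remaining time must lie between zero and the originally requested interval.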
Signed-off-by: Thomas Weißschuh Signed-off-by: Thomas Gleixner Link: https://patch.msgid.link/20251106-nanosleep-rtmp-selftest-v1-1-f9212fb295fe@linutronix.de --- tools/testing/selftests/timers/nanosleep.c | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c index 252c6308c569..10badae13ebe 100644 --- a/tools/testing/selftests/timers/nanosleep.c +++ b/tools/testing/selftests/timers/nanosleep.c @@ -116,6 +116,56 @@ int nanosleep_test(int clockid, long long ns) return 0; } +static void dummy_event_handler(int val) +{ + /* No action needed */ +} + +static int nanosleep_test_remaining(int clockid) +{ + struct timespec rqtp = {}, rmtp = {}; + struct itimerspec itimer = {}; + struct sigaction sa = {}; + timer_t timer; + int ret; + + sa.sa_handler = dummy_event_handler; + ret = sigaction(SIGALRM, &sa, NULL); + if (ret) + return -1; + + ret = timer_create(clockid, NULL, &timer); + if (ret) + return -1; + + itimer.it_value.tv_nsec = NSEC_PER_SEC / 4; + ret = timer_settime(timer, 0, &itimer, NULL); + if (ret) + return -1; + + rqtp.tv_nsec = NSEC_PER_SEC / 2; + ret = clock_nanosleep(clockid, 0, &rqtp, &rmtp); + if (ret != EINTR) + return -1; + + ret = timer_delete(timer); + if (ret) + return -1; + + sa.sa_handler = SIG_DFL; + ret = sigaction(SIGALRM, &sa, NULL); + if (ret) + return -1; + + if (!in_order((struct timespec) {}, rmtp)) + return -1; + + if (!in_order(rmtp, rqtp)) + return -1; + + return 0; +} + int main(int argc, char **argv) { long long length; @@ -150,6 +200,11 @@ int main(int argc, char **argv) } length *= 100; } + ret = nanosleep_test_remaining(clockid); + if (ret < 0) { + ksft_test_result_fail("%-31s\n", clockstring(clockid)); + ksft_exit_fail(); + } ksft_test_result_pass("%-31s\n", clockstring(clockid)); next: ret = 0; From 8312cab5ff4702389a86129051eba6ea046a71a1 Mon Sep 17 00:00:00 2001 From: Gabriele Monaco Date: Thu, 20 Nov 2025 15:56:47 +0100 Subject: [PATCH 13/19] timers/migration: Rename 'online' bit to 'available' The timer migration hierarchy excludes offline CPUs via the tmigr_is_not_available function, which is essentially checking the online bit for the CPU. Rename the online bit to available and all references in function names and tracepoint to generalise the concept of available CPUs. 
Signed-off-by: Gabriele Monaco Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Link: https://patch.msgid.link/20251120145653.296659-2-gmonaco@redhat.com --- include/trace/events/timer_migration.h | 4 ++-- kernel/time/timer_migration.c | 24 ++++++++++++------------ kernel/time/timer_migration.h | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h index 47db5eaf2f9a..61171b13c687 100644 --- a/include/trace/events/timer_migration.h +++ b/include/trace/events/timer_migration.h @@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active, TP_ARGS(tmc) ); -DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online, +DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available, TP_PROTO(struct tmigr_cpu *tmc), TP_ARGS(tmc) ); -DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline, +DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable, TP_PROTO(struct tmigr_cpu *tmc), diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 57e38674e56e..2cfebed35e22 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -429,7 +429,7 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) { - return !(tmc->tmgroup && tmc->online); + return !(tmc->tmgroup && tmc->available); } /* @@ -926,7 +926,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now, * updated the event takes care when hierarchy is completely * idle. Otherwise the migrator does it as the event is enqueued. */ - if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || + if (!tmc->available || tmc->remote || tmc->cpuevt.ignore || now < tmc->cpuevt.nextevt.expires) { raw_spin_unlock_irq(&tmc->lock); return; @@ -973,7 +973,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now, * (See also section "Required event and timerqueue update after a * remote expiry" in the documentation at the top) */ - if (!tmc->online || !tmc->idle) { + if (!tmc->available || !tmc->idle) { timer_unlock_remote_bases(cpu); goto unlock; } @@ -1422,19 +1422,19 @@ static long tmigr_trigger_active(void *unused) { struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); - WARN_ON_ONCE(!tmc->online || tmc->idle); + WARN_ON_ONCE(!tmc->available || tmc->idle); return 0; } -static int tmigr_cpu_offline(unsigned int cpu) +static int tmigr_clear_cpu_available(unsigned int cpu) { struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); int migrator; u64 firstexp; raw_spin_lock_irq(&tmc->lock); - tmc->online = false; + tmc->available = false; WRITE_ONCE(tmc->wakeup, KTIME_MAX); /* @@ -1442,7 +1442,7 @@ static int tmigr_cpu_offline(unsigned int cpu) * offline; Therefore nextevt value is set to KTIME_MAX */ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); - trace_tmigr_cpu_offline(tmc); + trace_tmigr_cpu_unavailable(tmc); raw_spin_unlock_irq(&tmc->lock); if (firstexp != KTIME_MAX) { @@ -1453,7 +1453,7 @@ static int tmigr_cpu_offline(unsigned int cpu) return 0; } -static int tmigr_cpu_online(unsigned int cpu) +static int tmigr_set_cpu_available(unsigned int cpu) { struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); @@ -1462,11 +1462,11 @@ static int tmigr_cpu_online(unsigned int cpu) return -EINVAL; raw_spin_lock_irq(&tmc->lock); - trace_tmigr_cpu_online(tmc); + trace_tmigr_cpu_available(tmc); tmc->idle = timer_base_is_idle(); if (!tmc->idle) __tmigr_cpu_activate(tmc); - tmc->online = true; + tmc->available = true; raw_spin_unlock_irq(&tmc->lock); return 
0; } @@ -1758,7 +1758,7 @@ static int tmigr_add_cpu(unsigned int cpu) * The (likely) current CPU is expected to be online in the hierarchy, * otherwise the old root may not be active as expected. */ - WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->online); + WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available); ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true); } @@ -1854,7 +1854,7 @@ static int __init tmigr_init(void) goto err; ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online", - tmigr_cpu_online, tmigr_cpu_offline); + tmigr_set_cpu_available, tmigr_clear_cpu_available); if (ret) goto err; diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h index ae19f70f8170..70879cde6fdd 100644 --- a/kernel/time/timer_migration.h +++ b/kernel/time/timer_migration.h @@ -97,7 +97,7 @@ struct tmigr_group { */ struct tmigr_cpu { raw_spinlock_t lock; - bool online; + bool available; bool idle; bool remote; struct tmigr_group *tmgroup; From a048ca5f00ebd5a44f8551d546a3cd81fed7a204 Mon Sep 17 00:00:00 2001 From: Gabriele Monaco Date: Thu, 20 Nov 2025 15:56:48 +0100 Subject: [PATCH 14/19] timers/migration: Add mask for CPUs available in the hierarchy Keep track of the CPUs available for timer migration in a cpumask. This prepares the ground to generalise the concept of unavailable CPUs. Signed-off-by: Gabriele Monaco Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Link: https://patch.msgid.link/20251120145653.296659-3-gmonaco@redhat.com --- kernel/time/timer_migration.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 2cfebed35e22..3325ca7989b3 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -424,6 +424,12 @@ static struct tmigr_group *tmigr_root; static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); +/* + * CPUs available for timer migration. + * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock. + */ +static cpumask_var_t tmigr_available_cpumask; + #define TMIGR_NONE 0xFF #define BIT_CNT 8 @@ -1433,6 +1439,7 @@ static int tmigr_clear_cpu_available(unsigned int cpu) int migrator; u64 firstexp; + cpumask_clear_cpu(cpu, tmigr_available_cpumask); raw_spin_lock_irq(&tmc->lock); tmc->available = false; WRITE_ONCE(tmc->wakeup, KTIME_MAX); @@ -1446,7 +1453,7 @@ static int tmigr_clear_cpu_available(unsigned int cpu) raw_spin_unlock_irq(&tmc->lock); if (firstexp != KTIME_MAX) { - migrator = cpumask_any_but(cpu_online_mask, cpu); + migrator = cpumask_any(tmigr_available_cpumask); work_on_cpu(migrator, tmigr_trigger_active, NULL); } @@ -1461,6 +1468,7 @@ static int tmigr_set_cpu_available(unsigned int cpu) if (WARN_ON_ONCE(!tmc->tmgroup)) return -EINVAL; + cpumask_set_cpu(cpu, tmigr_available_cpumask); raw_spin_lock_irq(&tmc->lock); trace_tmigr_cpu_available(tmc); tmc->idle = timer_base_is_idle(); @@ -1805,6 +1813,11 @@ static int __init tmigr_init(void) if (ncpus == 1) return 0; + if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL)) { + ret = -ENOMEM; + goto err; + } + /* * Calculate the required hierarchy levels. 
Unfortunately there is no * reliable information available, unless all possible CPUs have been From 4c2374ed86847c71dab5602c7882d21a0d56a4c7 Mon Sep 17 00:00:00 2001 From: Gabriele Monaco Date: Thu, 20 Nov 2025 15:56:49 +0100 Subject: [PATCH 15/19] timers/migration: Use scoped_guard on available flag set/clear Cleanup tmigr_clear_cpu_available() and tmigr_set_cpu_available() to prepare for easier checks on the available flag. Signed-off-by: Gabriele Monaco Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Reviewed-by: Thomas Gleixner Link: https://patch.msgid.link/20251120145653.296659-4-gmonaco@redhat.com --- kernel/time/timer_migration.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 3325ca7989b3..a01c7f8bdf52 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1440,17 +1440,17 @@ static int tmigr_clear_cpu_available(unsigned int cpu) u64 firstexp; cpumask_clear_cpu(cpu, tmigr_available_cpumask); - raw_spin_lock_irq(&tmc->lock); - tmc->available = false; - WRITE_ONCE(tmc->wakeup, KTIME_MAX); + scoped_guard(raw_spinlock_irq, &tmc->lock) { + tmc->available = false; + WRITE_ONCE(tmc->wakeup, KTIME_MAX); - /* - * CPU has to handle the local events on his own, when on the way to - * offline; Therefore nextevt value is set to KTIME_MAX - */ - firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); - trace_tmigr_cpu_unavailable(tmc); - raw_spin_unlock_irq(&tmc->lock); + /* + * CPU has to handle the local events on his own, when on the way to + * offline; Therefore nextevt value is set to KTIME_MAX + */ + firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); + trace_tmigr_cpu_unavailable(tmc); + } if (firstexp != KTIME_MAX) { migrator = cpumask_any(tmigr_available_cpumask); @@ -1469,13 +1469,13 @@ static int tmigr_set_cpu_available(unsigned int cpu) return -EINVAL; cpumask_set_cpu(cpu, tmigr_available_cpumask); - raw_spin_lock_irq(&tmc->lock); - trace_tmigr_cpu_available(tmc); - tmc->idle = timer_base_is_idle(); - if (!tmc->idle) - __tmigr_cpu_activate(tmc); - tmc->available = true; - raw_spin_unlock_irq(&tmc->lock); + scoped_guard(raw_spinlock_irq, &tmc->lock) { + trace_tmigr_cpu_available(tmc); + tmc->idle = timer_base_is_idle(); + if (!tmc->idle) + __tmigr_cpu_activate(tmc); + tmc->available = true; + } return 0; } From 22f8e41680efec63ead03d4693676587814f7a24 Mon Sep 17 00:00:00 2001 From: Gabriele Monaco Date: Thu, 20 Nov 2025 15:56:50 +0100 Subject: [PATCH 16/19] cgroup/cpuset: Rename update_unbound_workqueue_cpumask() to update_isolation_cpumasks() update_unbound_workqueue_cpumask() updates unbound workqueues settings when there's a change in isolated CPUs, but it can be used for other subsystems requiring updated when isolated CPUs change. Generalise the name to update_isolation_cpumasks() to prepare for other functions unrelated to workqueues to be called in that spot. 
[longman: Change the function name to update_isolation_cpumasks()] Signed-off-by: Gabriele Monaco Signed-off-by: Waiman Long Signed-off-by: Thomas Gleixner Reviewed-by: Chen Ridong Acked-by: Frederic Weisbecker Acked-by: Waiman Long Link: https://patch.msgid.link/20251120145653.296659-5-gmonaco@redhat.com --- kernel/cgroup/cpuset.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 27adb04df675..cf34623fe66f 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1339,7 +1339,7 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent, return isolcpus_updated; } -static void update_unbound_workqueue_cpumask(bool isolcpus_updated) +static void update_isolation_cpumasks(bool isolcpus_updated) { int ret; @@ -1470,7 +1470,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, list_add(&cs->remote_sibling, &remote_children); cpumask_copy(cs->effective_xcpus, tmp->new_cpus); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); cpuset_force_rebuild(); cs->prs_err = 0; @@ -1511,7 +1511,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) compute_effective_exclusive_cpumask(cs, NULL, NULL); reset_partition_data(cs); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); cpuset_force_rebuild(); /* @@ -1580,7 +1580,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, if (xcpus) cpumask_copy(cs->exclusive_cpus, xcpus); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); if (adding || deleting) cpuset_force_rebuild(); @@ -1943,7 +1943,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, WARN_ON_ONCE(parent->nr_subparts < 0); } spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); if ((old_prs != new_prs) && (cmd == partcmd_update)) update_partition_exclusive_flag(cs, new_prs); @@ -2968,7 +2968,7 @@ static int update_prstate(struct cpuset *cs, int new_prs) else if (isolcpus_updated) isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus); spin_unlock_irq(&callback_lock); - update_unbound_workqueue_cpumask(isolcpus_updated); + update_isolation_cpumasks(isolcpus_updated); /* Force update if switching back to member & update effective_xcpus */ update_cpumasks_hier(cs, &tmpmask, !new_prs); From 185bccc79797d71477e672a1b2a2b7d0325044e7 Mon Sep 17 00:00:00 2001 From: Gabriele Monaco Date: Thu, 20 Nov 2025 15:56:51 +0100 Subject: [PATCH 17/19] sched/isolation: Force housekeeping if isolcpus and nohz_full don't leave any Currently the user can set up isolcpus and nohz_full in such a way that leaves no housekeeping CPU (i.e. no CPU that is neither domain isolated nor nohz full). This can be a problem for other subsystems (e.g. the timer wheel imgration). Prevent this configuration by invalidating the last setting in case the union of isolcpus (domain) and nohz_full covers all CPUs. 
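As a concrete illustration (hypothetical 8-CPU machine, numbers not taken from the patch): booting with

	nohz_full=1-7 isolcpus=domain,0

covers every present CPU with the union of the two masks, so no housekeeping CPU would remain. With this change the parameter parsed second fails the new check, the "Housekeeping: must include one present CPU ..." warning is printed and that setting is ignored, leaving at least one housekeeping CPU.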
Signed-off-by: Gabriele Monaco Signed-off-by: Thomas Gleixner Reviewed-by: Waiman Long Reviewed-by: Frederic Weisbecker Link: https://patch.msgid.link/20251120145653.296659-6-gmonaco@redhat.com --- kernel/sched/isolation.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index a4cf17b1fab0..3ad0d6df6a0a 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -167,6 +167,29 @@ static int __init housekeeping_setup(char *str, unsigned long flags) } } + /* + * Check the combination of nohz_full and isolcpus=domain, + * necessary to avoid problems with the timer migration + * hierarchy. managed_irq is ignored by this check since it + * isn't considered in the timer migration logic. + */ + iter_flags = housekeeping.flags & (HK_FLAG_KERNEL_NOISE | HK_FLAG_DOMAIN); + type = find_first_bit(&iter_flags, HK_TYPE_MAX); + /* + * Pass the check if none of these flags were previously set or + * are not in the current selection. + */ + iter_flags = flags & (HK_FLAG_KERNEL_NOISE | HK_FLAG_DOMAIN); + first_cpu = (type == HK_TYPE_MAX || !iter_flags) ? 0 : + cpumask_first_and_and(cpu_present_mask, + housekeeping_staging, housekeeping.cpumasks[type]); + if (first_cpu >= min(nr_cpu_ids, setup_max_cpus)) { + pr_warn("Housekeeping: must include one present CPU " + "neither in nohz_full= nor in isolcpus=domain, " + "ignoring setting %s\n", str); + goto free_housekeeping_staging; + } + iter_flags = flags & ~housekeeping.flags; for_each_set_bit(type, &iter_flags, HK_TYPE_MAX) From b56651007fc018effe695a68d48caa6970b23094 Mon Sep 17 00:00:00 2001 From: Yury Norov Date: Thu, 20 Nov 2025 15:56:52 +0100 Subject: [PATCH 18/19] cpumask: Add initialiser to use cleanup helpers Now we can simplify a code that allocates cpumasks for local needs. Automatic variables have to be initialized at declaration, or at least before any possibility for the logic to return, so that compiler wouldn't try to call an associate destructor function on a random stack number. Because cpumask_var_t, depending on the CPUMASK_OFFSTACK config, is either a pointer or an array, we have to have a macro for initialization. So define a CPUMASK_VAR_NULL macro, which allows to init struct cpumask pointer with NULL when CPUMASK_OFFSTACK is enabled, and effectively a no-op when CPUMASK_OFFSTACK is disabled (initialisation optimised out with -O2). 
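A minimal sketch of the intended use (illustrative only; the __free(free_cpumask_var) scope-based cleanup helper and the function name are assumptions, not part of this patch):

	static int example_mask_user(void)
	{
		/* Assumed DEFINE_FREE()-based helper releases the mask on scope exit */
		cpumask_var_t tmp __free(free_cpumask_var) = CPUMASK_VAR_NULL;

		if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		/*
		 * Use tmp here; no explicit free_cpumask_var() is needed on any
		 * return path. CPUMASK_VAR_NULL prevents the destructor from
		 * acting on an uninitialized stack value when CPUMASK_OFFSTACK=y.
		 */
		return 0;
	}

With CPUMASK_OFFSTACK=n the initializer expands to an empty initializer for the on-stack mask and both allocation and cleanup are effectively no-ops.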
Signed-off-by: Yury Norov
Signed-off-by: Gabriele Monaco
Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://patch.msgid.link/20251120145653.296659-7-gmonaco@redhat.com
---
 include/linux/cpumask.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index ff8f41ab7ce6..68be522449ec 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1005,6 +1005,7 @@ static __always_inline unsigned int cpumask_size(void)
 #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
 
 #define __cpumask_var_read_mostly __read_mostly
+#define CPUMASK_VAR_NULL NULL
 
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
 
@@ -1051,6 +1052,7 @@ static __always_inline bool cpumask_available(cpumask_var_t mask)
 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
 
 #define __cpumask_var_read_mostly
+#define CPUMASK_VAR_NULL {}
 
 static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 {

From 7dec062cfcf27808dbb70a0b231d1a698792743d Mon Sep 17 00:00:00 2001
From: Gabriele Monaco
Date: Thu, 20 Nov 2025 15:56:53 +0100
Subject: [PATCH 19/19] timers/migration: Exclude isolated cpus from hierarchy

The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is, however, undesired
when CPU-intensive workloads run on isolated cores, as the algorithm
would move the timers from housekeeping to isolated cores, negatively
affecting the isolation.

Exclude isolated cores from the timer migration algorithm by extending
the concept of unavailable cores, currently used for offline ones, to
isolated ones:

* A core is unavailable if isolated or offline;
* A core is available if not isolated and online;

A core is considered unavailable as isolated if it belongs to:

* the isolcpus (domain) list
* an isolated cpuset

Except if it is:

* in the nohz_full list (already idle for the hierarchy)
* the nohz timekeeper core (must be available to handle global timers)

CPUs are added to the hierarchy during late boot, excluding isolated
ones; the hierarchy is also adapted when the cpuset isolation changes.

Due to how the timer migration algorithm works, any CPU that is part of
the hierarchy can have its global timers pulled by remote CPUs and has
to pull remote timers itself; skipping only the pulling of remote
timers would break the logic. For this reason, prevent isolated CPUs
from pulling remote global timers, but also the other way around: any
global timer started on an isolated CPU will run there. This does not
break the concept of isolation (global timers don't come from outside
the CPU) and, if considered inappropriate, can usually be mitigated
with other isolation techniques (e.g. IRQ pinning).

This effect was noticed on a 128-core machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises the
CPUs, and the CPU with the lowest count in a timer migration hierarchy
(here 1 and 65) appears as always active and continuously pulls global
timers from the housekeeping CPUs. This ends up moving driver work
(e.g. delayed work) to isolated CPUs and causes latency spikes:

before the change:

 # oslat -c 1-31,33-63,65-95,97-127 -D 62s
 ...
  Maximum:  1203 10 3 4 ... 5 (us)

after the change:

 # oslat -c 1-31,33-63,65-95,97-127 -D 62s
 ...
  Maximum:  10 4 3 4 3 ... 5 (us)

The same behaviour was observed on a machine with as few as 20 cores /
40 threads with isolcpus set to 1-9,11-39, using rtla-osnoise-top.

Signed-off-by: Gabriele Monaco
Signed-off-by: Thomas Gleixner
Tested-by: John B.
Wyatt IV Reviewed-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://patch.msgid.link/20251120145653.296659-8-gmonaco@redhat.com --- include/linux/timer.h | 9 +++ kernel/cgroup/cpuset.c | 3 + kernel/time/timer_migration.c | 143 ++++++++++++++++++++++++++++++++++ 3 files changed, 155 insertions(+) diff --git a/include/linux/timer.h b/include/linux/timer.h index 0414d9e6b4fc..62e1cea71125 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -188,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu); #define timers_dead_cpu NULL #endif +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask); +#else +static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask) +{ + return 0; +} +#endif + #endif diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index cf34623fe66f..bfc3b319e1c0 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1350,6 +1350,9 @@ static void update_isolation_cpumasks(bool isolcpus_updated) ret = workqueue_unbound_exclude_cpumask(isolated_cpus); WARN_ON_ONCE(ret < 0); + + ret = tmigr_isolated_exclude_cpumask(isolated_cpus); + WARN_ON_ONCE(ret < 0); } /** diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index a01c7f8bdf52..18dda1aa782d 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "timer_migration.h" #include "tick-internal.h" @@ -427,8 +428,13 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); /* * CPUs available for timer migration. * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock. + * Additionally tmigr_available_mutex serializes set/clear operations with each other. */ static cpumask_var_t tmigr_available_cpumask; +static DEFINE_MUTEX(tmigr_available_mutex); + +/* Enabled during late initcall */ +static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated); #define TMIGR_NONE 0xFF #define BIT_CNT 8 @@ -438,6 +444,33 @@ static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) return !(tmc->tmgroup && tmc->available); } +/* + * Returns true if @cpu should be excluded from the hierarchy as isolated. + * Domain isolated CPUs don't participate in timer migration, nohz_full CPUs + * are still part of the hierarchy but become idle (from a tick and timer + * migration perspective) when they stop their tick. This lets the timekeeping + * CPU handle their global timers. Marking also isolated CPUs as idle would be + * too costly, hence they are completely excluded from the hierarchy. + * This check is necessary, for instance, to prevent offline isolated CPUs from + * being incorrectly marked as available once getting back online. + * + * This function returns false during early boot and the isolation logic is + * enabled only after isolated CPUs are marked as unavailable at late boot. + * The tick CPU can be isolated at boot, however we cannot mark it as + * unavailable to avoid having no global migrator for the nohz_full CPUs. This + * should be ensured by the callers of this function: implicitly from hotplug + * callbacks and explicitly in tmigr_init_isolation() and + * tmigr_isolated_exclude_cpumask(). 
+ */ +static inline bool tmigr_is_isolated(int cpu) +{ + if (!static_branch_unlikely(&tmigr_exclude_isolated)) + return false; + return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) || + cpuset_cpu_is_isolated(cpu)) && + housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE); +} + /* * Returns true, when @childmask corresponds to the group migrator or when the * group is not active - so no migrator is set. @@ -1439,8 +1472,12 @@ static int tmigr_clear_cpu_available(unsigned int cpu) int migrator; u64 firstexp; + guard(mutex)(&tmigr_available_mutex); + cpumask_clear_cpu(cpu, tmigr_available_cpumask); scoped_guard(raw_spinlock_irq, &tmc->lock) { + if (!tmc->available) + return 0; tmc->available = false; WRITE_ONCE(tmc->wakeup, KTIME_MAX); @@ -1468,8 +1505,15 @@ static int tmigr_set_cpu_available(unsigned int cpu) if (WARN_ON_ONCE(!tmc->tmgroup)) return -EINVAL; + if (tmigr_is_isolated(cpu)) + return 0; + + guard(mutex)(&tmigr_available_mutex); + cpumask_set_cpu(cpu, tmigr_available_cpumask); scoped_guard(raw_spinlock_irq, &tmc->lock) { + if (tmc->available) + return 0; trace_tmigr_cpu_available(tmc); tmc->idle = timer_base_is_idle(); if (!tmc->idle) @@ -1479,6 +1523,105 @@ static int tmigr_set_cpu_available(unsigned int cpu) return 0; } +static void tmigr_cpu_isolate(struct work_struct *ignored) +{ + tmigr_clear_cpu_available(smp_processor_id()); +} + +static void tmigr_cpu_unisolate(struct work_struct *ignored) +{ + tmigr_set_cpu_available(smp_processor_id()); +} + +/** + * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy + * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy + * + * This function can be called from cpuset code to provide the new set of + * isolated CPUs that should be excluded from the hierarchy. + * Online CPUs not present in exclude_cpumask but already excluded are brought + * back to the hierarchy. + * Functions to isolate/unisolate need to be called locally and can sleep. + */ +int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask) +{ + struct work_struct __percpu *works __free(free_percpu) = + alloc_percpu(struct work_struct); + cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL; + int cpu; + + lockdep_assert_cpus_held(); + + if (!works) + return -ENOMEM; + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + + /* + * First set previously isolated CPUs as available (unisolate). + * This cpumask contains only CPUs that switched to available now. + */ + cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask); + cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask); + + for_each_cpu(cpu, cpumask) { + struct work_struct *work = per_cpu_ptr(works, cpu); + + INIT_WORK(work, tmigr_cpu_unisolate); + schedule_work_on(cpu, work); + } + for_each_cpu(cpu, cpumask) + flush_work(per_cpu_ptr(works, cpu)); + + /* + * Then clear previously available CPUs (isolate). + * This cpumask contains only CPUs that switched to not available now. + * There cannot be overlap with the newly available ones. + */ + cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask); + cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)); + /* + * Handle this here and not in the cpuset code because exclude_cpumask + * might include also the tick CPU if included in isolcpus. 
+ */ + for_each_cpu(cpu, cpumask) { + if (!tick_nohz_cpu_hotpluggable(cpu)) { + cpumask_clear_cpu(cpu, cpumask); + break; + } + } + + for_each_cpu(cpu, cpumask) { + struct work_struct *work = per_cpu_ptr(works, cpu); + + INIT_WORK(work, tmigr_cpu_isolate); + schedule_work_on(cpu, work); + } + for_each_cpu(cpu, cpumask) + flush_work(per_cpu_ptr(works, cpu)); + + return 0; +} + +static int __init tmigr_init_isolation(void) +{ + cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL; + + static_branch_enable(&tmigr_exclude_isolated); + + if (!housekeeping_enabled(HK_TYPE_DOMAIN)) + return 0; + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)); + + /* Protect against RCU torture hotplug testing */ + guard(cpus_read_lock)(); + return tmigr_isolated_exclude_cpumask(cpumask); +} +late_initcall(tmigr_init_isolation); + static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl, int node) {