cgroup/cpuset: Move housekeeping_update()/rebuild_sched_domains() together

With the latest changes in sched/isolation.c, rebuild_sched_domains*()
requires the HK_TYPE_DOMAIN housekeeping cpumask to be properly
updated first, if needed, before the sched domains can be
rebuilt. So the two naturally fit together. Do that by creating a new
update_hk_sched_domains() helper to house both actions.

The name of the isolated_cpus_updating flag, which controls the
call to housekeeping_update(), is now outdated. So change it to
update_housekeeping to better reflect its purpose. Also move the call
to update_hk_sched_domains() to the end of cpuset and hotplug operations,
before releasing the cpuset_mutex.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Waiman Long
2026-02-21 13:54:16 -05:00
committed by Tejun Heo
parent 5e6aac573c
commit 3bfe479671

View File

@@ -130,10 +130,9 @@ static cpumask_var_t subpartitions_cpus; /* RWCS */
static cpumask_var_t isolated_cpus; /* CSCB */
/*
* Set if isolated_cpus is being updated in the current cpuset_mutex
* critical section.
* Set if housekeeping cpumasks are to be updated.
*/
static bool isolated_cpus_updating; /* RWCS */
static bool update_housekeeping; /* RWCS */
/*
* A flag to force sched domain rebuild at the end of an operation.
@@ -1189,7 +1188,7 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
return;
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
isolated_cpus_updating = true;
update_housekeeping = true;
}
/*
@@ -1307,22 +1306,22 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
}
/*
* update_isolation_cpumasks - Update external isolation related CPU masks
* update_hk_sched_domains - Update HK cpumasks & rebuild sched domains
*
* The following external CPU masks will be updated if necessary:
* - workqueue unbound cpumask
* Update housekeeping cpumasks and rebuild sched domains if necessary.
* This should be called at the end of cpuset or hotplug actions.
*/
static void update_isolation_cpumasks(void)
static void update_hk_sched_domains(void)
{
int ret;
if (!isolated_cpus_updating)
return;
ret = housekeeping_update(isolated_cpus);
WARN_ON_ONCE(ret < 0);
isolated_cpus_updating = false;
if (update_housekeeping) {
/* Updating HK cpumasks implies rebuild sched domains */
WARN_ON_ONCE(housekeeping_update(isolated_cpus));
update_housekeeping = false;
force_sd_rebuild = true;
}
/* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
if (force_sd_rebuild)
rebuild_sched_domains_locked();
}
/**
@@ -1473,7 +1472,6 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
cs->remote_partition = true;
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
update_isolation_cpumasks();
cpuset_force_rebuild();
cs->prs_err = 0;
@@ -1518,7 +1516,6 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_excpus(cs, cs->effective_xcpus);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
update_isolation_cpumasks();
cpuset_force_rebuild();
/*
@@ -1589,7 +1586,6 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
update_isolation_cpumasks();
if (adding || deleting)
cpuset_force_rebuild();
@@ -1933,7 +1929,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
partition_xcpus_add(new_prs, parent, tmp->delmask);
spin_unlock_irq(&callback_lock);
update_isolation_cpumasks();
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2901,7 +2896,6 @@ static int update_prstate(struct cpuset *cs, int new_prs)
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
update_isolation_cpumasks();
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
@@ -3191,9 +3185,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
}
free_cpuset(trialcs);
if (force_sd_rebuild)
rebuild_sched_domains_locked();
out_unlock:
update_hk_sched_domains();
cpuset_full_unlock();
if (of_cft(of)->private == FILE_MEMLIST)
schedule_flush_migrate_mm();
@@ -3301,6 +3294,7 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
update_hk_sched_domains();
cpuset_full_unlock();
return retval ?: nbytes;
}
@@ -3475,6 +3469,7 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
update_hk_sched_domains();
cpuset_full_unlock();
}
@@ -3882,10 +3877,12 @@ static void cpuset_handle_hotplug(void)
rcu_read_unlock();
}
/* rebuild sched domains if necessary */
if (force_sd_rebuild)
rebuild_sched_domains_cpuslocked();
if (update_housekeeping || force_sd_rebuild) {
mutex_lock(&cpuset_mutex);
update_hk_sched_domains();
mutex_unlock(&cpuset_mutex);
}
free_tmpmasks(ptmp);
}