sched/smp: Use the SMP version of the scheduler syscalls

Simplify the scheduler by making CONFIG_SMP=y code in
idle_cpu(), __sched_setscheduler() and sched_setaffinity()
unconditional.
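
For reference, the net effect on idle_cpu() is sketched below (a rough
reconstruction from the first hunk; surrounding checks are abridged):

int idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /* ... earlier checks omitted ... */

        if (rq->nr_running)
                return 0;

        /* Formerly under #ifdef CONFIG_SMP, now checked unconditionally: */
        if (rq->ttwu_pending)
                return 0;

        return 1;
}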

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-33-mingo@kernel.org
Commit: 8a9246ddc1 (parent 9d9af2372f)
Author: Ingo Molnar
Date:   2025-05-28 10:09:13 +02:00

@@ -209,10 +209,8 @@ int idle_cpu(int cpu)
 	if (rq->nr_running)
 		return 0;
 
-#ifdef CONFIG_SMP
 	if (rq->ttwu_pending)
 		return 0;
-#endif
 
 	return 1;
 }
@@ -641,7 +639,6 @@ int __sched_setscheduler(struct task_struct *p,
 			goto unlock;
 		}
 #endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_SMP
 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
 			cpumask_t *span = rq->rd->span;
@@ -657,7 +654,6 @@ int __sched_setscheduler(struct task_struct *p,
 				goto unlock;
 			}
 		}
-#endif /* CONFIG_SMP */
 	}
 
 	/* Re-check policy now with rq lock held: */
@@ -1239,7 +1235,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
 	if (user_mask) {
 		cpumask_copy(user_mask, in_mask);
-	} else if (IS_ENABLED(CONFIG_SMP)) {
+	} else {
 		return -ENOMEM;
 	}
 
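
Not part of this change, but for context: a minimal userspace sketch that
exercises the sched_setaffinity() syscall whose kernel-side path is touched
in the last hunk. Pinning to CPU 0 is an arbitrary illustrative choice.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);

        /* pid 0 means "the calling thread" */
        if (sched_setaffinity(0, sizeof(set), &set) != 0) {
                perror("sched_setaffinity");
                return 1;
        }

        printf("pinned to CPU 0\n");
        return 0;
}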