sched_ext: Inline scx_init_task() and move RESET_RUNNABLE_AT into scx_set_task_state()

Prepare for the upcoming SCX_TASK_INIT_BEGIN/DEAD work by collapsing the
scx_init_task() helper: move the SCX_TASK_RESET_RUNNABLE_AT setting into
scx_set_task_state() on the INIT transition (previously it was set
unconditionally at every INIT site through the scx_init_task() helper),
inline scx_init_task() into its two callers, scx_fork() and
scx_root_enable_workfn(), and drop the helper.
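
With the helper gone, each caller runs the init sequence itself. Roughly,
the scx_fork() path below becomes (condensed from the diff; error handling
as in the actual change):

	ret = __scx_init_task(sch, p, true);
	if (unlikely(ret))
		return ret;
	/* the INIT transition now also sets SCX_TASK_RESET_RUNNABLE_AT */
	scx_set_task_state(p, SCX_TASK_INIT);
	scx_set_task_sched(p, sch);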

As a side effect, the scx_sub_disable() migration sequence now also sets
SCX_TASK_RESET_RUNNABLE_AT (it previously wrote the INIT state directly
without going through scx_init_task()). The flag triggers a runnable_at
reset on the next set_task_runnable(), which is harmless for a task that
has just been migrated between schedulers.
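
For reference, the flag is consumed on the next runnable transition roughly
as follows (simplified sketch of set_task_runnable(), not part of this
patch):

	static void set_task_runnable(struct rq *rq, struct task_struct *p)
	{
		lockdep_assert_rq_held(rq);

		if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) {
			p->scx.runnable_at = jiffies;
			p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT;
		}

		/* runnable_list is kept ordered by runnable_at */
		list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
	}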

On the root-enable path, p->scx.flags is now written without holding the
task's rq lock. This is safe because the task isn't visible to SCX at that
point; a follow-up patch restores the lock-held write.
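
Condensed from the last hunk of the diff, the per-task sequence on that
path is now:

	scx_task_iter_unlock(&sti);

	ret = __scx_init_task(sch, p, false);
	if (unlikely(ret)) {
		/* put_task_struct(), stop iteration, scx_error(), bail */
	}

	/* p->scx.flags is updated here without the rq lock held */
	scx_set_task_state(p, SCX_TASK_INIT);
	scx_set_task_sched(p, sch);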

v2: Note p->scx.flags rq-lock relaxation on root-enable path. (Andrea)

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>

@@ -726,6 +726,7 @@ static void scx_set_task_state(struct task_struct *p, u32 state)
 		break;
 	case SCX_TASK_INIT:
 		warn = prev_state != SCX_TASK_NONE;
+		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
 		break;
 	case SCX_TASK_READY:
 		warn = prev_state == SCX_TASK_NONE;
@@ -3585,22 +3586,6 @@ static int __scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
 	return 0;
 }
-static int scx_init_task(struct scx_sched *sch, struct task_struct *p, bool fork)
-{
-	int ret;
-	ret = __scx_init_task(sch, p, fork);
-	if (!ret) {
-		/*
-		 * While @p's rq is not locked. @p is not visible to the rest of
-		 * SCX yet and it's safe to update the flags and state.
-		 */
-		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
-		scx_set_task_state(p, SCX_TASK_INIT);
-	}
-	return ret;
-}
 static void __scx_enable_task(struct scx_sched *sch, struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
@@ -3763,10 +3748,11 @@ int scx_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 #else
 		struct scx_sched *sch = scx_root;
 #endif
-		ret = scx_init_task(sch, p, true);
-		if (!ret)
-			scx_set_task_sched(p, sch);
-		return ret;
+		ret = __scx_init_task(sch, p, true);
+		if (unlikely(ret))
+			return ret;
+		scx_set_task_state(p, SCX_TASK_INIT);
+		scx_set_task_sched(p, sch);
 	}
 	return 0;
@@ -6897,8 +6883,8 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 		scx_task_iter_unlock(&sti);
-		ret = scx_init_task(sch, p, false);
-		if (ret) {
+		ret = __scx_init_task(sch, p, false);
+		if (unlikely(ret)) {
 			put_task_struct(p);
 			scx_task_iter_stop(&sti);
 			scx_error(sch, "ops.init_task() failed (%d) for %s[%d]",
@@ -6906,6 +6892,7 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 			goto err_disable_unlock_all;
 		}
+		scx_set_task_state(p, SCX_TASK_INIT);
 		scx_set_task_sched(p, sch);
 		scx_set_task_state(p, SCX_TASK_READY);