mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 04:21:09 -04:00
sched_ext: Replace SCX_TASK_OFF_TASKS flag with SCX_TASK_DEAD state
SCX_TASK_OFF_TASKS marked tasks already through sched_ext_dead() so cgroup task iteration would skip them. This can be expressed better with a task state. Replace the flag with SCX_TASK_DEAD. scx_disable_and_exit_task() resets state to NONE on its way out, so sched_ext_dead() now sets DEAD after the wrapper returns. The validation matrix grows NONE -> DEAD, warns on DEAD -> NONE, and tightens READY's predecessor to INIT or ENABLED so the new DEAD value cannot silently transition to READY. Prepares for the following enable vs dead race fix. Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Andrea Righi <arighi@nvidia.com>
This commit is contained in:
@@ -101,24 +101,25 @@ enum scx_ent_flags {
 	SCX_TASK_DEQD_FOR_SLEEP	= 1 << 3, /* last dequeue was for SLEEP */
 	SCX_TASK_SUB_INIT	= 1 << 4, /* task being initialized for a sub sched */
 	SCX_TASK_IMMED		= 1 << 5, /* task is on local DSQ with %SCX_ENQ_IMMED */
-	SCX_TASK_OFF_TASKS	= 1 << 6, /* removed from scx_tasks by sched_ext_dead() */
 
 	/*
-	 * Bits 8 and 9 are used to carry task state:
+	 * Bits 8 to 10 are used to carry task state:
 	 *
 	 * NONE		ops.init_task() not called yet
 	 * INIT		ops.init_task() succeeded, but task can be cancelled
 	 * READY	fully initialized, but not in sched_ext
 	 * ENABLED	fully initialized and in sched_ext
+	 * DEAD		terminal state set by sched_ext_dead()
 	 */
-	SCX_TASK_STATE_SHIFT	= 8, /* bits 8 and 9 are used to carry task state */
-	SCX_TASK_STATE_BITS	= 2,
+	SCX_TASK_STATE_SHIFT	= 8,
+	SCX_TASK_STATE_BITS	= 3,
 	SCX_TASK_STATE_MASK	= ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
 
 	SCX_TASK_NONE		= 0 << SCX_TASK_STATE_SHIFT,
 	SCX_TASK_INIT		= 1 << SCX_TASK_STATE_SHIFT,
 	SCX_TASK_READY		= 2 << SCX_TASK_STATE_SHIFT,
 	SCX_TASK_ENABLED	= 3 << SCX_TASK_STATE_SHIFT,
+	SCX_TASK_DEAD		= 4 << SCX_TASK_STATE_SHIFT,
 
 	/*
 	 * Bits 12 and 13 are used to carry reenqueue reason. In addition to
||||
@@ -723,17 +723,22 @@ static void scx_set_task_state(struct task_struct *p, u32 state)
 
 	switch (state) {
 	case SCX_TASK_NONE:
+		warn = prev_state == SCX_TASK_DEAD;
 		break;
 	case SCX_TASK_INIT:
 		warn = prev_state != SCX_TASK_NONE;
 		p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
 		break;
 	case SCX_TASK_READY:
-		warn = prev_state == SCX_TASK_NONE;
+		warn = !(prev_state == SCX_TASK_INIT ||
+			 prev_state == SCX_TASK_ENABLED);
 		break;
 	case SCX_TASK_ENABLED:
 		warn = prev_state != SCX_TASK_READY;
 		break;
+	case SCX_TASK_DEAD:
+		warn = prev_state != SCX_TASK_NONE;
+		break;
 	default:
 		WARN_ONCE(1, "sched_ext: Invalid task state %d -> %d for %s[%d]",
 			  prev_state, state, p->comm, p->pid);
||||
@@ -972,11 +977,11 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
 		/*
 		 * cgroup_task_dead() removes the dead tasks from cset->tasks
 		 * after sched_ext_dead() and cgroup iteration may see tasks
-		 * which already finished sched_ext_dead(). %SCX_TASK_OFF_TASKS
-		 * is set by sched_ext_dead() under @p's rq lock. Test it to
+		 * which already finished sched_ext_dead(). %SCX_TASK_DEAD is
+		 * set by sched_ext_dead() under @p's rq lock. Test it to
 		 * avoid visiting tasks which are already dead from SCX POV.
 		 */
-		if (p->scx.flags & SCX_TASK_OFF_TASKS) {
+		if (scx_get_task_state(p) == SCX_TASK_DEAD) {
 			__scx_task_iter_rq_unlock(iter);
 			continue;
 		}
|
||||
@@ -3847,7 +3852,7 @@ void sched_ext_dead(struct task_struct *p)
 	 * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
 	 * ENABLED transitions can't race us. Disable ops for @p.
 	 *
-	 * %SCX_TASK_OFF_TASKS synchronizes against cgroup task iteration - see
+	 * %SCX_TASK_DEAD synchronizes against cgroup task iteration - see
 	 * scx_task_iter_next_locked(). NONE tasks need no marking: cgroup
 	 * iteration is only used from sub-sched paths, which require root
 	 * enabled. Root enable transitions every live task to at least READY.
||||
@@ -3858,7 +3863,7 @@ void sched_ext_dead(struct task_struct *p)
 
 		rq = task_rq_lock(p, &rf);
 		scx_disable_and_exit_task(scx_task_sched(p), p);
-		p->scx.flags |= SCX_TASK_OFF_TASKS;
+		scx_set_task_state(p, SCX_TASK_DEAD);
 		task_rq_unlock(rq, p, &rf);
 	}
 }
||||
Reference in New Issue
Block a user