mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge tag 'sched-urgent-2026-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:

 - Fix the delayed dequeue negative lag increase fix in the fair
   scheduler (Peter Zijlstra)

 - Fix wakeup_preempt_fair() to do proper delayed dequeue (Vincent
   Guittot)

 - Clear sched_entity::rel_deadline when initializing forked entities;
   a stale value can cause all tasks to become EEVDF-ineligible, leading
   to a NULL pointer dereference crash in pick_next_entity() (Zicheng Qu)

* tag 'sched-urgent-2026-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Clear rel_deadline when initializing forked entities
  sched/fair: Fix wakeup_preempt_fair() vs delayed dequeue
  sched/fair: Fix the negative lag increase fix
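For context on the third fix: EEVDF only considers entities whose lag against the queue's average vruntime is non-negative, and pick_next_entity() (visible later in this diff) dereferences the picked entity via se->sched_delayed without a NULL check, which is the crash site the message refers to. Below is a minimal, self-contained sketch of the eligibility rule in plain C; the kernel's actual check, vruntime_eligible() in kernel/sched/fair.c, additionally load-weights the comparison, and the stand-in types here are not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in entity: just the field the eligibility rule needs. */
struct entity {
	int64_t vruntime;
};

/*
 * EEVDF eligibility, simplified: an entity may be picked only when its
 * lag (avg_vruntime - vruntime) is >= 0.
 */
static bool entity_eligible(int64_t avg_vruntime, const struct entity *se)
{
	return avg_vruntime - se->vruntime >= 0;
}

/* Returns NULL when no entity is eligible: the crash precondition. */
static struct entity *pick_first_eligible(int64_t avg_vruntime,
					  struct entity *q, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (entity_eligible(avg_vruntime, &q[i]))
			return &q[i];
	}
	return NULL;
}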
kernel/sched/core.c
@@ -4458,6 +4458,7 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
 	p->se.vlag			= 0;
+	p->se.rel_deadline		= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 	/* A delayed task cannot be in clone(). */
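Why the one-liner matters: __sched_fork() runs on a child whose sched_entity was copied from the parent, and place_entity() treats a non-zero se->rel_deadline as a saved deadline offset to replay on the next enqueue. A minimal sketch of that consumption path, simplified from place_entity() in kernel/sched/fair.c (the struct here is a reduced stand-in, and the real deadline computation scales the slice by weight):

#include <stdint.h>

/* Reduced stand-in for the kernel's struct sched_entity. */
struct sched_entity {
	uint64_t vruntime;
	uint64_t deadline;
	int64_t  rel_deadline;
	uint64_t slice;
};

/*
 * Sketch of the rel_deadline consumption in place_entity(): a non-zero
 * value is replayed as "deadline = vruntime + saved offset" and then
 * cleared. Without the new "p->se.rel_deadline = 0;" in __sched_fork(),
 * a forked child inherits the parent's offset and replays it here.
 */
static void place_entity_sketch(struct sched_entity *se)
{
	if (se->rel_deadline) {
		se->deadline = se->vruntime + se->rel_deadline;
		se->rel_deadline = 0;
		return;
	}

	/* Normal path: a fresh deadline one slice ahead of vruntime. */
	se->deadline = se->vruntime + se->slice;
}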
kernel/sched/fair.c
@@ -847,13 +847,19 @@ static s64 entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 avrunt
  * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO
  * is set.
  *
- * Return true if the lag has been adjusted.
+ * Return true if the vlag has been modified. Specifically:
+ *
+ *   se->vlag != avg_vruntime() - se->vruntime
+ *
+ * This can be due to clamping in entity_lag() or clamping due to
+ * sched_delayed. Either way, when vlag is modified and the entity is
+ * retained, the tree needs to be adjusted.
  */
 static __always_inline
 bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	s64 vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
-	bool ret;
+	u64 avruntime = avg_vruntime(cfs_rq);
+	s64 vlag = entity_lag(cfs_rq, se, avruntime);
 
 	WARN_ON_ONCE(!se->on_rq);
 
@@ -863,10 +869,9 @@ bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (sched_feat(DELAY_ZERO))
 			vlag = min(vlag, 0);
 	}
 
-	ret = (vlag == se->vlag);
 	se->vlag = vlag;
 
-	return ret;
+	return avruntime - vlag != se->vruntime;
 }
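The rewritten return value drops the temporary and compares against the raw lag rather than the previously stored vlag: the raw lag is avruntime - se->vruntime, so "vlag was clamped" is exactly vlag != avruntime - se->vruntime, i.e. avruntime - vlag != se->vruntime as written. A self-contained check of that equivalence in plain C, with the kernel's clamping reduced to the DELAY_ZERO-style min (everything here is a sketch, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Report "modified" when the stored vlag no longer equals the raw lag
 * (avruntime - vruntime); rearranged: avruntime - vlag != vruntime.
 */
static bool update_lag_sketch(int64_t avruntime, int64_t vruntime,
			      bool delay_zero, int64_t *vlag_out)
{
	int64_t vlag = avruntime - vruntime;	/* raw lag */

	if (delay_zero && vlag > 0)
		vlag = 0;			/* DELAY_ZERO-style clamp */

	*vlag_out = vlag;
	return avruntime - vlag != vruntime;	/* true iff clamped */
}

int main(void)
{
	int64_t vlag;

	/* Positive lag clamped to 0: reported as modified. */
	assert(update_lag_sketch(100, 90, true, &vlag) && vlag == 0);

	/* Negative lag untouched: not modified. */
	assert(!update_lag_sketch(100, 110, true, &vlag) && vlag == -10);
	return 0;
}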
@@ -1099,7 +1104,7 @@ static inline void cancel_protect_slice(struct sched_entity *se)
  *
  * Which allows tree pruning through eligibility.
  */
-static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
 {
 	struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
 	struct sched_entity *se = __pick_first_entity(cfs_rq);
 
@@ -1170,11 +1175,6 @@ static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq, bool protect)
 	return best;
 }
 
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
-{
-	return __pick_eevdf(cfs_rq, true);
-}
-
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
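With the always-protect wrapper removed, each caller now states explicitly whether the current entity's protected slice may short-circuit the pick. A sketch of what @protect gates, borrowing the protect_slice idea from the RUN_TO_PARITY helpers visible in this diff's context lines (the types and helper below are simplified stand-ins, not the kernel implementation):

#include <stdbool.h>
#include <stddef.h>

/* Stand-in entity: only what the sketch needs. */
struct entity {
	bool on_rq;
	bool slice_protected;	/* stand-in for protect_slice(curr) */
};

/*
 * Sketch of the @protect argument: when true, a slice-protected current
 * entity keeps the CPU (RUN_TO_PARITY); when false, as in the
 * PREEMPT_WAKEUP_SHORT case of wakeup_preempt_fair(), the pick always
 * falls through to the best eligible entity.
 */
static struct entity *pick_eevdf_sketch(struct entity *curr,
					struct entity *best_eligible,
					bool protect)
{
	if (curr && curr->on_rq && protect && curr->slice_protected)
		return curr;

	return best_eligible;	/* earliest virtual deadline among eligible */
}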
@@ -5749,11 +5749,11 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
  * 4) do not run the "skip" process, if something else is available
  */
 static struct sched_entity *
-pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
+pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq, bool protect)
 {
 	struct sched_entity *se;
 
-	se = pick_eevdf(cfs_rq);
+	se = pick_eevdf(cfs_rq, protect);
 	if (se->sched_delayed) {
 		dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
 		/*
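Note the contract this hunk's surroundings establish: when the EEVDF pick lands on a sched_delayed entity, pick_next_entity() completes the delayed dequeue and returns NULL, so every caller must treat NULL as "retry the pick". A runnable toy model of that caller contract (stand-in queue and names, not kernel code):

#include <stdbool.h>
#include <stddef.h>

/* Toy queue: slot 0 holds a delayed entity, slot 1 a runnable one. */
struct entity {
	bool sched_delayed;
	bool queued;
};

static struct entity queue[2] = {
	{ .sched_delayed = true,  .queued = true },
	{ .sched_delayed = false, .queued = true },
};

/* NULL means "the pick hit a delayed entity and dequeued it; retry". */
static struct entity *pick_next_entity_sketch(void)
{
	for (size_t i = 0; i < 2; i++) {
		if (!queue[i].queued)
			continue;
		if (queue[i].sched_delayed) {
			queue[i].queued = false;  /* complete delayed dequeue */
			return NULL;
		}
		return &queue[i];
	}
	return NULL;	/* empty queue: callers must not spin on this */
}

int main(void)
{
	struct entity *se;

	/* Caller pattern, like "goto again" in pick_task_fair(). */
	do {
		se = pick_next_entity_sketch();
	} while (!se);

	return se->sched_delayed ? 1 : 0;	/* 0: runnable entity picked */
}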
@@ -9027,7 +9027,7 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f
 {
 	enum preempt_wakeup_action preempt_action = PREEMPT_WAKEUP_PICK;
 	struct task_struct *donor = rq->donor;
-	struct sched_entity *se = &donor->se, *pse = &p->se;
+	struct sched_entity *nse, *se = &donor->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(donor);
 	int cse_is_idle, pse_is_idle;
@@ -9138,12 +9138,18 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f
 	}
 
 pick:
-	/*
-	 * If @p has become the most eligible task, force preemption.
-	 */
-	if (__pick_eevdf(cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT) == pse)
+	nse = pick_next_entity(rq, cfs_rq, preempt_action != PREEMPT_WAKEUP_SHORT);
+	/* If @p has become the most eligible task, force preemption */
+	if (nse == pse)
 		goto preempt;
+
+	/*
+	 * Because p is enqueued, nse being null can only mean that we
+	 * dequeued a delayed task.
+	 */
+	if (!nse)
+		goto pick;
 
 	if (sched_feat(RUN_TO_PARITY))
 		update_protect_slice(cfs_rq, se);
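The new comment's reasoning doubles as a termination argument for the "goto pick" retry: @p is enqueued and not delayed, and each NULL pick permanently dequeues one delayed entity, so a non-NULL pick arrives after at most as many retries as there are delayed entities. A toy rendering of that argument (plain C, not kernel code):

#include <assert.h>

int main(void)
{
	int nr_delayed = 3;	/* delayed entities the picker may hit */
	int retries = 0;

	/* Each NULL pick dequeues one delayed entity for good. */
	while (nr_delayed > 0) {
		nr_delayed--;
		retries++;
	}

	assert(retries == 3);	/* the next pick returns @p or better */
	return 0;
}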
@@ -9179,7 +9185,7 @@ static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
 
 		throttled |= check_cfs_rq_runtime(cfs_rq);
 
-		se = pick_next_entity(rq, cfs_rq);
+		se = pick_next_entity(rq, cfs_rq, true);
 		if (!se)
 			goto again;
 		cfs_rq = group_cfs_rq(se);