mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-15 23:41:35 -04:00
Merge tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:

 - Fix zero_vruntime tracking again (Peter Zijlstra)

 - Fix avg_vruntime() usage in sched_debug (Peter Zijlstra)

* tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/debug: Fix avg_vruntime() usage
  sched/fair: Fix zero_vruntime tracking fix
This commit is contained in:
@@ -902,6 +902,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
|
||||
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
{
|
||||
s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
|
||||
u64 avruntime;
|
||||
struct sched_entity *last, *first, *root;
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
unsigned long flags;
|
||||
@@ -925,6 +926,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
if (last)
|
||||
right_vruntime = last->vruntime;
|
||||
zero_vruntime = cfs_rq->zero_vruntime;
|
||||
avruntime = avg_vruntime(cfs_rq);
|
||||
raw_spin_rq_unlock_irqrestore(rq, flags);
|
||||
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
|
||||
@@ -934,7 +936,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
|
||||
SPLIT_NS(zero_vruntime));
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
|
||||
SPLIT_NS(avg_vruntime(cfs_rq)));
|
||||
SPLIT_NS(avruntime));
|
||||
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
|
||||
SPLIT_NS(right_vruntime));
|
||||
spread = right_vruntime - left_vruntime;
|
||||
|
||||
@@ -707,7 +707,7 @@ void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta)
|
||||
* Called in:
|
||||
* - place_entity() -- before enqueue
|
||||
* - update_entity_lag() -- before dequeue
|
||||
* - entity_tick()
|
||||
* - update_deadline() -- slice expiration
|
||||
*
|
||||
* This means it is one entry 'behind' but that puts it close enough to where
|
||||
* the bound on entity_key() is at most two lag bounds.
|
||||
@@ -1131,6 +1131,7 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
|
||||
* EEVDF: vd_i = ve_i + r_i / w_i
|
||||
*/
|
||||
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
|
||||
avg_vruntime(cfs_rq);
|
||||
|
||||
/*
|
||||
* The task has consumed its request, reschedule.
|
||||
@@ -5593,11 +5594,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
||||
update_load_avg(cfs_rq, curr, UPDATE_TG);
|
||||
update_cfs_group(curr);
|
||||
|
||||
/*
|
||||
* Pulls along cfs_rq::zero_vruntime.
|
||||
*/
|
||||
avg_vruntime(cfs_rq);
|
||||
|
||||
#ifdef CONFIG_SCHED_HRTICK
|
||||
/*
|
||||
* queued ticks are scheduled to match the slice, so don't bother
|
||||
@@ -9128,7 +9124,7 @@ static void yield_task_fair(struct rq *rq)
|
||||
*/
|
||||
if (entity_eligible(cfs_rq, se)) {
|
||||
se->vruntime = se->deadline;
|
||||
se->deadline += calc_delta_fair(se->slice, se);
|
||||
update_deadline(cfs_rq, se);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user