mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-04 18:11:39 -04:00
sched/fair: Remove superfluous rcu_read_lock()
With fair switched to rcu_dereference_all() validation, having IRQ or preemption disabled is sufficient; remove the rcu_read_lock() clutter.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251127154725.647502625@infradead.org
This commit is contained in:
Committed by: Ingo Molnar
Parent: 71fedc41c2
Commit: a03fee333a
@@ -12856,21 +12856,16 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	rq_unpin_lock(this_rq, rf);
 
-	rcu_read_lock();
 	sd = rcu_dereference_sched_domain(this_rq->sd);
-	if (!sd) {
-		rcu_read_unlock();
+	if (!sd)
 		goto out;
-	}
 
 	if (!get_rd_overloaded(this_rq->rd) ||
 	    this_rq->avg_idle < sd->max_newidle_lb_cost) {
 
 		update_next_balance(sd, &next_balance);
-		rcu_read_unlock();
 		goto out;
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Include sched_balance_update_blocked_averages() in the cost
@@ -12883,7 +12878,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	rq_modified_clear(this_rq);
 	raw_spin_rq_unlock(this_rq);
 
-	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
 		u64 domain_cost;
 
@@ -12933,7 +12927,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 		if (pulled_task || !continue_balancing)
 			break;
 	}
-	rcu_read_unlock();
 
 	raw_spin_rq_lock(this_rq);
 
||||
Reference in New Issue
Block a user