diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2025-11-14 11:00:55 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2025-12-14 08:25:02 +0100 |
| commit | a03fee333a2f1e065a739bdbe5edbc5512fab9a4 (patch) | |
| tree | 88583ec814dc7e623c97b81e6aa39ea073a76e03 | |
| parent | 71fedc41c23b0010c578e6e224694ca15c19cf7d (diff) | |
sched/fair: Remove superfluous rcu_read_lock()
With fair switched to rcu_dereference_all() validation, having IRQ or
preemption disabled is sufficient, remove the rcu_read_lock()
clutter.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251127154725.647502625@infradead.org
| -rw-r--r-- | kernel/sched/fair.c | 9 |
1 files changed, 1 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 44a359d6a299..496a30a41854 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12856,21 +12856,16 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	rq_unpin_lock(this_rq, rf);
 
-	rcu_read_lock();
 	sd = rcu_dereference_sched_domain(this_rq->sd);
-	if (!sd) {
-		rcu_read_unlock();
+	if (!sd)
 		goto out;
-	}
 
 	if (!get_rd_overloaded(this_rq->rd) ||
 	    this_rq->avg_idle < sd->max_newidle_lb_cost) {
 
 		update_next_balance(sd, &next_balance);
-		rcu_read_unlock();
 		goto out;
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Include sched_balance_update_blocked_averages() in the cost
@@ -12883,7 +12878,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	rq_modified_clear(this_rq);
 	raw_spin_rq_unlock(this_rq);
 
-	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
 		u64 domain_cost;
 
@@ -12933,7 +12927,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 		if (pulled_task || !continue_balancing)
 			break;
 	}
-	rcu_read_unlock();
 
 	raw_spin_rq_lock(this_rq);
