diff options
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 5 |
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4ae06ce4a916..04993c763a06 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7138,7 +7138,6 @@ static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
 
 static struct {
 	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
 	int has_blocked_load;		/* Idle CPUS has blocked load */
 	int needs_update;		/* Newly idle CPUs need their next_balance collated */
 	unsigned long next_balance;	/* in jiffy units */
@@ -12461,7 +12460,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 * None are in tickless mode and hence no need for NOHZ idle load
 	 * balancing
 	 */
-	if (unlikely(!atomic_read(&nohz.nr_cpus)))
+	if (unlikely(cpumask_empty(nohz.idle_cpus_mask)))
 		return;
 
 	if (rq->nr_running >= 2) {
@@ -12574,7 +12573,6 @@ void nohz_balance_exit_idle(struct rq *rq)
 
 	rq->nohz_tick_stopped = 0;
 	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
-	atomic_dec(&nohz.nr_cpus);
 
 	set_cpu_sd_state_busy(rq->cpu);
 }
@@ -12632,7 +12630,6 @@ void nohz_balance_enter_idle(int cpu)
 
 	rq->nohz_tick_stopped = 1;
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
-	atomic_inc(&nohz.nr_cpus);
 
 	/*
 	 * Ensures that if nohz_idle_balance() fails to observe our
