author	Peter Zijlstra <peterz@infradead.org>	2010-03-16 14:31:44 -0700
committer	Clark Williams <williams@redhat.com>	2012-04-04 09:14:36 -0500
commit	2a4abb8dc7356fd1f7f5e66834fe6af974d892b5 (patch)
tree	5a8171d5b13878ec746cd13cd03eec155525d6bd /kernel
parent	6a8199cb70150d5244d63e7961446b68106e3d9c (diff)
sched: Break out from load_balancing on rq_lock contention
Also limit NEW_IDLE pull

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
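The change boils down to two early-exit conditions inside the task migration loop: stop pulling as soon as another CPU is spinning on either runqueue lock, and, on a newly idle CPU, stop once the first task has been pulled. As a hypothetical illustration only (not the kernel code), the self-contained userspace sketch below shows the same pattern; the names rq, pull_tasks and the atomic lock_waiters counter standing in for raw_spin_is_contended() are invented for the example.

/*
 * Hypothetical userspace sketch of the pattern in this patch: while
 * batch-migrating tasks between two runqueues, bail out early when either
 * runqueue lock becomes contended, and when newly idle stop after the
 * first pulled task.  Contention is modelled with an atomic waiter count,
 * a stand-in for raw_spin_is_contended(); this is not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rq {
	atomic_int lock_waiters;	/* threads spinning on this rq's lock */
	int nr_tasks;			/* runnable tasks on this rq */
};

static bool rq_lock_contended(struct rq *rq)
{
	return atomic_load(&rq->lock_waiters) > 0;
}

/*
 * Pull up to max_pull tasks from busiest to this_rq; both runqueue locks
 * are assumed held by the caller for the duration of the loop.
 */
static int pull_tasks(struct rq *this_rq, struct rq *busiest,
		      int max_pull, bool newly_idle)
{
	int pulled = 0;

	while (pulled < max_pull && busiest->nr_tasks > 0) {
		busiest->nr_tasks--;
		this_rq->nr_tasks++;
		pulled++;

		/*
		 * Newly idle balancing is latency sensitive: once this CPU
		 * has something to run, one task is enough.
		 */
		if (newly_idle && this_rq->nr_tasks > 0)
			break;

		/*
		 * Someone is waiting on either lock: stop pulling so the
		 * critical section stays short.
		 */
		if (rq_lock_contended(this_rq) || rq_lock_contended(busiest))
			break;
	}
	return pulled;
}

int main(void)
{
	struct rq this_rq = { .nr_tasks = 0 };
	struct rq busiest = { .nr_tasks = 8 };

	/* Periodic balancing, no contention: the full batch moves. */
	printf("pulled %d\n", pull_tasks(&this_rq, &busiest, 4, false));

	/* Newly idle balancing stops after the first pulled task. */
	printf("pulled %d\n", pull_tasks(&this_rq, &busiest, 4, true));

	/* A waiter on the busiest lock also stops the loop after one task. */
	atomic_store(&busiest.lock_waiters, 1);
	printf("pulled %d\n", pull_tasks(&this_rq, &busiest, 4, false));
	return 0;
}

With these checks the time the balancer holds both locks is bounded roughly by the cost of moving one task once contention or a runnable task appears, which is what keeps newly idle balancing from becoming a latency source on preemptible kernels.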
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	18
1 file changed, 18 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8a39fa3e3c6c..3747e53ee6a5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2899,6 +2899,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		 */
 		if (idle == CPU_NEWLY_IDLE)
 			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
 #endif
 		/*
@@ -3039,6 +3043,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
 			break;
+
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
+			break;
+
+		if (raw_spin_is_contended(&this_rq->lock) ||
+		    raw_spin_is_contended(&busiest->lock))
+			break;
+#endif
 	}
 	rcu_read_unlock();