author    Ingo Molnar <mingo@kernel.org>    2014-07-05 11:06:10 +0200
committer Ingo Molnar <mingo@kernel.org>    2014-07-05 11:06:10 +0200
commit    51da9830d7a58c8f77127c622ee57d453c88af09 (patch)
tree      85bd2caae0344f77f0afd5f9617a45855000b821 /kernel/sched
parent    5d5e2b1bcbdc996e72815c03fdc5ea82c4642397 (diff)
parent    d490b3e2c23369c6adfa183d18d9a24ced247797 (diff)
Merge branch 'timers/nohz' into sched/core
Merge these two, because upcoming patches will touch both areas.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 -rw-r--r--  kernel/sched/core.c   22
 -rw-r--r--  kernel/sched/sched.h  12
 2 files changed, 22 insertions, 12 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..7f3063c153d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -684,10 +684,16 @@ static void wake_up_idle_cpu(int cpu)
 
 static bool wake_up_full_nohz_cpu(int cpu)
 {
+	/*
+	 * We just need the target to call irq_exit() and re-evaluate
+	 * the next tick. The nohz full kick at least implies that.
+	 * If needed we can still optimize that later with an
+	 * empty IRQ.
+	 */
 	if (tick_nohz_full_cpu(cpu)) {
 		if (cpu != smp_processor_id() ||
 		    tick_nohz_tick_stopped())
-			smp_send_reschedule(cpu);
+			tick_nohz_full_kick_cpu(cpu);
 		return true;
 	}
 
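[Editor's note] This hunk replaces the bare reschedule IPI with tick_nohz_full_kick_cpu(): the kick carries no payload of its own, it only has to interrupt the target so that the irq_exit() path re-evaluates the tick, which is why the comment says an empty IRQ would eventually do. For readers outside the kernel tree, here is a minimal user-space model of that pattern, plain C11 with pthreads; none of the names below (kick_cpu, cpu_loop) are kernel APIs, they are illustrative stand-ins:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static atomic_int nr_running = 1;	/* stands in for rq->nr_running */
static bool kicked;

/* Stand-in for tick_nohz_full_kick_cpu(): just interrupt the target. */
static void kick_cpu(void)
{
	pthread_mutex_lock(&lock);
	kicked = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* The target "CPU": tick stopped, parked until it is interrupted. */
static void *cpu_loop(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!kicked)
		pthread_cond_wait(&kick, &lock);	/* tick stopped */
	pthread_mutex_unlock(&lock);

	/* "irq_exit()" path: re-evaluate whether the tick may stay off. */
	printf("tick %s\n",
	       atomic_load(&nr_running) > 1 ? "restarted" : "stays off");
	return NULL;
}

int main(void)
{
	pthread_t cpu;

	pthread_create(&cpu, NULL, cpu_loop, NULL);
	atomic_fetch_add(&nr_running, 1);	/* waker adds a task... */
	kick_cpu();				/* ...then kicks the target */
	pthread_join(cpu, NULL);
	return 0;
}

Compile with cc -pthread. The point of the model: the kick itself decides nothing; the interrupted side recomputes its tick state on the way out, exactly as the new comment describes.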
@@ -734,10 +740,11 @@ bool sched_can_stop_tick(void)
 
 	rq = this_rq();
 
-	/* Make sure rq->nr_running update is visible after the IPI */
-	smp_rmb();
-
-	/* More than one running task need preemption */
+	/*
+	 * More than one running task need preemption.
+	 * nr_running update is assumed to be visible
+	 * after IPI is sent from wakers.
+	 */
 	if (rq->nr_running > 1)
 		return false;
 
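[Editor's note] Dropping the smp_rmb() leans on the assumption spelled out in the new comment: the IPI itself is taken to order the waker's rq->nr_running update before the target re-runs this check. A sketch of that pairing in portable C11 atomics follows; it is illustrative only (the kernel relies on the architecture's IPI barrier guarantees, not on these primitives, and wake_up_task/can_stop_tick are hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint nr_running = 1;
static atomic_bool ipi_pending;

/* Waker side: bump nr_running, then "send the IPI". */
static void wake_up_task(void)
{
	atomic_fetch_add_explicit(&nr_running, 1, memory_order_relaxed);
	/* The release store plays the role of the IPI's implied barrier:
	 * it publishes the nr_running update together with the kick. */
	atomic_store_explicit(&ipi_pending, true, memory_order_release);
}

/* Target side: the kick handler ends up re-running this check. */
static bool can_stop_tick(void)
{
	/* The acquire load pairs with the release above, so once the
	 * kick is seen, the new nr_running value is seen as well. */
	if (atomic_load_explicit(&ipi_pending, memory_order_acquire))
		return atomic_load_explicit(&nr_running,
					    memory_order_relaxed) <= 1;
	return true;
}

int main(void)
{
	wake_up_task();
	printf("tick can stop: %s\n", can_stop_tick() ? "yes" : "no");
	return 0;
}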
@@ -1568,9 +1575,7 @@ void scheduler_ipi(void)
 	 */
 	preempt_fold_need_resched();
 
-	if (llist_empty(&this_rq()->wake_list)
-			&& !tick_nohz_full_cpu(smp_processor_id())
-			&& !got_nohz_idle_kick())
+	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -1587,7 +1592,6 @@ void scheduler_ipi(void)
 	 * somewhat pessimize the simple resched case.
 	 */
 	irq_enter();
-	tick_nohz_full_check();
 	sched_ttwu_pending();
 
 	/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 31cc02ebc54e..eb8567610295 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1221,9 +1221,15 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 
 #ifdef CONFIG_NO_HZ_FULL
 	if (prev_nr < 2 && rq->nr_running >= 2) {
 		if (tick_nohz_full_cpu(rq->cpu)) {
-			/* Order rq->nr_running write against the IPI */
-			smp_wmb();
-			smp_send_reschedule(rq->cpu);
+			/*
+			 * Tick is needed if more than one task runs on a CPU.
+			 * Send the target an IPI to kick it out of nohz mode.
+			 *
+			 * We assume that IPI implies full memory barrier and the
+			 * new value of rq->nr_running is visible on reception
+			 * from the target.
+			 */
+			tick_nohz_full_kick_cpu(rq->cpu);
 		}
 	}
 #endif
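[Editor's note] The guard around the kick, prev_nr < 2 && rq->nr_running >= 2, makes the IPI edge-triggered: only the crossing into two or more runnable tasks restarts the tick, so further wakeups on a CPU that is already ticking send nothing. A toy illustration of that condition (need_nohz_kick is a hypothetical helper, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the condition in add_nr_running(). */
static bool need_nohz_kick(unsigned prev_nr, unsigned nr_running)
{
	/* Only the crossing into nr_running >= 2 restarts the tick. */
	return prev_nr < 2 && nr_running >= 2;
}

int main(void)
{
	unsigned nr = 1;

	for (int i = 0; i < 3; i++) {
		unsigned prev = nr++;
		if (need_nohz_kick(prev, nr))
			printf("kick on %u -> %u\n", prev, nr); /* fires once */
	}
	return 0;
}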