author     Venkatesh Pallipadi <venki@google.com>   2011-02-10 10:23:28 +0100
committer  Greg Kroah-Hartman <gregkh@suse.de>      2011-02-17 15:37:27 -0800
commit     19d3e3cbe91fd8c800654ff2b54cdb993cdf0336 (patch)
tree       047ef8fcdaf133f83cafec726dabd71eb4c3dffe
parent     c8c885599ad2115e0a2fe661c2fb6ba4edc92c19 (diff)
sched: Call tick_check_idle before __irq_enter
Commit d267f87fb8179c6dba03d08b91952e81bc3723c7 upstream.

When the CPU is idle, irq_enter() calls tick_check_idle() on the first interrupt to signal the exit from idle. There is a problem, however, when this call is made after __irq_enter(): every routine inside __irq_enter() may observe stale time, because tick_check_idle() has not run yet. This specifically affects trace calls in __irq_enter() that use the global clock, and also the account_system_vtime() change in this patch, which wants to use sched_clock_cpu() for proper irq timing.

However, tick_check_idle() was deliberately moved after __irq_enter() to prevent unneeded ksoftirqd wakeups, by commit ee5f80a:

    irq: call __irq_enter() before calling the tick_idle_check
    Impact: avoid spurious ksoftirqd wakeups

Moving tick_check_idle() before __irq_enter() and wrapping it in local_bh_disable()/_local_bh_enable() solves both problems.

Fixed-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-9-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
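For readability, here is approximately how irq_enter() reads once this patch is applied. This is reconstructed from the kernel/softirq.c hunk below; the smp_processor_id() declaration comes from the surrounding 2.6.32-era function body and is an assumption, not part of this diff.

void irq_enter(void)
{
	int cpu = smp_processor_id();	/* from surrounding context, not this diff */

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);	/* runs before __irq_enter, so time is current */
		_local_bh_enable();	/* re-enable bh without running softirqs */
	}

	__irq_enter();
}

Note the use of _local_bh_enable() rather than local_bh_enable(): the underscore variant drops the softirq count without invoking pending softirqs, so anything tick_check_idle() raises is left to be serviced on the normal return-from-interrupt path instead of waking ksoftirqd.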
-rw-r--r--  kernel/sched.c    2
-rw-r--r--  kernel/softirq.c  12
2 files changed, 10 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4f8609420bec..c7f69fe024e1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1872,8 +1872,8 @@ void account_system_vtime(struct task_struct *curr)
 
 	local_irq_save(flags);
 
-	now = sched_clock();
 	cpu = smp_processor_id();
+	now = sched_clock_cpu(cpu);
 	delta = now - per_cpu(irq_start_time, cpu);
 	per_cpu(irq_start_time, cpu) = now;
 	/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0a617de20434..04a0252d7d24 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -296,10 +296,16 @@ void irq_enter(void)
 
 	rcu_irq_enter();
 	if (idle_cpu(cpu) && !in_interrupt()) {
-		__irq_enter();
+		/*
+		 * Prevent raise_softirq from needlessly waking up ksoftirqd
+		 * here, as softirq will be serviced on return from interrupt.
+		 */
+		local_bh_disable();
 		tick_check_idle(cpu);
-	} else
-		__irq_enter();
+		_local_bh_enable();
+	}
+
+	__irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
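For the kernel/sched.c side, the reordered lines sit in account_system_vtime() roughly as sketched below. The declarations and the sched_clock_irqtime early return are reconstructed from the 2.6.32-era irq-time accounting code and are assumptions for illustration; only the cpu/now reordering is part of this diff.

void account_system_vtime(struct task_struct *curr)
{
	unsigned long flags;
	int cpu;
	u64 now, delta;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	now = sched_clock_cpu(cpu);	/* per-cpu clock; cpu must be known first */
	delta = now - per_cpu(irq_start_time, cpu);
	per_cpu(irq_start_time, cpu) = now;
	/* ... hardirq/softirq time is then accounted from delta ... */
	local_irq_restore(flags);
}

Because sched_clock_cpu() can return stale time until tick_check_idle() has run, this change depends on the irq_enter() reordering above.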