author | Ingo Molnar <mingo@elte.hu> | 2007-07-26 13:40:43 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2007-07-26 13:40:43 +0200
commit | 2cd4d0ea19713304963dbb2de5073700bfe253f5 (patch)
tree | 39a374d0918db059413f6709184669da63af81ef /kernel/sched.c
parent | 018a2212950457b1093e504cd834aa0fe749da6c (diff)
[PATCH] sched: make cpu_clock() not use the rq clock
It is enough to disable interrupts to get a precise rq-clock
reading for the local CPU.

This also fixes an NMI watchdog regression: the NMI watchdog
calls touch_softlockup_watchdog(), which could deadlock on
rq->lock if the NMI hits an rq-locked critical section.
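The deadlock scenario is worth spelling out: an NMI can fire while the interrupted CPU already holds rq->lock, and because the NMI handler cannot be preempted, spinning on that lock from inside the handler can never succeed. The userspace sketch below is an analogy only, not kernel code: a SIGALRM handler stands in for the NMI and a pthread spinlock stands in for rq->lock; the names fake_rq_lock and fake_nmi_handler are hypothetical.

/*
 * Userspace analogy of the deadlock the patch avoids: a handler that
 * needs a lock already held by the code it interrupted can never make
 * progress, because the interrupted code cannot run again until the
 * handler returns. trylock is used so the program reports the problem
 * instead of hanging the way an unconditional spin would.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t fake_rq_lock;	/* stands in for rq->lock */

static void fake_nmi_handler(int sig)
{
	ssize_t n;

	(void)sig;
	/* The real NMI path would spin here; trylock just reports it. */
	if (pthread_spin_trylock(&fake_rq_lock) != 0)
		n = write(STDOUT_FILENO,
			  "would deadlock: lock already held\n", 34);
	else
		pthread_spin_unlock(&fake_rq_lock);
	(void)n;
}

int main(void)
{
	pthread_spin_init(&fake_rq_lock, PTHREAD_PROCESS_PRIVATE);
	signal(SIGALRM, fake_nmi_handler);

	pthread_spin_lock(&fake_rq_lock);	/* "rq-locked critical section" */
	alarm(1);				/* "NMI" arrives while locked   */
	sleep(2);				/* handler runs in this window  */
	pthread_spin_unlock(&fake_rq_lock);
	return 0;
}

Built with cc -pthread, this prints "would deadlock: lock already held" because the handler finds the lock taken by the very code it interrupted; that is the situation the patch sidesteps by dropping rq->lock from cpu_clock() entirely.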
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cc6c1192c448..3eed860cf292 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -383,13 +383,12 @@ static inline unsigned long long rq_clock(struct rq *rq)
  */
 unsigned long long cpu_clock(int cpu)
 {
-	struct rq *rq = cpu_rq(cpu);
 	unsigned long long now;
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
-	now = rq_clock(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	local_irq_save(flags);
+	now = rq_clock(cpu_rq(cpu));
+	local_irq_restore(flags);
 
 	return now;
 }