author    | Thomas Gleixner <tglx@linutronix.de>     | 2009-07-03 13:16:38 -0500
committer | Clark Williams <williams@redhat.com>     | 2011-12-28 16:25:42 -0600
commit    | 94850d09da357d9d422e28dcdca017246a49d2b6 (patch)
tree      | 7d169742098d2ddb2cc510d844c966b2450effc6
parent    | bfa02feb9a9bfe38df60a581ca9bcecd94d274a6 (diff)
softirq: Sanitize softirq pending for NOHZ/RT
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | include/linux/interrupt.h |  2
-rw-r--r-- | kernel/softirq.c          | 61
-rw-r--r-- | kernel/time/tick-sched.c  |  8
3 files changed, 64 insertions, 7 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b9162dc541ff..74e28d96480a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -471,6 +471,8 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+extern void softirq_check_pending_idle(void);
+
 /* This is the worklist that queues up per-cpu softirq work.
  *
  * send_remote_sendirq() adds work to these lists, and
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c6c58240d267..83326220d049 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
+#ifdef CONFIG_NO_HZ
+# ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * On preempt-rt a softirq might be blocked on a lock. There might be
+ * no other runnable task on this CPU because the lock owner runs on
+ * some other CPU. So we have to go into idle with the pending bit
+ * set. Therefor we need to check this otherwise we warn about false
+ * positives which confuses users and defeats the whole purpose of
+ * this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+	u32 warnpending = 0, pending = local_softirq_pending();
+
+	if (rate_limit >= 10)
+		return;
+
+	if (pending) {
+		struct task_struct *tsk;
+
+		tsk = __get_cpu_var(ksoftirqd);
+		/*
+		 * The wakeup code in rtmutex.c wakes up the task
+		 * _before_ it sets pi_blocked_on to NULL under
+		 * tsk->pi_lock. So we need to check for both: state
+		 * and pi_blocked_on.
+		 */
+		raw_spin_lock(&tsk->pi_lock);
+
+		if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
+			warnpending = 1;
+
+		raw_spin_unlock(&tsk->pi_lock);
+	}
+
+	if (warnpending) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       pending);
+		rate_limit++;
+	}
+}
+# else
+/*
+ * On !PREEMPT_RT we just printk rate limited:
+ */
+void softirq_check_pending_idle(void)
+{
+	static int rate_limit;
+
+	if (rate_limit < 10) {
+		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+		       local_softirq_pending());
+		rate_limit++;
+	}
+}
+# endif
+#endif
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 31b9344e8ac6..57315c5780be 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -332,13 +332,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 
 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-		static int ratelimit;
-
-		if (ratelimit < 10) {
-			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-			       (unsigned int) local_softirq_pending());
-			ratelimit++;
-		}
+		softirq_check_pending_idle();
 		goto end;
 	}
 
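
For readers who want to see the decision this patch makes in isolation, below is a minimal standalone sketch, not part of the patch and not kernel code: struct fake_task, pending_softirqs and check_pending_idle() are made-up stand-ins for task_struct, local_softirq_pending() and the new softirq_check_pending_idle(). It only illustrates the idea that the pending-softirq warning is skipped when the softirq thread is either already runnable or blocked on a lock; the real function additionally takes tsk->pi_lock to avoid racing with the rtmutex wakeup path, which the sketch omits.

#include <stdio.h>

/* Stand-in for the fields of task_struct this check cares about. */
struct fake_task {
	int pi_blocked_on;	/* non-zero: blocked on an rtmutex */
	int running;		/* non-zero: TASK_RUNNING */
};

static unsigned int pending_softirqs;	/* stand-in for local_softirq_pending() */
static int rate_limit;

static void check_pending_idle(struct fake_task *ksoftirqd)
{
	int warnpending = 0;

	if (rate_limit >= 10)
		return;

	if (pending_softirqs) {
		/*
		 * Do not warn if ksoftirqd will run soon anyway (it is
		 * runnable) or if it is blocked on a lock whose owner runs
		 * on another CPU - in both cases pending bits are expected.
		 */
		if (!ksoftirqd->pi_blocked_on && !ksoftirqd->running)
			warnpending = 1;
	}

	if (warnpending) {
		printf("NOHZ: local_softirq_pending %02x\n", pending_softirqs);
		rate_limit++;
	}
}

int main(void)
{
	struct fake_task ksoftirqd = { .pi_blocked_on = 1, .running = 0 };

	pending_softirqs = 0x08;

	check_pending_idle(&ksoftirqd);	/* blocked on a lock: no warning */

	ksoftirqd.pi_blocked_on = 0;
	check_pending_idle(&ksoftirqd);	/* neither runnable nor blocked: warns */

	return 0;
}

The two-condition test mirrors the comment in the patch: the rtmutex wakeup code wakes the task before clearing pi_blocked_on, so checking state alone (or pi_blocked_on alone) could still produce a false positive.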