author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-10-05 11:45:18 -0700
committer Clark Williams <williams@redhat.com>           2012-04-10 16:40:52 -0500
commit    3b06a483531beabbd6408ee32acdb097bbdc6053
tree      8c530876dde93f7169adf7f48e830815cc70f912 /kernel
parent    c33c9d5d625ebef504fa1f7823c260367b421a61
rcu: Make ksoftirqd do RCU quiescent states
Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
to network-based denial-of-service attacks: RCU-bh exists precisely so
that its grace periods complete even when softirq processing runs
continuously, and mapping it onto RCU-preempt loses that guarantee,
allowing callbacks to pile up and exhaust memory under heavy load.
This patch therefore makes __do_softirq() invoke rcu_bh_qs(), but only
when __do_softirq() is running in ksoftirqd context. A wrapper layer
is interposed so that other calls to __do_softirq() avoid invoking
rcu_bh_qs(). The underlying function __do_softirq_common() does the
actual work.
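In outline, the layering works as in the following standalone toy model
(illustration only, not kernel code; do_softirq_common() and report_qs()
are hypothetical stand-ins for __do_softirq_common() and rcu_bh_qs()):

	#include <stdio.h>

	/* Stand-in for rcu_bh_qs(). */
	static void report_qs(void)
	{
		printf("RCU-bh quiescent state reported\n");
	}

	/* Does the real work, like __do_softirq_common() in the patch. */
	static void do_softirq_common(int need_rcu_bh_qs)
	{
		/* ... run pending softirq handlers here ... */
		if (need_rcu_bh_qs)
			report_qs();	/* only the ksoftirqd path gets here */
	}

	/* Wrapper used by every other caller, like __do_softirq(). */
	static void do_softirq(void)
	{
		do_softirq_common(0);	/* never report a quiescent state */
	}

	int main(void)
	{
		do_softirq();		/* e.g. via local_bh_enable(): silent */
		do_softirq_common(1);	/* ksoftirqd context: reports the QS */
		return 0;
	}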
The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is
that there might be a local_bh_enable() inside an RCU-preempt read-side
critical section. This local_bh_enable() can invoke __do_softirq()
directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just
calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be
an illegal RCU-preempt quiescent state in the middle of an RCU-preempt
read-side critical section. Therefore, quiescent states can only happen
in cases where __do_softirq() is invoked directly from ksoftirqd.
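To make the hazard concrete, here is a minimal userspace sketch (again
purely illustrative; the toy_-prefixed functions are hypothetical
models, not kernel APIs) in which reporting a quiescent state inside a
read-side critical section would trip an assertion:

	#include <assert.h>

	/* Stand-in for the task's rcu_read_lock_nesting counter. */
	static int read_nesting;

	static void toy_rcu_read_lock(void)   { read_nesting++; }
	static void toy_rcu_read_unlock(void) { read_nesting--; }

	/* On PREEMPT_RT_FULL, rcu_bh_qs() is rcu_preempt_qs(); reporting
	 * a quiescent state inside a reader would wrongly tell the
	 * grace-period machinery that the reader has finished. */
	static void toy_rcu_preempt_qs(void)
	{
		assert(read_nesting == 0);
	}

	static void toy_local_bh_enable(void)
	{
		/* local_bh_enable() may run softirqs directly, so this
		 * path must never report a quiescent state ... */
		/* toy_rcu_preempt_qs();  <-- would fire the assert */
	}

	int main(void)
	{
		toy_rcu_read_lock();	/* RCU-preempt read-side section */
		toy_local_bh_enable();	/* safe: no QS reported here */
		toy_rcu_read_unlock();
		return 0;
	}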
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')

 kernel/rcutree.c        |  7 ++++++-
 kernel/rcutree.h        |  1 +
 kernel/rcutree_plugin.h |  2 +-
 kernel/softirq.c        | 20 +++++++++++++-------
 4 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3c1ae8c584ce..89ae09cdf9aa 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -172,7 +172,12 @@ void rcu_sched_qs(int cpu)
 	rdp->passed_quiesce = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
+void rcu_bh_qs(int cpu)
+{
+	rcu_preempt_qs(cpu);
+}
+#else
 void rcu_bh_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index cdd1be0a4072..6ee6525cbb82 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -423,6 +423,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
 long rcu_batches_completed(void);
+static void rcu_preempt_qs(int cpu);
 static void rcu_preempt_note_context_switch(int cpu);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index e4cc151a8839..14acafc496fe 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1899,7 +1899,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 35fb713dcdaa..34fe1db0d5e0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -139,7 +139,7 @@ static void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
-static void handle_pending_softirqs(u32 pending, int cpu)
+static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
 {
 	struct softirq_action *h = softirq_vec;
 	unsigned int prev_count = preempt_count();
@@ -162,7 +162,8 @@ static void handle_pending_softirqs(u32 pending, int cpu)
 			       prev_count, (unsigned int) preempt_count());
 			preempt_count() = prev_count;
 		}
-		rcu_bh_qs(cpu);
+		if (need_rcu_bh_qs)
+			rcu_bh_qs(cpu);
 	}
 	local_irq_disable();
 }
@@ -314,7 +315,7 @@ restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, 1);
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)
@@ -384,7 +385,12 @@ static inline void ksoftirqd_clr_sched_params(void) { }
 static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
 static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
 
-static void __do_softirq(void);
+static void __do_softirq_common(int need_rcu_bh_qs);
+
+void __do_softirq(void)
+{
+	__do_softirq_common(0);
+}
 
 void __init softirq_early_init(void)
 {
@@ -455,7 +461,7 @@ EXPORT_SYMBOL(in_serving_softirq);
  * Called with bh and local interrupts disabled. For full RT cpu must
  * be pinned.
  */
-static void __do_softirq(void)
+static void __do_softirq_common(int need_rcu_bh_qs)
 {
 	u32 pending = local_softirq_pending();
 	int cpu = smp_processor_id();
@@ -469,7 +475,7 @@ static void __do_softirq(void)
 
 	lockdep_softirq_enter();
 
-	handle_pending_softirqs(pending, cpu);
+	handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
 
 	pending = local_softirq_pending();
 	if (pending)
@@ -508,7 +514,7 @@ static int __thread_do_softirq(int cpu)
 	 * schedule!
 	 */
 	if (local_softirq_pending())
-		__do_softirq();
+		__do_softirq_common(cpu >= 0);
 	local_unlock(local_softirq_lock);
 	unpin_current_cpu();
 	preempt_disable();