| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-19 17:12:34 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 10:10:18 -0700 |
| commit | 5928a2b60cfdbad730f93696acab142d0b607280 (patch) | |
| tree | 49bb21c9219673e61bad7a7c9202c7f25f5fe1be /kernel/rcutiny_plugin.h | |
| parent | 5ed59af85077d28875a3a137b21933aaf1b4cd50 (diff) | |
| parent | bdd4431c8d071491a68a65d9457996f222b5ecd3 (diff) | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU changes for v3.4 from Ingo Molnar. The major features of this
series are:
- making RCU more aggressive about entering dyntick-idle mode in order
to improve energy efficiency
- converting a few more call_rcu()s to kfree_rcu()s (a conversion sketched
just after this list)
- applying a number of rcutree fixes and cleanups to rcutiny
- removing CONFIG_SMP #ifdefs from treercu
- allowing RCU CPU stall times to be set via sysfs
- adding CPU-stall capability to rcutorture
- adding more RCU-abuse diagnostics
- updating documentation
- fixing yet more issues located by the still-ongoing top-to-bottom
inspection of RCU, this time with a special focus on the CPU-hotplug
code path.
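
As an illustration of the call_rcu()-to-kfree_rcu() item above, here is a
minimal sketch of what such a conversion typically looks like. The struct foo
type and the foo_* functions are hypothetical, not taken from any patch in
this series; the pattern applies whenever a call_rcu() callback does nothing
but kfree() the enclosing object.

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object freed after a grace period. */
struct foo {
	int data;
	struct rcu_head rcu;
};

/* Before: an open-coded callback whose only job is the kfree(). */
static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rcu));
}

static void foo_release_before(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_reclaim);
}

/* After: kfree_rcu() takes the pointer and the name of the rcu_head
 * field, so the single-purpose callback disappears entirely. */
static void foo_release_after(struct foo *fp)
{
	kfree_rcu(fp, rcu);
}
```

Besides dropping boilerplate, this removes the chance of getting the
container_of() offset wrong in a hand-written callback.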
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
rcu: Stop spurious warnings from synchronize_sched_expedited
rcu: Hold off RCU_FAST_NO_HZ after timer posted
rcu: Eliminate softirq-mediated RCU_FAST_NO_HZ idle-entry loop
rcu: Add RCU_NONIDLE() for idle-loop RCU read-side critical sections
rcu: Allow nesting of rcu_idle_enter() and rcu_idle_exit()
rcu: Remove redundant check for rcu_head misalignment
PTR_ERR should be called before its argument is cleared.
rcu: Convert WARN_ON_ONCE() in rcu_lock_acquire() to lockdep
rcu: Trace only after NULL-pointer check
rcu: Call out dangers of expedited RCU primitives
rcu: Rework detection of use of RCU by offline CPUs
lockdep: Add CPU-idle/offline warning to lockdep-RCU splat
rcu: No interrupt disabling for rcu_prepare_for_idle()
rcu: Move synchronize_sched_expedited() to rcutree.c
rcu: Check for illegal use of RCU from offlined CPUs
rcu: Update stall-warning documentation
rcu: Add CPU-stall capability to rcutorture
rcu: Make documentation give more realistic rcutorture duration
rcutorture: Permit holding off CPU-hotplug operations during boot
rcu: Print scheduling-clock information on RCU CPU stall-warning messages
...
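
One commit in the list above, "rcu: Add RCU_NONIDLE() for idle-loop RCU
read-side critical sections", adds a wrapper for code that needs RCU after
the idle loop has already told RCU the CPU is idle. A hedged usage sketch
follows; do_idle_trace_event() is a hypothetical stand-in for whatever work
contains the read-side critical section:

```c
#include <linux/rcupdate.h>

static void idle_loop_bookkeeping(void)
{
	/*
	 * At this point the idle loop has entered RCU-idle, so RCU is
	 * ignoring this CPU and a plain rcu_read_lock() here would be
	 * illegal.  RCU_NONIDLE() momentarily exits RCU-idle around
	 * its argument, then re-enters it.
	 */
	RCU_NONIDLE(do_idle_trace_event());
}
```

Because the CPU is visible to RCU for the duration of the wrapped statement,
RCU_NONIDLE() should enclose as little code as possible.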
Diffstat (limited to 'kernel/rcutiny_plugin.h')
-rw-r--r-- | kernel/rcutiny_plugin.h | 77
1 file changed, 60 insertions(+), 17 deletions(-)
```diff
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 9cb1ae4aabdd..22ecea0dfb62 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -146,6 +147,16 @@ static int rcu_cpu_blocking_cur_gp(void)
 /*
  * Check for a running RCU reader.  Because there is only one CPU,
  * there can be but one running RCU reader at a time.  ;-)
+ *
+ * Returns zero if there are no running readers.  Returns a positive
+ * number if there is at least one reader within its RCU read-side
+ * critical section.  Returns a negative number if an outermost reader
+ * is in the midst of exiting from its RCU read-side critical section
+ *
+ * Returns zero if there are no running readers.  Returns a positive
+ * number if there is at least one reader within its RCU read-side
+ * critical section.  Returns a negative number if an outermost reader
+ * is in the midst of exiting from its RCU read-side critical section.
  */
 static int rcu_preempt_running_reader(void)
 {
@@ -307,7 +318,6 @@ static int rcu_boost(void)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
 	raw_local_irq_restore(flags);
 	rt_mutex_lock(&mtx);
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -475,7 +485,7 @@ void rcu_preempt_note_context_switch(void)
 	unsigned long flags;
 
 	local_irq_save(flags); /* must exclude scheduler_tick(). */
-	if (rcu_preempt_running_reader() &&
+	if (rcu_preempt_running_reader() > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -494,6 +504,13 @@ void rcu_preempt_note_context_switch(void)
 			list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
 		if (rcu_cpu_blocking_cur_gp())
 			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+	} else if (rcu_preempt_running_reader() < 0 &&
+		   t->rcu_read_unlock_special) {
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -526,12 +543,15 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
 	unsigned long flags;
 	struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	int special;
 
 	/*
@@ -552,7 +572,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		rcu_preempt_cpu_qs();
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -597,10 +617,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		}
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost self if was boosted. */
-		if (special & RCU_READ_UNLOCK_BOOSTED) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
-			rt_mutex_unlock(t->rcu_boost_mutex);
+		if (t->rcu_boost_mutex != NULL) {
+			rbmp = t->rcu_boost_mutex;
 			t->rcu_boost_mutex = NULL;
+			rt_mutex_unlock(rbmp);
 		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 	local_irq_restore(flags);
@@ -618,13 +638,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -649,7 +678,7 @@ static void rcu_preempt_check_callbacks(void)
 		invoke_rcu_callbacks();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
-	    rcu_preempt_running_reader())
+	    rcu_preempt_running_reader() > 0)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -706,6 +735,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
+	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
+			   !lock_is_held(&rcu_lock_map) &&
+			   !lock_is_held(&rcu_sched_lock_map),
+			   "Illegal synchronize_rcu() in RCU read-side critical section");
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (!rcu_scheduler_active)
 		return;
@@ -882,7 +916,8 @@ static void rcu_preempt_process_callbacks(void)
 static void invoke_rcu_callbacks(void)
 {
 	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
+	if (rcu_kthread_task != NULL)
+		wake_up(&rcu_kthread_wq);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -943,12 +978,16 @@ early_initcall(rcu_spawn_kthreads);
 
 #else /* #ifdef CONFIG_RCU_BOOST */
 
+/* Hold off callback invocation until early_initcall() time. */
+static int rcu_scheduler_fully_active __read_mostly;
+
 /*
  * Start up softirq processing of callbacks.
  */
 void invoke_rcu_callbacks(void)
 {
-	raise_softirq(RCU_SOFTIRQ);
+	if (rcu_scheduler_fully_active)
+		raise_softirq(RCU_SOFTIRQ);
 }
 
 #ifdef CONFIG_RCU_TRACE
@@ -963,10 +1002,14 @@ static bool rcu_is_callbacks_kthread(void)
 
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-void rcu_init(void)
+static int __init rcu_scheduler_really_started(void)
 {
+	rcu_scheduler_fully_active = 1;
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
+	return 0;
 }
+early_initcall(rcu_scheduler_really_started);
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
```
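
The __rcu_read_unlock() hunk above is the subtle part of this diff: instead
of decrementing the nesting counter to zero and then checking
->rcu_read_unlock_special (leaving a window where an unlock from interrupt
context could recurse into the special handling), the outermost unlock parks
the counter at INT_MIN while the special processing runs. Below is a sketch
of the counter protocol only, as plain single-threaded user-space C with
hypothetical names; the compiler barriers, preemption, and interrupt
interleavings that motivate the real code are not modeled here.

```c
#include <limits.h>
#include <stdio.h>

static int nesting;	/* models t->rcu_read_lock_nesting */
static int special;	/* models t->rcu_read_unlock_special */

/* The three states an observer (the scheduler, in the kernel) can see. */
static const char *reader_state(void)
{
	if (nesting > 0)
		return "reader running";
	if (nesting < 0)
		return "outermost reader mid-exit";
	return "no reader";
}

static void model_read_unlock(void)
{
	if (nesting != 1) {
		--nesting;		/* inner unlock: just decrement */
	} else {
		nesting = INT_MIN;	/* park: mark exit-in-progress */
		/* (compiler barrier here in the real code) */
		if (special) {
			/* special processing window: an observer now sees
			 * a negative count, not zero, so it can finish the
			 * exit on this task's behalf if it preempts us. */
			printf("during special work: %s\n", reader_state());
			special = 0;
		}
		/* (compiler barrier here in the real code) */
		nesting = 0;		/* fully outside */
	}
}

int main(void)
{
	nesting = 1;	/* models rcu_read_lock() */
	special = 1;	/* pretend we blocked inside the critical section */
	printf("before unlock: %s\n", reader_state());
	model_read_unlock();
	printf("after unlock: %s\n", reader_state());
	return 0;
}
```

The reworked WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2) in the same hunk
follows directly from this scheme: a count near INT_MIN is now a legitimate
transient state, so only moderately negative values indicate a real
lock/unlock imbalance.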