author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-09-02 14:29:27 +0200
committer	Clark Williams <williams@redhat.com>	2011-11-14 11:01:56 -0600
commit		1e1c91cabd8ac71062ce5e93b4e7ec5dbe094fc2 (patch)
tree		d966024068b05b6de96ef712c2aeeb816b82d889
parent		bc657abbbb4738478fc5f3522970e31cc809f67a (diff)
sched: teach migrate_disable about atomic contexts
<NMI>  [<ffffffff812dafd8>] spin_bug+0x94/0xa8
 [<ffffffff812db07f>] do_raw_spin_lock+0x43/0xea
 [<ffffffff814fa9be>] _raw_spin_lock_irqsave+0x6b/0x85
 [<ffffffff8106ff9e>] ? migrate_disable+0x75/0x12d
 [<ffffffff81078aaf>] ? pin_current_cpu+0x36/0xb0
 [<ffffffff8106ff9e>] migrate_disable+0x75/0x12d
 [<ffffffff81115b9d>] pagefault_disable+0xe/0x1f
 [<ffffffff81047027>] copy_from_user_nmi+0x74/0xe6
 [<ffffffff810489d7>] perf_callchain_user+0xf3/0x135

Now clearly we can't go around taking locks from NMI context, cure
this by short-circuiting migrate_disable() when we're in an atomic
context already.

Add some extra debugging to avoid things like:

    preempt_disable()
    migrate_disable();
    preempt_enable();
    migrate_enable();

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
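The failure mode and the fix can be modelled in a few lines of plain C.
Below is a minimal user-space sketch of the short-circuit and the debug
counter; in_atomic() is modelled by a bare preempt counter and
WARN_ON_ONCE() by assert(), so this illustrates the accounting only,
not the kernel implementation:

#include <assert.h>
#include <stdio.h>

static int preempt_count;           /* stand-in for the kernel preempt counter */
static int migrate_disable_count;   /* stand-in for p->migrate_disable */
static int migrate_disable_atomic;  /* stand-in for the new debug counter */

static int in_atomic(void) { return preempt_count != 0; }

static void migrate_disable(void)
{
	if (in_atomic()) {
		migrate_disable_atomic++;  /* short-circuit: no locks taken */
		return;
	}
	assert(!migrate_disable_atomic);   /* models WARN_ON_ONCE() */
	migrate_disable_count++;
}

static void migrate_enable(void)
{
	if (in_atomic()) {
		migrate_disable_atomic--;
		return;
	}
	assert(!migrate_disable_atomic);   /* fires on the bad nesting above */
	assert(migrate_disable_count > 0);
	migrate_disable_count--;
}

int main(void)
{
	preempt_count++;        /* preempt_disable() */
	migrate_disable();      /* short-circuited */
	migrate_enable();       /* balanced while still atomic */
	preempt_count--;        /* preempt_enable() */
	printf("atomic counter back to %d\n", migrate_disable_atomic);
	return 0;
}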
-rw-r--r--	include/linux/sched.h	3
-rw-r--r--	kernel/sched.c	21
2 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c322efeaba0..562a3451a9a4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1261,6 +1261,9 @@ struct task_struct {
 	unsigned int policy;
 #ifdef CONFIG_PREEMPT_RT_FULL
 	int migrate_disable;
+#ifdef CONFIG_SCHED_DEBUG
+	int migrate_disable_atomic;
+#endif
 #endif
 	cpumask_t cpus_allowed;
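The new field only exists when CONFIG_SCHED_DEBUG is set, so every
access to it has to sit under the same guard; the patch open-codes the
#ifdef at each use. A hypothetical wrapper (not part of the patch,
purely illustrative) showing the same guard pattern:

static inline void migrate_disable_atomic_inc(struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	p->migrate_disable_atomic++;
#endif
}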
diff --git a/kernel/sched.c b/kernel/sched.c
index 56ec6c221340..7693fda061f2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6292,6 +6292,17 @@ void migrate_disable(void)
 	unsigned long flags;
 	struct rq *rq;
 
+	if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
 	preempt_disable();
 	if (p->migrate_disable) {
 		p->migrate_disable++;
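With this hunk applied, the head of migrate_disable() reads roughly as
below. This is a reconstruction from the hunk context only: the
p = current assignment sits above the shown lines and the rq-locking
tail below them, so both are paraphrased:

void migrate_disable(void)
{
	struct task_struct *p = current;	/* above the hunk; assumed */
	unsigned long flags;
	struct rq *rq;

	if (in_atomic()) {
		/* NMI/irq/preempt-off: taking the rq lock here is what
		 * blew up in the backtrace above, so only account and
		 * bail out. */
#ifdef CONFIG_SCHED_DEBUG
		p->migrate_disable_atomic++;
#endif
		return;
	}

#ifdef CONFIG_SCHED_DEBUG
	/* A non-zero counter means a migrate_disable() taken in atomic
	 * context has not been paired up yet -- bad nesting. */
	WARN_ON_ONCE(p->migrate_disable_atomic);
#endif

	preempt_disable();
	if (p->migrate_disable) {
		p->migrate_disable++;	/* already pinned: just nest */
		preempt_enable();
		return;
	}
	/* ... slow path: pin the CPU and update affinity under the rq lock ... */
}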
@@ -6340,6 +6351,16 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;
 
+	if (in_atomic()) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
 	WARN_ON_ONCE(p->migrate_disable <= 0);
 	preempt_disable();
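The accounting is symmetric: a pair entered in atomic context must also
be left in atomic context, at which point the counter is back to zero
and neither WARN_ON_ONCE() fires. A sketch of the legal nesting, in
contrast to the bad one quoted in the changelog:

preempt_disable();      /* in_atomic() becomes true */
migrate_disable();      /* short-circuit: migrate_disable_atomic++ */
/* ... per-CPU work that must not migrate ... */
migrate_enable();       /* short-circuit: migrate_disable_atomic-- */
preempt_enable();       /* counter back to 0: no later warning */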