author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-09-27 08:40:25 -0400
committer	Clark Williams <williams@redhat.com>	2012-04-13 11:01:38 -0500
commit		23cb1bfa1357dd7f808d7f84a4f9d833bc468369 (patch)
tree		8d4875bb74fb4cb0a323cb8b4f77b6b42ec707a5
parent		ff62eb8c393ad4184e7f7cda3bc4fe54f076f80c (diff)
sched: Have migrate_disable ignore bounded threads
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <williams@redhat.com>
Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 kernel/sched/core.c | 23 +++++++++---------------
 1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 380db9e7f0eb..b740265465f8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3176,7 +3176,7 @@ void migrate_disable(void)
 {
 	struct task_struct *p = current;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic++;
 #endif
@@ -3207,7 +3207,7 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic--;
 #endif
@@ -3228,26 +3228,21 @@ void migrate_enable(void)
 
 	if (unlikely(migrate_disabled_updated(p))) {
 		/*
-		 * See comment in update_migrate_disable() about locking.
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
 		 */
 		rq = this_rq();
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		mask = tsk_cpus_allowed(p);
+
 		/*
 		 * Clearing migrate_disable causes tsk_cpus_allowed to
 		 * show the tasks original cpu affinity.
 		 */
 		p->migrate_disable = 0;
-
-		WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
-		if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
-			/* Get the mask now that migration is enabled */
-			mask = tsk_cpus_allowed(p);
-			if (p->sched_class->set_cpus_allowed)
-				p->sched_class->set_cpus_allowed(p, mask);
-			p->rt.nr_cpus_allowed = cpumask_weight(mask);
-		}
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	} else
 		p->migrate_disable = 0;
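
For context, here is a minimal sketch (not part of the patch; the worker function and names are hypothetical) of the case this change targets: a kernel thread pinned with kthread_bind() runs with PF_THREAD_BOUND set and can never migrate, so after this patch its migrate_disable()/migrate_enable() calls return early, touching only the CONFIG_SCHED_DEBUG counter, instead of paying for the pinning bookkeeping above.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Hypothetical per-CPU worker. The thread is permanently bound to one
 * CPU, so tracking a migrate_disable depth for it is pure overhead.
 */
static int pinned_worker(void *unused)
{
	while (!kthread_should_stop()) {
		/*
		 * With PF_THREAD_BOUND set in current->flags, these
		 * calls now return early (modulo the debug counter).
		 */
		migrate_disable();
		/* ... touch per-CPU state; the CPU cannot change ... */
		migrate_enable();
		schedule();
	}
	return 0;
}

static struct task_struct *start_pinned_worker(int cpu)
{
	struct task_struct *t = kthread_create(pinned_worker, NULL,
					       "pinned/%d", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* sets PF_THREAD_BOUND */
		wake_up_process(t);
	}
	return t;
}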