Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 23
1 file changed, 9 insertions, 14 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1d0789134e78..51c0ad657663 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4397,7 +4397,7 @@ void migrate_disable(void)
 {
 	struct task_struct *p = current;

-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic++;
 #endif
@@ -4428,7 +4428,7 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;

-	if (in_atomic()) {
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic--;
 #endif
@@ -4449,26 +4449,21 @@ void migrate_enable(void)

 	if (unlikely(migrate_disabled_updated(p))) {
 		/*
-		 * See comment in update_migrate_disable() about locking.
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
 		 */
 		rq = this_rq();
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		mask = tsk_cpus_allowed(p);
+
 		/*
 		 * Clearing migrate_disable causes tsk_cpus_allowed to
 		 * show the tasks original cpu affinity.
 		 */
 		p->migrate_disable = 0;
-
-		WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-
-		if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
-			/* Get the mask now that migration is enabled */
-			mask = tsk_cpus_allowed(p);
-			if (p->sched_class->set_cpus_allowed)
-				p->sched_class->set_cpus_allowed(p, mask);
-			p->rt.nr_cpus_allowed = cpumask_weight(mask);
-		}
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	} else
 		p->migrate_disable = 0;
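
For illustration only, a minimal user-space sketch of the behavioural change this patch makes: a task carrying a bound-to-CPU flag now takes the same early-out as atomic context, bumping only the debug counter instead of the real migrate_disable count. The struct, flag value and function names below are simplified stand-ins invented for this sketch, not the kernel's definitions.

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative stand-in for the kernel's PF_THREAD_BOUND task flag. */
    #define MODEL_PF_THREAD_BOUND 0x04000000

    /* Simplified model of the fields migrate_disable() touches. */
    struct model_task {
    	unsigned int flags;
    	int migrate_disable;         /* affinity-narrowing count */
    	int migrate_disable_atomic;  /* debug-only counter */
    };

    /*
     * Model of the patched check: bound threads are treated like atomic
     * context and never enter the migrate_disable bookkeeping, since
     * they cannot migrate anyway.
     */
    static void migrate_disable_model(struct model_task *p, bool in_atomic)
    {
    	if (in_atomic || (p->flags & MODEL_PF_THREAD_BOUND)) {
    		p->migrate_disable_atomic++;
    		return;
    	}
    	p->migrate_disable++;
    }

    int main(void)
    {
    	struct model_task bound  = { .flags = MODEL_PF_THREAD_BOUND };
    	struct model_task normal = { 0 };

    	migrate_disable_model(&bound, false);
    	migrate_disable_model(&normal, false);

    	printf("bound:  migrate_disable=%d atomic=%d\n",
    	       bound.migrate_disable, bound.migrate_disable_atomic);
    	printf("normal: migrate_disable=%d atomic=%d\n",
    	       normal.migrate_disable, normal.migrate_disable_atomic);
    	return 0;
    }

The same flag test is added to migrate_enable() in the patch, so the two paths stay symmetric and a bound thread never unbalances the migrate_disable count.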