Diffstat (limited to 'kernel/sched')

 kernel/sched/core.c     | 298
 kernel/sched/debug.c    |   7
 kernel/sched/features.h |   4
 kernel/sched/rt.c       |   1
 4 files changed, 245 insertions(+), 65 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30a..5fa31919118f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -263,7 +263,11 @@ late_initcall(sched_init_debug);
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
+#else
+const_debug unsigned int sysctl_sched_nr_migrate = 8;
+#endif
 
 /*
  * period over which we average the RT time consumption, measured
@@ -480,6 +484,7 @@ static void init_rq_hrtick(struct rq *rq)
 
 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	rq->hrtick_timer.function = hrtick;
+	rq->hrtick_timer.irqsafe = 1;
 }
 #else	/* CONFIG_SCHED_HRTICK */
 static inline void hrtick_clear(struct rq *rq)
@@ -1169,7 +1174,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * is actually now running somewhere else!
 		 */
 		while (task_running(rq, p)) {
-			if (match_state && unlikely(p->state != match_state))
+			if (match_state && unlikely(p->state != match_state)
+			    && unlikely(p->saved_state != match_state))
 				return 0;
 			cpu_relax();
 		}
@@ -1184,7 +1190,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		running = task_running(rq, p);
 		on_rq = p->on_rq;
 		ncsw = 0;
-		if (!match_state || p->state == match_state)
+		if (!match_state || p->state == match_state
+		    || p->saved_state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 		task_rq_unlock(rq, p, &flags);
 
@@ -1320,6 +1327,12 @@ out:
 		}
 	}
 
+	/*
+	 * Clear PF_THREAD_BOUND, otherwise we wreckage
+	 * migrate_disable/enable. See optimization for
+	 * PF_THREAD_BOUND tasks there.
+	 */
+	p->flags &= ~PF_THREAD_BOUND;
 	return dest_cpu;
 }
 
@@ -1399,10 +1412,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
 	p->on_rq = 1;
-
-	/* if a worker is waking up, notify workqueue */
-	if (p->flags & PF_WQ_WORKER)
-		wq_worker_waking_up(p, cpu_of(rq));
 }
 
 /*
@@ -1585,8 +1594,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	if (!(p->state & state))
+	if (!(p->state & state)) {
+		/*
+		 * The task might be running due to a spinlock sleeper
+		 * wakeup. Check the saved state and set it to running
+		 * if the wakeup condition is true.
+		 */
+		if (!(wake_flags & WF_LOCK_SLEEPER)) {
+			if (p->saved_state & state) {
+				p->saved_state = TASK_RUNNING;
+				success = 1;
+			}
+		}
 		goto out;
+	}
+
+	/*
+	 * If this is a regular wakeup, then we can unconditionally
+	 * clear the saved state of a "lock sleeper".
+	 */
+	if (!(wake_flags & WF_LOCK_SLEEPER))
+		p->saved_state = TASK_RUNNING;
 
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
@@ -1642,40 +1670,6 @@ out:
 }
 
 /**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
-	struct rq *rq = task_rq(p);
-
-	BUG_ON(rq != this_rq());
-	BUG_ON(p == current);
-	lockdep_assert_held(&rq->lock);
-
-	if (!raw_spin_trylock(&p->pi_lock)) {
-		raw_spin_unlock(&rq->lock);
-		raw_spin_lock(&p->pi_lock);
-		raw_spin_lock(&rq->lock);
-	}
-
-	if (!(p->state & TASK_NORMAL))
-		goto out;
-
-	if (!p->on_rq)
-		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
-	ttwu_do_wakeup(rq, p, 0);
-	ttwu_stat(p, smp_processor_id(), 0);
-out:
-	raw_spin_unlock(&p->pi_lock);
-}
-
-/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
@@ -1692,6 +1686,18 @@ int wake_up_process(struct task_struct *p)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+	return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
@@ -1967,8 +1973,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
+	/*
+	 * We use mmdrop_delayed() here so we don't have to do the
+	 * full __mmdrop() when we are the last user.
+	 */
 	if (mm)
-		mmdrop(mm);
+		mmdrop_delayed(mm);
 	if (unlikely(prev_state == TASK_DEAD)) {
 		/*
 		 * Remove function-return probe instances associated with this
@@ -3134,6 +3144,126 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 }
 
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN	(1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p)	((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p)	((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+	const struct cpumask *mask;
+
+	if (likely(!p->migrate_disable))
+		return;
+
+	/* Did we already update affinity? */
+	if (unlikely(migrate_disabled_updated(p)))
+		return;
+
+	/*
+	 * Since this is always current we can get away with only locking
+	 * rq->lock, the ->cpus_allowed value can normally only be changed
+	 * while holding both p->pi_lock and rq->lock, but seeing that this
+	 * is current, we cannot actually be waking up, so all code that
+	 * relies on serialization against p->pi_lock is out of scope.
+	 *
+	 * Having rq->lock serializes us against things like
+	 * set_cpus_allowed_ptr() that can still happen concurrently.
+	 */
+	mask = tsk_cpus_allowed(p);
+
+	if (p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, mask);
+	p->rt.nr_cpus_allowed = cpumask_weight(mask);
+
+	/* Let migrate_enable know to fix things back up */
+	p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+}
+
+void migrate_disable(void)
+{
+	struct task_struct *p = current;
+
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic++;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+	preempt_disable();
+	if (p->migrate_disable) {
+		p->migrate_disable++;
+		preempt_enable();
+		return;
+	}
+
+	pin_current_cpu();
+	p->migrate_disable = 1;
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+	struct task_struct *p = current;
+	const struct cpumask *mask;
+	unsigned long flags;
+	struct rq *rq;
+
+	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+#ifdef CONFIG_SCHED_DEBUG
+		p->migrate_disable_atomic--;
+#endif
+		return;
+	}
+
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+	WARN_ON_ONCE(p->migrate_disable <= 0);
+
+	preempt_disable();
+	if (migrate_disable_count(p) > 1) {
+		p->migrate_disable--;
+		preempt_enable();
+		return;
+	}
+
+	if (unlikely(migrate_disabled_updated(p))) {
+		/*
+		 * Undo whatever update_migrate_disable() did, also see there
+		 * about locking.
+		 */
+		rq = this_rq();
+		raw_spin_lock_irqsave(&rq->lock, flags);
+
+		/*
+		 * Clearing migrate_disable causes tsk_cpus_allowed to
+		 * show the tasks original cpu affinity.
+		 */
+		p->migrate_disable = 0;
+		mask = tsk_cpus_allowed(p);
+		if (p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(mask);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	} else
+		p->migrate_disable = 0;
+
+	unpin_current_cpu();
+	preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#else
+static inline void update_migrate_disable(struct task_struct *p) { }
+#define migrate_disabled_updated(p)	0
+#endif
+
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	if (prev->on_rq || rq->skip_clock_update < 0)
@@ -3193,6 +3323,8 @@ need_resched:
 
 	raw_spin_lock_irq(&rq->lock);
 
+	update_migrate_disable(prev);
+
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -3200,19 +3332,6 @@ need_resched:
 		} else {
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
 			prev->on_rq = 0;
-
-			/*
-			 * If a worker went to sleep, notify and ask workqueue
-			 * whether it wants to wake up a task to maintain
-			 * concurrency.
-			 */
-			if (prev->flags & PF_WQ_WORKER) {
-				struct task_struct *to_wakeup;
-
-				to_wakeup = wq_worker_sleeping(prev, cpu);
-				if (to_wakeup)
-					try_to_wake_up_local(to_wakeup);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -3255,6 +3374,14 @@ static inline void sched_submit_work(struct task_struct *tsk)
 {
 	if (!tsk->state || tsk_is_pi_blocked(tsk))
 		return;
+
+	/*
+	 * If a worker went to sleep, notify and ask workqueue whether
+	 * it wants to wake up a task to maintain concurrency.
+	 */
+	if (tsk->flags & PF_WQ_WORKER)
+		wq_worker_sleeping(tsk);
+
 	/*
 	 * If we are going to sleep and we have plugged IO queued,
 	 * make sure to submit it to avoid deadlocks.
@@ -3263,12 +3390,19 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
+static inline void sched_update_worker(struct task_struct *tsk)
+{
+	if (tsk->flags & PF_WQ_WORKER)
+		wq_worker_running(tsk);
+}
+
 asmlinkage void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
 	sched_submit_work(tsk);
 	__schedule();
+	sched_update_worker(tsk);
 }
 EXPORT_SYMBOL(schedule);
 
@@ -3348,7 +3482,16 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
+		/*
+		 * The add/subtract must not be traced by the function
+		 * tracer. But we still want to account for the
+		 * preempt off latency tracer. Since the _notrace versions
+		 * of add/subtract skip the accounting for latency tracer
+		 * we must force it manually.
+		 */
+		start_critical_timings();
 		__schedule();
+		stop_critical_timings();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4543,9 +4686,17 @@ static inline int should_resched(void)
 
 static void __cond_resched(void)
 {
-	add_preempt_count(PREEMPT_ACTIVE);
-	__schedule();
-	sub_preempt_count(PREEMPT_ACTIVE);
+	do {
+		add_preempt_count(PREEMPT_ACTIVE);
+		__schedule();
+		sub_preempt_count(PREEMPT_ACTIVE);
+		/*
+		 * Check again in case we missed a preemption
+		 * opportunity between schedule and now.
+		 */
+		barrier();
+
+	} while (need_resched());
 }
 
 int __sched _cond_resched(void)
@@ -4586,6 +4737,7 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
@@ -4599,6 +4751,7 @@ int __sched __cond_resched_softirq(void)
 	return 0;
 }
 EXPORT_SYMBOL(__cond_resched_softirq);
+#endif
 
 /**
  * yield - yield the current processor to other threads.
@@ -4953,11 +5106,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #ifdef CONFIG_SMP
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
+	if (!migrate_disabled_updated(p)) {
+		if (p->sched_class && p->sched_class->set_cpus_allowed)
+			p->sched_class->set_cpus_allowed(p, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
 	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
 /*
@@ -5008,7 +5162,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -5097,6 +5251,8 @@ static int migration_cpu_stop(void *data)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
 /*
  * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
@@ -5109,7 +5265,12 @@ void idle_task_exit(void)
 
 	if (mm != &init_mm)
 		switch_mm(mm, &init_mm, current);
-	mmdrop(mm);
+
+	/*
+	 * Defer the cleanup to an alive cpu. On RT we can neither
+	 * call mmdrop() nor mmdrop_delayed() from here.
+	 */
+	per_cpu(idle_last_mm, smp_processor_id()) = mm;
 }
 
 /*
@@ -5430,6 +5591,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		calc_global_load_remove(rq);
 		break;
+	case CPU_DEAD:
+		if (per_cpu(idle_last_mm, cpu)) {
+			mmdrop(per_cpu(idle_last_mm, cpu));
+			per_cpu(idle_last_mm, cpu) = NULL;
+		}
+		break;
 #endif
 	}
 
@@ -7058,7 +7225,8 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+	int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
+		sched_rcu_preempt_depth();
 
 	return (nested == preempt_offset);
 }
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 09acaa15161d..451512ff5f30 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -237,6 +237,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 	P(rt_throttled);
 	PN(rt_time);
 	PN(rt_runtime);
+#ifdef CONFIG_SMP
+	P(rt_nr_migratory);
+#endif
 
 #undef PN
 #undef P
@@ -485,6 +488,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.load.weight);
 	P(policy);
 	P(prio);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	P(migrate_disable);
+#endif
+	P(rt.nr_cpus_allowed);
 #undef PN
 #undef __PN
 #undef P
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d0..8e047dd4e4e1 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, true)
  */
 SCHED_FEAT(NONTASK_POWER, true)
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, true)
+#else
+SCHED_FEAT(TTWU_QUEUE, false)
+#endif
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 44af55e6d5d0..8bb9f00f8b49 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rt_b->rt_period_timer.irqsafe = 1;
 	rt_b->rt_period_timer.function = sched_rt_period_timer;
 }
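For orientation (not part of the diff above): the migrate_disable()/migrate_enable() pair added to core.c acts as a preemptible counterpart to get_cpu()/put_cpu() on PREEMPT_RT. The task is kept on its current CPU via pin_current_cpu(), calls nest through the migrate_disable counter, and the original affinity is restored on the final migrate_enable(). A minimal usage sketch follows; my_percpu_counter and my_percpu_update() are hypothetical names used only for illustration:

	/* Illustrative sketch: keep per-CPU data stable without disabling preemption. */
	static DEFINE_PER_CPU(unsigned long, my_percpu_counter);

	static void my_percpu_update(void)
	{
		migrate_disable();			 /* task stays on this CPU, may still be preempted */
		this_cpu_inc(my_percpu_counter);	 /* safe: no migration until migrate_enable() */
		migrate_enable();			 /* unpin and restore the original cpu affinity */
	}

As the diff shows, calls made from atomic context or from PF_THREAD_BOUND tasks degenerate to no-ops apart from the CONFIG_SCHED_DEBUG accounting.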