author     Thomas Gleixner <tglx@linutronix.de>    2011-06-06 20:07:38 +0200
committer  Clark Williams <williams@redhat.com>    2012-04-04 09:14:36 -0500
commit     060f1b37c8df7b216822b0385e66bb8cd2a27cf0 (patch)
tree       ae2a49157c97ed93e95a6ffeab59ab6b1fd49138 /kernel
parent     51485efac802b4db9c27074accabb37089480377 (diff)
sched-prevent-idle-boost.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f101d970ebda..b8139468fda9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5037,6 +5037,24 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	rq = __task_rq_lock(p);
 
+	/*
+	 * Idle task boosting is a nono in general. There is one
+	 * exception, when PREEMPT_RT and NOHZ is active:
+	 *
+	 * The idle task calls get_next_timer_interrupt() and holds
+	 * the timer wheel base->lock on the CPU and another CPU wants
+	 * to access the timer (probably to cancel it). We can safely
+	 * ignore the boosting request, as the idle CPU runs this code
+	 * with interrupts disabled and will complete the lock
+	 * protected section without being interrupted. So there is no
+	 * real need to boost.
+	 */
+	if (unlikely(p == rq->idle)) {
+		WARN_ON(p != rq->curr);
+		WARN_ON(p->pi_blocked_on);
+		goto out_unlock;
+	}
+
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
@@ -5060,11 +5078,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
+out_unlock:
 	__task_rq_unlock(rq);
 }
-
 #endif
-
 void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta, on_rq;
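
For illustration, a minimal user-space sketch of the pattern the first hunk
introduces: if the task whose priority is about to be boosted is the CPU's
idle task, the priority change is skipped and control jumps straight to the
unlock path. The struct task, struct rq and setprio() below are simplified
stand-ins for this sketch, not the kernel's task_struct/rq API; the kernel
version additionally warns if the idle task is blocked on a PI mutex
(WARN_ON(p->pi_blocked_on)), which would indicate a real bug.

#include <assert.h>
#include <stdio.h>

struct task { int prio; };
struct rq   { struct task *curr; struct task *idle; };

static void setprio(struct rq *rq, struct task *p, int prio)
{
	/* ... the rq lock would be taken here ... */

	if (p == rq->idle) {
		/* Mirrors WARN_ON(p != rq->curr): the idle task can only
		 * be boosted while it is the task running on that CPU. */
		assert(p == rq->curr);
		goto out_unlock;		/* ignore the boost request */
	}

	p->prio = prio;				/* normal boost path */

out_unlock:
	/* ... the rq lock would be released here ... */
	return;
}

int main(void)
{
	struct task idle = { .prio = 140 };
	struct task work = { .prio = 120 };
	struct rq rq = { .curr = &idle, .idle = &idle };

	setprio(&rq, &idle, 10);	/* ignored: idle keeps prio 140 */
	setprio(&rq, &work, 10);	/* applied: work drops to prio 10 */
	printf("idle prio %d, work prio %d\n", idle.prio, work.prio);
	return 0;
}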