author     Vandana Salve <vsalve@nvidia.com>   2011-10-18 15:20:07 +0530
committer  Ryan Wong <ryanw@nvidia.com>        2011-10-20 13:39:14 -0700
commit     81c9154dad92e72da9199ab20dd7e3874c3359a3 (patch)
tree       ef7f4b6d671871256fc27491ae3f168f6f4aee0a
parent     9684358fc71e009141d8c09f8c95c03ea8814a06 (diff)
sched: Fix cross-sched-class wakeup preemption
Instead of dealing with sched classes inside each check_preempt_curr()
implementation, pull out this logic into the generic wakeup preemption
path.
Manually applied the partial patch set from
http://www.spinics.net/lists/kvm-commits/msg00039.html and
cross-referenced it against the k39 kernel.
Bug 886798
Change-Id: Ib5bb12eec6c276ded9231e9ed2238499d42e31da
Reviewed-on: http://git-master/r/59405
Reviewed-by: Ryan Wong <ryanw@nvidia.com>
Tested-by: Ryan Wong <ryanw@nvidia.com>
-rw-r--r--  kernel/sched.c       39
-rw-r--r--  kernel/sched_fair.c   6
2 files changed, 27 insertions, 18 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index a2e90f022cdb..ab209ca2fb90 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -558,18 +558,7 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-
-        /*
-         * A queue event has occurred, and we're going to schedule. In
-         * this case, we can save a useless back to back clock update.
-         */
-        if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
-                rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -1985,6 +1974,32 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+        const struct sched_class *class;
+
+        if (p->sched_class == rq->curr->sched_class) {
+                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+        } else {
+                for_each_class(class) {
+                        if (class == rq->curr->sched_class)
+                                break;
+                        if (class == p->sched_class) {
+                                resched_task(rq->curr);
+                                break;
+                        }
+                }
+        }
+
+        /*
+         * A queue event has occurred, and we're going to schedule. In
+         * this case, we can save a useless back to back clock update.
+         */
+        if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+                rq->skip_clock_update = 1;
+}
+
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 1bb879013173..b8ae4e51b46d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1658,12 +1658,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-        if (unlikely(rt_prio(p->prio)))
-                goto preempt;
-
-        if (unlikely(p->sched_class != &fair_sched_class))
-                return;
-
        if (unlikely(se == pse))
                return;
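
The net effect of the generic path added above: if the waking task and the current task share a scheduling class, that class's own check_preempt_curr() decides; otherwise for_each_class() walks the classes from highest to lowest priority, and the current task is rescheduled only when the waking task's class is reached first (i.e. outranks the current one). Below is a minimal standalone C sketch of that walk — the class names, the array-based iteration, and the cross_class_preempts() helper are illustrative assumptions for this note, not kernel code:

/*
 * Userspace model of the cross-class preemption walk. The kernel's
 * for_each_class() iterates sched classes highest-priority first
 * (the exact class list varies by kernel version); a plain array
 * stands in for it here.
 */
#include <stdio.h>
#include <string.h>

/* Highest-priority class first, mirroring the kernel's ordering. */
static const char *const classes[] = { "rt", "fair", "idle" };
#define NR_CLASSES (sizeof(classes) / sizeof(classes[0]))

/*
 * Returns 1 if a wakeup of class 'wakee' must preempt a running task
 * of class 'curr' purely on class priority: walking from the top,
 * hitting curr's class first means no cross-class preemption, while
 * hitting the wakee's class first means curr is outranked.
 */
static int cross_class_preempts(const char *curr, const char *wakee)
{
        size_t i;

        if (strcmp(curr, wakee) == 0)
                return 0;       /* same class: its own check decides */

        for (i = 0; i < NR_CLASSES; i++) {
                if (strcmp(classes[i], curr) == 0)
                        return 0;
                if (strcmp(classes[i], wakee) == 0)
                        return 1;
        }
        return 0;
}

int main(void)
{
        /* An rt wakeup preempts a fair task, but not vice versa. */
        printf("rt wakes vs fair curr:   %d\n", cross_class_preempts("fair", "rt"));
        printf("fair wakes vs rt curr:   %d\n", cross_class_preempts("rt", "fair"));
        printf("fair wakes vs idle curr: %d\n", cross_class_preempts("idle", "fair"));
        return 0;
}

This is why the rt_prio() and fair_sched_class checks could be deleted from check_preempt_wakeup() in kernel/sched_fair.c: the generic walk already guarantees that check_preempt_wakeup() is only called when both tasks are in the fair class.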