author	Alex Frid <afrid@nvidia.com>	2012-05-18 12:18:38 -0700
committer	Ryan Wong <ryanw@nvidia.com>	2012-06-11 22:02:45 -0700
commit	77c0e45d065e2361cc3f2ad9a856e9e6c5c09b6c (patch)
tree	edb8df872f7358ab448a355e301ba9b197767b0f
parent	6ba21f5c089ab79e10a82f653ff66f965d744d17 (diff)
scheduler: Re-compute time-average nr_running on read
Re-compute the time-average nr_running when it is read. This prevents
returning a stale average when there have been no run-queue changes for
a long time. The freshly computed average is returned to the reader but
not stored back, to avoid concurrent writes; light-weight sequence
counter synchronization is used to keep the re-computation consistent
with concurrent updates.

Change-Id: I08de26087414d6663633c723ddfcbf1099a01ce2
Signed-off-by: Alex Frid <afrid@nvidia.com>
Reviewed-on: http://git-master/r/108072
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Ryan Wong <ryanw@nvidia.com>
Tested-by: Ryan Wong <ryanw@nvidia.com>
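The synchronization scheme works as follows: inc_nr_running() and
dec_nr_running() publish a consistent (ave_nr_running, nr_last_stamp)
pair inside a write_seqcount_begin()/end() section, while
avg_nr_running() recomputes the average from a snapshot of that pair
and falls back to the stored value when a writer races with it. Below
is a minimal, hypothetical userspace sketch of the same pattern, with
C11 atomics standing in for the kernel's seqcount_t; all names and
constants are illustrative, and the plain field accesses would need
READ_ONCE()/WRITE_ONCE()-style annotations in real code:

#include <stdatomic.h>
#include <stdio.h>

#define PERIOD_EXP 20                 /* illustrative; not the kernel's value */
#define PERIOD (1LL << PERIOD_EXP)

/* Illustrative stand-ins for rq->ave_seqcnt, nr_last_stamp,
 * ave_nr_running and nr_running; none of these names are from the patch. */
struct avg_state {
	atomic_uint seq;          /* even: stable; odd: writer in progress */
	unsigned long long stamp; /* time of the last stored update */
	unsigned int ave;         /* stored time-average */
	unsigned int val;         /* current instantaneous value */
};

/* Pure recomputation, analogous to the reworked do_avg_nr_running():
 * derive a fresh average from stored state, writing nothing back.
 * Arithmetic right shift of a negative value is assumed, as in the
 * kernel's s64 code. */
static unsigned int recompute(const struct avg_state *s, unsigned long long now)
{
	long long delta = (long long)(now - s->stamp);
	long long nr = s->val, ave = s->ave;

	if (delta > PERIOD)
		return (unsigned int)nr;
	return (unsigned int)(ave + ((delta * (nr - ave)) >> PERIOD_EXP));
}

/* Writer, like inc_nr_running()/dec_nr_running(): bump the sequence to
 * odd, store the recomputed average and new state, bump back to even. */
static void avg_write(struct avg_state *s, unsigned long long now,
		      unsigned int val)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel); /* -> odd */
	s->ave = recompute(s, now);   /* averaged over the old value */
	s->stamp = now;
	s->val = val;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* -> even */
}

/* Reader, like the reworked avg_nr_running(): recompute from a
 * snapshot; if a writer raced, wait for it and take its stored value. */
static unsigned int avg_read(struct avg_state *s, unsigned long long now)
{
	unsigned int seq, ave;

	do {                          /* like read_seqcount_begin() */
		seq = atomic_load_explicit(&s->seq, memory_order_acquire);
	} while (seq & 1);

	ave = recompute(s, now);

	if (atomic_load_explicit(&s->seq, memory_order_acquire) != seq) {
		/* A writer raced with us. Spin until the sequence is even
		 * again (this is what the patch's second, discarded
		 * read_seqcount_begin() call does), then the stored
		 * value is fresh. */
		while (atomic_load_explicit(&s->seq, memory_order_acquire) & 1)
			;
		ave = s->ave;
	}
	return ave;
}

int main(void)
{
	struct avg_state s = { 0 };

	avg_write(&s, 100, 2);        /* value becomes 2 at t = 100 */
	/* After a quiet gap longer than PERIOD, the average converges: 2 */
	printf("%u\n", avg_read(&s, 100 + 2 * PERIOD));
	return 0;
}

Falling back to the stored value rather than looping keeps the read
bounded: if updates are happening right now, the writer's freshly
stored average is at most one update old, so it can be used directly.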
-rw-r--r--	kernel/sched.c | 44
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d6d87b918399..6bf009b6aa52 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -475,6 +475,7 @@ struct rq {
 	/* time-based average load */
 	u64 nr_last_stamp;
 	unsigned int ave_nr_running;
+	seqcount_t ave_seqcnt;
 
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
@@ -1769,31 +1770,39 @@ static const struct sched_class rt_sched_class;
 #define NR_AVE_PERIOD		(1 << NR_AVE_PERIOD_EXP)
 #define NR_AVE_DIV_PERIOD(x)	((x) >> NR_AVE_PERIOD_EXP)
 
-static inline void do_avg_nr_running(struct rq *rq)
+static inline unsigned int do_avg_nr_running(struct rq *rq)
 {
 	s64 nr, deltax;
+	unsigned int ave_nr_running = rq->ave_nr_running;
 
 	deltax = rq->clock_task - rq->nr_last_stamp;
-	rq->nr_last_stamp = rq->clock_task;
 	nr = NR_AVE_SCALE(rq->nr_running);
 
 	if (deltax > NR_AVE_PERIOD)
-		rq->ave_nr_running = nr;
+		ave_nr_running = nr;
 	else
-		rq->ave_nr_running +=
-			NR_AVE_DIV_PERIOD(deltax * (nr - rq->ave_nr_running));
+		ave_nr_running +=
+			NR_AVE_DIV_PERIOD(deltax * (nr - ave_nr_running));
+
+	return ave_nr_running;
 }
 
 static void inc_nr_running(struct rq *rq)
 {
-	do_avg_nr_running(rq);
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running++;
+	write_seqcount_end(&rq->ave_seqcnt);
 }
 
 static void dec_nr_running(struct rq *rq)
 {
-	do_avg_nr_running(rq);
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running--;
+	write_seqcount_end(&rq->ave_seqcnt);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -3288,9 +3297,26 @@ unsigned long nr_iowait(void)
 unsigned long avg_nr_running(void)
 {
 	unsigned long i, sum = 0;
+	unsigned int seqcnt, ave_nr_running;
 
-	for_each_online_cpu(i)
-		sum += cpu_rq(i)->ave_nr_running;
+	for_each_online_cpu(i) {
+		struct rq *q = cpu_rq(i);
+
+		/*
+		 * Update average to avoid reading stalled value if there were
+		 * no run-queue changes for a long time. On the other hand if
+		 * the changes are happening right now, just read current value
+		 * directly.
+		 */
+		seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+		ave_nr_running = do_avg_nr_running(q);
+		if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+			read_seqcount_begin(&q->ave_seqcnt);
+			ave_nr_running = q->ave_nr_running;
+		}
+
+		sum += ave_nr_running;
+	}
 
 	return sum;
 }
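A footnote on the averaging rule itself: do_avg_nr_running() moves the
stored average toward the scaled instantaneous nr_running by the
fraction deltax / NR_AVE_PERIOD in fixed point, and snaps straight to
it once a full period has elapsed. A small worked sketch with an
illustrative period exponent (the real NR_AVE_PERIOD_EXP and
NR_AVE_SCALE() are defined earlier in sched.c and are not shown in
this diff):

#include <stdio.h>

#define PERIOD_EXP 8                  /* illustrative; not the kernel's value */
#define PERIOD (1LL << PERIOD_EXP)

/* One update step of the patch's rule: move delta/PERIOD of the way
 * from the stored average toward the instantaneous value. */
static long long step(long long ave, long long nr, long long delta)
{
	if (delta > PERIOD)
		return nr;
	return ave + ((delta * (nr - ave)) >> PERIOD_EXP);
}

int main(void)
{
	long long ave = 0, nr = 256; /* target: one task, in fixed point */
	int i;

	/* Updates every PERIOD/4 ticks: the average rises geometrically
	 * toward nr (64, 112, 148, ... with these constants). */
	for (i = 1; i <= 5; i++) {
		ave = step(ave, nr, PERIOD / 4);
		printf("after update %d: ave = %lld\n", i, ave);
	}
	return 0;
}

Run at a fixed interval, the average converges geometrically toward
the target; without this patch's recompute-on-read, a run queue that
then goes quiet would leave the stored average frozen at the last step
indefinitely, which is exactly the stale read the commit message
describes.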