| author | Diwakar Tundlam <dtundlam@nvidia.com> | 2012-05-07 15:12:25 -0700 |
| --- | --- | --- |
| committer | Simone Willett <swillett@nvidia.com> | 2012-07-01 09:15:12 -0700 |
| commit | 0b5a8a6f30fe0eb7919294c58ddedaeab069ce2a (patch) | |
| tree | 3846b0190c7345fa6067b66023661fa0f6705769 /kernel/sched.c | |
| parent | 42b6ed043eb443b5a8e1de576aac124c3bcba4cb (diff) | |
scheduler: compute time-average nr_running per run-queue
Compute the time-average number of running tasks per run-queue for a
trailing window of a fixed time period. The delta added to or
subtracted from the average is weighted by the time spent at each
nr_running value relative to the total measurement period.
Change-Id: I076e24ff4ed65bed3b8dd8d2b279a503318071ff
Signed-off-by: Diwakar Tundlam <dtundlam@nvidia.com>
(cherry picked from commit 3a12d7499cee352e8a46eaf700259ba3c733f0e3)
Reviewed-on: http://git-master/r/111635
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sai Gurrappadi <sgurrappadi@nvidia.com>
Tested-by: Sai Gurrappadi <sgurrappadi@nvidia.com>
Reviewed-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
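To make the update rule described in the commit message concrete, here is a minimal userspace sketch of the same time-weighted, fixed-point averaging scheme. The names PERIOD_EXP, SCALE_SHIFT, SCALE(), update_avg(), last_stamp and ave, as well as the timeline in main(), are illustrative stand-ins for the kernel's NR_AVE_PERIOD_EXP, FSHIFT and do_avg_nr_running(); they are not code from this patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's NR_AVE_PERIOD_EXP and FSHIFT. */
#define PERIOD_EXP      27                      /* window ~= 2^27 ns ~= 134 ms */
#define PERIOD          (1LL << PERIOD_EXP)
#define SCALE_SHIFT     11                      /* fixed-point fraction bits */
#define SCALE(x)        ((int64_t)(x) << SCALE_SHIFT)

static uint64_t last_stamp;     /* timestamp of the previous update, in ns */
static int64_t ave;             /* time-averaged nr_running, fixed point */

/* Fold the interval [last_stamp, now_ns), spent at nr_during_interval
 * runnable tasks, into the trailing average. */
static void update_avg(uint64_t now_ns, unsigned int nr_during_interval)
{
        int64_t nr = SCALE(nr_during_interval);
        int64_t delta = (int64_t)(now_ns - last_stamp);

        last_stamp = now_ns;

        if (delta > PERIOD)
                ave = nr;       /* more than one full window elapsed: reset */
        else
                ave += (delta * (nr - ave)) >> PERIOD_EXP;  /* weighted step */
}

int main(void)
{
        update_avg(0, 0);               /* establish the starting timestamp */
        update_avg(100000000ULL, 2);    /* 0..100 ms spent with 2 runnable tasks */
        update_avg(130000000ULL, 0);    /* 100..130 ms spent with 0 runnable tasks */

        printf("time-averaged runnable tasks ~= %.2f\n",
               (double)ave / (1 << SCALE_SHIFT));
        return 0;
}
```

Each call folds the interval since the previous call into the average at the nr_running value that was in effect during that interval, so no per-sample history needs to be stored; an interval longer than the window simply resets the average, just as the patch below does when deltax exceeds NR_AVE_PERIOD.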
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 41 |
1 file changed, 41 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f6cf5cbc64ba..8b1b09686199 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -472,6 +472,10 @@ struct rq {
 #endif
        int skip_clock_update;
 
+       /* time-based average load */
+       u64 nr_last_stamp;
+       unsigned int ave_nr_running;
+
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
@@ -1756,13 +1760,40 @@ static const struct sched_class rt_sched_class;
 
 #include "sched_stats.h"
 
+/* 27 ~= 134217728ns = 134.2ms
+ * 26 ~= 67108864ns = 67.1ms
+ * 25 ~= 33554432ns = 33.5ms
+ * 24 ~= 16777216ns = 16.8ms
+ */
+#define NR_AVE_PERIOD_EXP      27
+#define NR_AVE_SCALE(x)        ((x) << FSHIFT)
+#define NR_AVE_PERIOD          (1 << NR_AVE_PERIOD_EXP)
+#define NR_AVE_DIV_PERIOD(x)   ((x) >> NR_AVE_PERIOD_EXP)
+
+static inline void do_avg_nr_running(struct rq *rq)
+{
+       s64 nr, deltax;
+
+       deltax = rq->clock_task - rq->nr_last_stamp;
+       rq->nr_last_stamp = rq->clock_task;
+       nr = NR_AVE_SCALE(rq->nr_running);
+
+       if (deltax > NR_AVE_PERIOD)
+               rq->ave_nr_running = nr;
+       else
+               rq->ave_nr_running +=
+                       NR_AVE_DIV_PERIOD(deltax * (nr - rq->ave_nr_running));
+}
+
 static void inc_nr_running(struct rq *rq)
 {
+       do_avg_nr_running(rq);
        rq->nr_running++;
 }
 
 static void dec_nr_running(struct rq *rq)
 {
+       do_avg_nr_running(rq);
        rq->nr_running--;
 }
@@ -3255,6 +3286,16 @@ unsigned long nr_iowait(void)
        return sum;
 }
 
+unsigned long avg_nr_running(void)
+{
+       unsigned long i, sum = 0;
+
+       for_each_online_cpu(i)
+               sum += cpu_rq(i)->ave_nr_running;
+
+       return sum;
+}
+
 unsigned long nr_iowait_cpu(int cpu)
 {
        struct rq *this = cpu_rq(cpu);
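The values summed by the new avg_nr_running() stay in fixed point: NR_AVE_SCALE() shifts by FSHIFT, the 11-bit fraction used by the kernel's load-average code, so one runnable task corresponds to 2048 units. A hypothetical consumer (not part of this patch) could decode such a value the same way the loadavg LOAD_INT/LOAD_FRAC macros do; the userspace sketch below, with a made-up example value, shows the conversion.

```c
#include <stdio.h>

#define FSHIFT  11              /* fraction bits, as in include/linux/sched.h */
#define FIXED_1 (1 << FSHIFT)

int main(void)
{
        unsigned long ave = 3 * FIXED_1 + 512;  /* example fixed-point value: 3.25 tasks */

        printf("%lu.%02lu average runnable tasks\n",
               ave >> FSHIFT,                            /* integer part */
               ((ave & (FIXED_1 - 1)) * 100) >> FSHIFT); /* two decimals */
        return 0;
}
```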