author		Peter Boonstoppel <pboonstoppel@nvidia.com>	2012-11-12 10:48:56 -0800
committer	Mrutyunjay Sawant <msawant@nvidia.com>	2012-11-30 07:07:43 -0800
commit		041c190f6010550197e8ddfe5ddc0b9c3e14e71b (patch)
tree		6df7411f0716d9c696e87fbc0b4c8b2d7367af85 /kernel/sched
parent		06078c5601e70fa8aad41fee704760ebf5cb63ac (diff)
Revert "Revert "cpuquiet: Update averaging of nr_runnables""
This reverts commit 3bca5808dcc371ea4e1d6ce555e3eae76fbe0e7e.
Bug 1050445
Bug 1050721
Change-Id: I6d51de129a10236e43c9ce262f879aa0f8361c77
Signed-off-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
Reviewed-on: http://git-master/r/163096
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Ilan Aelion <iaelion@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Diwakar Tundlam <dtundlam@nvidia.com>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	| 47
-rw-r--r--	kernel/sched/debug.c	|  3
-rw-r--r--	kernel/sched/sched.h	| 22
3 files changed, 26 insertions, 46 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b8f4618407b2..974c9b2c0754 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2161,35 +2161,10 @@ unsigned long this_cpu_load(void)
 	return this->cpu_load[0];
 }
 
-unsigned long avg_nr_running(void)
-{
-	unsigned long i, sum = 0;
-	unsigned int seqcnt, ave_nr_running;
-
-	for_each_online_cpu(i) {
-		struct rq *q = cpu_rq(i);
-
-		/*
-		 * Update average to avoid reading stalled value if there were
-		 * no run-queue changes for a long time. On the other hand if
-		 * the changes are happening right now, just read current value
-		 * directly.
-		 */
-		seqcnt = read_seqcount_begin(&q->ave_seqcnt);
-		ave_nr_running = do_avg_nr_running(q);
-		if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
-			read_seqcount_begin(&q->ave_seqcnt);
-			ave_nr_running = q->ave_nr_running;
-		}
-
-		sum += ave_nr_running;
-	}
-
-	return sum;
-}
-
-unsigned long get_avg_nr_running(unsigned int cpu)
+u64 nr_running_integral(unsigned int cpu)
 {
+	unsigned int seqcnt;
+	u64 integral;
 	struct rq *q;
 
 	if (cpu >= nr_cpu_ids)
@@ -2197,7 +2172,21 @@ unsigned long get_avg_nr_running(unsigned int cpu)
 
 	q = cpu_rq(cpu);
 
-	return q->ave_nr_running;
+	/*
+	 * Update average to avoid reading stalled value if there were
+	 * no run-queue changes for a long time. On the other hand if
+	 * the changes are happening right now, just read current value
+	 * directly.
+	 */
+
+	seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+	integral = do_nr_running_integral(q);
+	if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+		read_seqcount_begin(&q->ave_seqcnt);
+		integral = q->nr_running_integral;
+	}
+
+	return integral;
 }
 
 /*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 06d172eb5cea..09acaa15161d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -266,9 +266,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
 	P(nr_running);
-	SEQ_printf(m, "  .%-30s: %d.%03d \n", "ave_nr_running",
-		   rq->ave_nr_running / FIXED_1,
-		   ((rq->ave_nr_running % FIXED_1) * 1000) / FIXED_1);
 	SEQ_printf(m, "  .%-30s: %lu\n", "load",
 		   rq->load.weight);
 	P(nr_switches);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef5a1ff65196..99589411f980 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -365,7 +365,7 @@ struct rq {
 
 	/* time-based average load */
 	u64 nr_last_stamp;
-	unsigned int ave_nr_running;
+	u64 nr_running_integral;
 	seqcount_t ave_seqcnt;
 
 	/* capture load from *all* tasks on this cpu: */
@@ -924,32 +924,26 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
  *  25 ~= 33554432ns = 33.5ms
  *  24 ~= 16777216ns = 16.8ms
  */
-#define NR_AVE_PERIOD_EXP	27
 #define NR_AVE_SCALE(x)		((x) << FSHIFT)
-#define NR_AVE_PERIOD		(1 << NR_AVE_PERIOD_EXP)
-#define NR_AVE_DIV_PERIOD(x)	((x) >> NR_AVE_PERIOD_EXP)
-static inline unsigned int do_avg_nr_running(struct rq *rq)
+
+static inline u64 do_nr_running_integral(struct rq *rq)
 {
 	s64 nr, deltax;
-	unsigned int ave_nr_running = rq->ave_nr_running;
+	u64 nr_running_integral = rq->nr_running_integral;
 
 	deltax = rq->clock_task - rq->nr_last_stamp;
 	nr = NR_AVE_SCALE(rq->nr_running);
 
-	if (deltax > NR_AVE_PERIOD)
-		ave_nr_running = nr;
-	else
-		ave_nr_running +=
-			NR_AVE_DIV_PERIOD(deltax * (nr - ave_nr_running));
+	nr_running_integral += nr * deltax;
 
-	return ave_nr_running;
+	return nr_running_integral;
 }
 
 static inline void inc_nr_running(struct rq *rq)
 {
 	write_seqcount_begin(&rq->ave_seqcnt);
-	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_running_integral = do_nr_running_integral(rq);
 	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running++;
 	write_seqcount_end(&rq->ave_seqcnt);
@@ -958,7 +952,7 @@ static inline void inc_nr_running(struct rq *rq)
 static inline void dec_nr_running(struct rq *rq)
 {
 	write_seqcount_begin(&rq->ave_seqcnt);
-	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_running_integral = do_nr_running_integral(rq);
 	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running--;
 	write_seqcount_end(&rq->ave_seqcnt);
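For context, the restored scheme drops the in-kernel exponential filter (the NR_AVE_PERIOD_EXP machinery) and instead accumulates a raw time integral of nr_running, scaled by FSHIFT and weighted by nanoseconds of clock_task time. A consumer such as the cpuquiet governor can then recover an exact time-weighted average over any window it chooses by differencing two readings of nr_running_integral(). The following is a minimal userspace sketch of that consumer-side arithmetic, not code from this patch: the struct sample type, the window_avg_nr_running() helper, and the hard-coded trace values are illustrative assumptions, while FSHIFT/FIXED_1 mirror the kernel's fixed-point constants.

#include <stdio.h>
#include <stdint.h>

#define FSHIFT	11			/* fixed-point shift, as in <linux/sched.h> */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */

/* Hypothetical pair of readings taken by a sampling governor. */
struct sample {
	uint64_t time_ns;	/* rq->clock_task when the sample was taken */
	uint64_t integral;	/* nr_running_integral(cpu) at that time */
};

/*
 * Average nr_running over the window [a, b], in FSHIFT fixed point.
 * The kernel accumulates (nr_running << FSHIFT) * delta_ns, so the
 * integral delta divided by the time delta recovers the exact
 * time-weighted mean that the old exponential filter approximated.
 */
static uint64_t window_avg_nr_running(const struct sample *a,
				      const struct sample *b)
{
	uint64_t dt = b->time_ns - a->time_ns;

	return dt ? (b->integral - a->integral) / dt : 0;
}

int main(void)
{
	/* Illustrative trace: 2 tasks runnable for 10 ms, then 4 for 30 ms. */
	struct sample s0 = { .time_ns = 0, .integral = 0 };
	struct sample s1 = {
		.time_ns  = 40000000ULL,
		.integral = (2ULL << FSHIFT) * 10000000ULL +
			    (4ULL << FSHIFT) * 30000000ULL,
	};
	uint64_t avg = window_avg_nr_running(&s0, &s1);

	/* Prints 3.500: (2 * 10 + 4 * 30) / 40. */
	printf("avg nr_running = %llu.%03llu\n",
	       (unsigned long long)(avg >> FSHIFT),
	       (unsigned long long)(((avg & (FIXED_1 - 1)) * 1000) >> FSHIFT));
	return 0;
}

Because the window length becomes a pure consumer-side policy decision under this scheme, the fixed 2^27 ns decay period (and the stale-value risk it carried on long-idle run queues) can be deleted from the scheduler entirely, which is what the sched.h hunk above does.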