Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 67
1 file changed, 67 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f6cf5cbc64ba..bb40a1bca717 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -472,6 +472,11 @@ struct rq {
 #endif
 	int skip_clock_update;
 
+	/* time-based average load */
+	u64 nr_last_stamp;
+	unsigned int ave_nr_running;
+	seqcount_t ave_seqcnt;
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1756,14 +1761,49 @@ static const struct sched_class rt_sched_class;
 
 #include "sched_stats.h"
 
+/* 27 ~= 134217728ns = 134.2ms
+ * 26 ~= 67108864ns = 67.1ms
+ * 25 ~= 33554432ns = 33.5ms
+ * 24 ~= 16777216ns = 16.8ms
+ */
+#define NR_AVE_PERIOD_EXP	27
+#define NR_AVE_SCALE(x)		((x) << FSHIFT)
+#define NR_AVE_PERIOD		(1 << NR_AVE_PERIOD_EXP)
+#define NR_AVE_DIV_PERIOD(x)	((x) >> NR_AVE_PERIOD_EXP)
+
+static inline unsigned int do_avg_nr_running(struct rq *rq)
+{
+	s64 nr, deltax;
+	unsigned int ave_nr_running = rq->ave_nr_running;
+
+	deltax = rq->clock_task - rq->nr_last_stamp;
+	nr = NR_AVE_SCALE(rq->nr_running);
+
+	if (deltax > NR_AVE_PERIOD)
+		ave_nr_running = nr;
+	else
+		ave_nr_running +=
+			NR_AVE_DIV_PERIOD(deltax * (nr - ave_nr_running));
+
+	return ave_nr_running;
+}
+
 static void inc_nr_running(struct rq *rq)
 {
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running++;
+	write_seqcount_end(&rq->ave_seqcnt);
 }
 
 static void dec_nr_running(struct rq *rq)
 {
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->ave_nr_running = do_avg_nr_running(rq);
+	rq->nr_last_stamp = rq->clock_task;
 	rq->nr_running--;
+	write_seqcount_end(&rq->ave_seqcnt);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -3255,6 +3295,33 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+unsigned long avg_nr_running(void)
+{
+	unsigned long i, sum = 0;
+	unsigned int seqcnt, ave_nr_running;
+
+	for_each_online_cpu(i) {
+		struct rq *q = cpu_rq(i);
+
+		/*
+		 * Update average to avoid reading stalled value if there were
+		 * no run-queue changes for a long time. On the other hand if
+		 * the changes are happening right now, just read current value
+		 * directly.
+		 */
+		seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+		ave_nr_running = do_avg_nr_running(q);
+		if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+			read_seqcount_begin(&q->ave_seqcnt);
+			ave_nr_running = q->ave_nr_running;
+		}
+
+		sum += ave_nr_running;
+	}
+
+	return sum;
+}
+
 unsigned long nr_iowait_cpu(int cpu)
 {
 	struct rq *this = cpu_rq(cpu);
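
The patch keeps, per runqueue, a fixed-point exponential moving average of nr_running: on every inc/dec the stored average decays toward the instantaneous value in proportion to the time elapsed since the last change, and a gap longer than NR_AVE_PERIOD (~134.2 ms with NR_AVE_PERIOD_EXP == 27) simply snaps the average to the current nr_running. The standalone userspace sketch below, which is not part of the patch, replays that arithmetic so the fixed-point behaviour is easy to follow; it assumes FSHIFT is the kernel's usual load-average shift of 11, and struct fake_rq, the renamed sketch_avg_nr_running() helper, the 10 ms sampling loop and the timestamps are illustrative only.

/*
 * Standalone sketch of the fixed-point averaging used by do_avg_nr_running()
 * in the patch above.  Assumption: FSHIFT is the kernel's load-average shift
 * of 11; struct fake_rq and the sampling loop exist only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define FSHIFT			11		/* fixed-point precision bits */
#define NR_AVE_PERIOD_EXP	27		/* ~134.2 ms window */
#define NR_AVE_SCALE(x)		((uint64_t)(x) << FSHIFT)
#define NR_AVE_PERIOD		(1 << NR_AVE_PERIOD_EXP)
#define NR_AVE_DIV_PERIOD(x)	((x) >> NR_AVE_PERIOD_EXP)

struct fake_rq {
	uint64_t clock_task;		/* "now", in ns */
	uint64_t nr_last_stamp;		/* time of last nr_running change */
	unsigned int nr_running;
	unsigned int ave_nr_running;	/* scaled by 1 << FSHIFT */
};

/* Same arithmetic as the patch: decay the average toward nr_running. */
static unsigned int sketch_avg_nr_running(struct fake_rq *rq)
{
	int64_t nr, deltax;
	unsigned int ave = rq->ave_nr_running;

	deltax = rq->clock_task - rq->nr_last_stamp;
	nr = NR_AVE_SCALE(rq->nr_running);

	if (deltax > NR_AVE_PERIOD)
		ave = nr;	/* sample too old: snap to current value */
	else
		ave += NR_AVE_DIV_PERIOD(deltax * (nr - ave));

	return ave;
}

int main(void)
{
	struct fake_rq rq = { .nr_running = 2 };	/* two runnable tasks */
	uint64_t t;

	/* Sample every 10 ms for 300 ms and watch the average approach 2. */
	for (t = 0; t <= 300000000ULL; t += 10000000ULL) {
		rq.clock_task = t;
		rq.ave_nr_running = sketch_avg_nr_running(&rq);
		rq.nr_last_stamp = t;
		printf("t=%3llums ave=%u.%03u\n",
		       (unsigned long long)(t / 1000000),
		       rq.ave_nr_running >> FSHIFT,
		       ((rq.ave_nr_running & ((1u << FSHIFT) - 1)) * 1000) >> FSHIFT);
	}
	return 0;
}

The same decay is applied at read time: avg_nr_running() re-runs do_avg_nr_running() under the seqcount, so a CPU whose runqueue has not changed for a long while does not contribute a stale average, which is exactly what the comment inside the loop describes. The comment block above the macros lists the alternative exponents (24-26) for shorter averaging windows.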