author    Viresh Kumar <viresh.kumar@linaro.org>    2012-10-26 00:47:42 +0200
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2012-11-15 00:33:07 +0100
commit    4471a34f9a1f2da220272e823bdb8e8fa83a7661 (patch)
tree      d63e8c16a4b40da97b558d4b955f8e64157b8900 /drivers/cpufreq/cpufreq_ondemand.c
parent    0676f7f2e7d2adec11f40320ca43a8897b8ef906 (diff)
cpufreq: governors: remove redundant code
The ondemand governor was written first, and the conservative governor was later derived from it. The conservative governor reused much of ondemand's logic, but as a copy of the code rather than as routines shared by both governors, which increased redundancy and made the code difficult to maintain. This patch moves the common parts of both governors into cpufreq_governor.c to address those issues. It should not change anything from a functionality point of view.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
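For readers skimming the diff below, the shape of the refactoring is worth spelling out: each governor now fills in a dbs_data descriptor with its tuners and callbacks, and its cpufreq_governor hook simply forwards every governor event to the shared cpufreq_governor_dbs() core in cpufreq_governor.c. A minimal sketch of both sides of the pattern follows; the od_* names appear verbatim in this diff, while the cs_* conservative counterparts are assumptions sketched here for illustration (the conservative side is converted by a companion patch in the same series):

	/* ondemand: descriptor plus a thin forwarding hook (as in this diff) */
	static struct dbs_data od_dbs_data = {
		.governor	= GOV_ONDEMAND,
		.gov_dbs_timer	= od_dbs_timer,		/* periodic sampling work */
		.gov_check_cpu	= od_check_cpu,		/* per-sample load decision */
		/* ... remaining callbacks elided ... */
	};

	static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
			unsigned int event)
	{
		return cpufreq_governor_dbs(&od_dbs_data, policy, event);
	}

	/* conservative: same shared core, different descriptor (assumed names) */
	static struct dbs_data cs_dbs_data = {
		.governor	= GOV_CONSERVATIVE,
		.gov_dbs_timer	= cs_dbs_timer,
		.gov_check_cpu	= cs_check_cpu,
		/* ... */
	};

	static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
			unsigned int event)
	{
		return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
	}

The START/STOP/LIMITS bookkeeping that both governors used to duplicate now runs once, inside cpufreq_governor_dbs().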
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 698
1 file changed, 225 insertions(+), 473 deletions(-)
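Before the diff itself, it may help to work the decision rule of od_check_cpu() (added below) through with the default tunables: load is scaled by the average frequency into load_freq, an increase triggers when load_freq exceeds up_threshold * policy->cur, and a decrease targets the lowest frequency at which the same absolute load stays 10 points under the threshold. A hedged, standalone illustration (plain userspace C, not kernel code; the numbers are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		/* Default ondemand tunables from the macros in this file. */
		unsigned int up_threshold = 80, down_differential = 10;
		unsigned int cur = 1000000;	/* current frequency, kHz */
		unsigned int load = 60;		/* percent of wall time busy */
		/*
		 * "Absolute load" in load% * kHz. The kernel scales by the
		 * measured average frequency; cur stands in for it here.
		 */
		unsigned int load_freq = load * cur;

		if (load_freq > up_threshold * cur) {
			/* Over 80% busy: jump straight to policy->max. */
			printf("increase to max\n");
		} else if (load_freq < (up_threshold - down_differential) * cur) {
			/*
			 * Under 70% busy: pick freq_next so the same absolute
			 * load lands at exactly 70% of the new frequency.
			 * Here: 60000000 / 70 = 857142 kHz.
			 */
			printf("decrease to %u kHz\n",
			       load_freq / (up_threshold - down_differential));
		} else {
			printf("hold current frequency\n");
		}
		return 0;
	}

With load = 60 this prints "decrease to 857142 kHz"; running the same load at 857142 kHz gives load_freq / freq_next = 70%, exactly up_threshold - down_differential, matching the "10 points under the threshold" comment in the code.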
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d7f774bb49dd..bdaab9206303 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -10,24 +10,23 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/hrtimer.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/types.h>
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -38,80 +37,10 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event);
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-struct cpu_dbs_info_s {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_iowait;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_nice;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_lo;
- unsigned int freq_lo_jiffies;
- unsigned int freq_hi_jiffies;
- unsigned int rate_mult;
- int cpu;
- unsigned int sample_type:1;
- /*
- * percpu mutex that serializes governor limit change with
- * do_dbs_timer invocation. We do not want do_dbs_timer to run
- * when user is changing the governor or limits.
- */
- struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
-
-static unsigned int dbs_enable; /* number of CPUs using this policy */
+static struct dbs_data od_dbs_data;
+static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int up_threshold;
- unsigned int down_differential;
- unsigned int ignore_nice;
- unsigned int sampling_down_factor;
- unsigned int powersave_bias;
- unsigned int io_is_busy;
-} dbs_tuners_ins = {
+static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -119,14 +48,35 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+static void ondemand_powersave_bias_init_cpu(int cpu)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- if (iowait_time == -1ULL)
- return 0;
+ dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ dbs_info->freq_lo = 0;
+}
- return iowait_time;
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known series of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
}
/*
@@ -135,14 +85,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal
* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
*/
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
- unsigned int freq_next,
- unsigned int relation)
+ unsigned int freq_next, unsigned int relation)
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
policy->cpu);
if (!dbs_info->freq_table) {
@@ -154,7 +103,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
relation, &index);
freq_req = dbs_info->freq_table[index].frequency;
- freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+ freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
@@ -173,7 +122,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
dbs_info->freq_lo_jiffies = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
jiffies_hi += ((freq_hi - freq_lo) / 2);
jiffies_hi /= (freq_hi - freq_lo);
@@ -184,13 +133,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
return freq_hi;
}
-static void ondemand_powersave_bias_init_cpu(int cpu)
-{
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
- dbs_info->freq_lo = 0;
-}
-
static void ondemand_powersave_bias_init(void)
{
int i;
@@ -199,53 +141,138 @@ static void ondemand_powersave_bias_init(void)
}
}
-/************************** sysfs interface ************************/
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+ if (od_tuners.powersave_bias)
+ freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ else if (p->cur == p->max)
+ return;
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
+/*
+ * Every sampling_rate, we check if current idle time is less than 20%
+ * (default); if so, we try to increase the frequency. We also look for the
+ * lowest frequency which can sustain the load while keeping idle time over
+ * 30%; if such a frequency exists, we try to decrease to this frequency.
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency
+ * reduction happens in minimum steps of 5% (default) of current frequency.
+ */
+static void od_check_cpu(int cpu, unsigned int load_freq)
{
- return sprintf(buf, "%u\n", min_sampling_rate);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+
+ dbs_info->freq_lo = 0;
+
+ /* Check for frequency increase */
+ if (load_freq > od_tuners.up_threshold * policy->cur) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ dbs_info->rate_mult =
+ od_tuners.sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ /* if we cannot reduce the frequency anymore, break out early */
+ if (policy->cur == policy->min)
+ return;
+
+ /*
+ * The optimal frequency is the lowest frequency that can support the
+ * current CPU usage without triggering the up policy. To be safe, we
+ * stay 10 points under the threshold.
+ */
+ if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
+ policy->cur) {
+ unsigned int freq_next;
+ freq_next = load_freq / (od_tuners.up_threshold -
+ od_tuners.down_differential);
+
+ /* No longer fully busy, reset rate_mult */
+ dbs_info->rate_mult = 1;
+
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
+ if (!od_tuners.powersave_bias) {
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ } else {
+ int freq = powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq,
+ CPUFREQ_RELATION_L);
+ }
+ }
}
-define_one_global_ro(sampling_rate_min);
+static void od_dbs_timer(struct work_struct *work)
+{
+ struct od_cpu_dbs_info_s *dbs_info =
+ container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cpu;
+ int delay, sample_type = dbs_info->sample_type;
-/* cpufreq_ondemand Governor Tunables */
-#define show_one(file_name, object) \
-static ssize_t show_##file_name \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+ /* Common NORMAL_SAMPLE setup */
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ if (sample_type == OD_SUB_SAMPLE) {
+ delay = dbs_info->freq_lo_jiffies;
+ __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
+ dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ } else {
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = dbs_info->freq_hi_jiffies;
+ } else {
+ delay = delay_for_sampling_rate(dbs_info->rate_mult);
+ }
+ }
+
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+}
+
+/************************** sysfs interface ************************/
+
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
}
-show_one(sampling_rate, sampling_rate);
-show_one(io_is_busy, io_is_busy);
-show_one(up_threshold, up_threshold);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(ignore_nice_load, ignore_nice);
-show_one(powersave_bias, powersave_bias);
/**
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
* If new rate is smaller than the old, simply updating
- * dbs_tuners_int.sampling_rate might not be appropriate. For example,
- * if the original sampling_rate was 1 second and the requested new sampling
- * rate is 10 ms because the user needs immediate reaction from ondemand
- * governor, but not sure if higher frequency will be required or not,
- * then, the governor may change the sampling rate too late; up to 1 second
- * later. Thus, if we are reducing the sampling rate, we need to make the
- * new value effective immediately.
+ * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
+ * original sampling_rate was 1 second and the requested new sampling rate is 10
+ * ms because the user needs immediate reaction from ondemand governor, but not
+ * sure if higher frequency will be required or not, then, the governor may
+ * change the sampling rate too late; up to 1 second later. Thus, if we are
+ * reducing the sampling rate, we need to make the new value effective
+ * immediately.
*/
static void update_sampling_rate(unsigned int new_rate)
{
int cpu;
- dbs_tuners_ins.sampling_rate = new_rate
- = max(new_rate, min_sampling_rate);
+ od_tuners.sampling_rate = new_rate = max(new_rate,
+ od_dbs_data.min_sampling_rate);
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy;
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
unsigned long next_sampling, appointed_at;
policy = cpufreq_cpu_get(cpu);
@@ -254,28 +281,28 @@ static void update_sampling_rate(unsigned int new_rate)
dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
cpufreq_cpu_put(policy);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- if (!delayed_work_pending(&dbs_info->work)) {
- mutex_unlock(&dbs_info->timer_mutex);
+ if (!delayed_work_pending(&dbs_info->cdbs.work)) {
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
continue;
}
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->work.timer.expires;
-
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->cdbs.work.timer.expires;
if (time_before(next_sampling, appointed_at)) {
- mutex_unlock(&dbs_info->timer_mutex);
- cancel_delayed_work_sync(&dbs_info->work);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->cdbs.work);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
- usecs_to_jiffies(new_rate));
+ schedule_delayed_work_on(dbs_info->cdbs.cpu,
+ &dbs_info->cdbs.work,
+ usecs_to_jiffies(new_rate));
}
- mutex_unlock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
}
@@ -300,7 +327,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.io_is_busy = !!input;
+ od_tuners.io_is_busy = !!input;
return count;
}
@@ -315,7 +342,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
- dbs_tuners_ins.up_threshold = input;
+ od_tuners.up_threshold = input;
return count;
}
@@ -328,12 +355,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- dbs_tuners_ins.sampling_down_factor = input;
+ od_tuners.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
dbs_info->rate_mult = 1;
}
return count;
@@ -354,19 +381,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ if (input == od_tuners.ignore_nice) { /* nothing to do */
return count;
}
- dbs_tuners_ins.ignore_nice = input;
+ od_tuners.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(od_cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall);
+ if (od_tuners.ignore_nice)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -385,17 +413,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- dbs_tuners_ins.powersave_bias = input;
+ od_tuners.powersave_bias = input;
ondemand_powersave_bias_init();
return count;
}
+show_one(od, sampling_rate, sampling_rate);
+show_one(od, io_is_busy, io_is_busy);
+show_one(od, up_threshold, up_threshold);
+show_one(od, sampling_down_factor, sampling_down_factor);
+show_one(od, ignore_nice_load, ignore_nice);
+show_one(od, powersave_bias, powersave_bias);
+
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
+define_one_global_ro(sampling_rate_min);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -408,354 +444,71 @@ static struct attribute *dbs_attributes[] = {
NULL
};
-static struct attribute_group dbs_attr_group = {
+static struct attribute_group od_attr_group = {
.attrs = dbs_attributes,
.name = "ondemand",
};
/************************** sysfs end ************************/
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
-{
- if (dbs_tuners_ins.powersave_bias)
- freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
- else if (p->cur == p->max)
- return;
-
- __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
- CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
-}
-
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
- unsigned int max_load_freq;
-
- struct cpufreq_policy *policy;
- unsigned int j;
-
- this_dbs_info->freq_lo = 0;
- policy = this_dbs_info->cur_policy;
-
- /*
- * Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
- * Every sampling_rate, we look for a the lowest
- * frequency which can sustain the load while keeping idle time over
- * 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of current frequency
- */
-
- /* Get Absolute Load - in terms of freq */
- max_load_freq = 0;
-
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
- unsigned int idle_time, wall_time, iowait_time;
- unsigned int load, load_freq;
- int freq_avg;
-
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
-
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
-
- wall_time = (unsigned int)
- (cur_wall_time - j_dbs_info->prev_cpu_wall);
- j_dbs_info->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = cur_idle_time;
-
- iowait_time = (unsigned int)
- (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
- j_dbs_info->prev_cpu_iowait = cur_iowait_time;
-
- if (dbs_tuners_ins.ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- j_dbs_info->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
-
- j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
- }
-
- /*
- * For the purpose of ondemand, waiting for disk IO is an
- * indication that you're performance critical, and not that
- * the system is actually idle. So subtract the iowait time
- * from the cpu idle time.
- */
-
- if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
- idle_time -= iowait_time;
+define_get_cpu_dbs_routines(od_cpu_dbs_info);
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- load = 100 * (wall_time - idle_time) / wall_time;
-
- freq_avg = __cpufreq_driver_getavg(policy, j);
- if (freq_avg <= 0)
- freq_avg = policy->cur;
-
- load_freq = load * freq_avg;
- if (load_freq > max_load_freq)
- max_load_freq = load_freq;
- }
-
- /* Check for frequency increase */
- if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
- /* If switching to max speed, apply sampling_down_factor */
- if (policy->cur < policy->max)
- this_dbs_info->rate_mult =
- dbs_tuners_ins.sampling_down_factor;
- dbs_freq_increase(policy, policy->max);
- return;
- }
-
- /* Check for frequency decrease */
- /* if we cannot reduce the frequency anymore, break out early */
- if (policy->cur == policy->min)
- return;
-
- /*
- * The optimal frequency is the frequency that is the lowest that
- * can support the current CPU usage without triggering the up
- * policy. To be safe, we focus 10 points under the threshold.
- */
- if (max_load_freq <
- (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
- policy->cur) {
- unsigned int freq_next;
- freq_next = max_load_freq /
- (dbs_tuners_ins.up_threshold -
- dbs_tuners_ins.down_differential);
-
- /* No longer fully busy, reset rate_mult */
- this_dbs_info->rate_mult = 1;
-
- if (freq_next < policy->min)
- freq_next = policy->min;
-
- if (!dbs_tuners_ins.powersave_bias) {
- __cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- } else {
- int freq = powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
- }
- }
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
- int sample_type = dbs_info->sample_type;
-
- int delay;
-
- mutex_lock(&dbs_info->timer_mutex);
-
- /* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- if (!dbs_tuners_ins.powersave_bias ||
- sample_type == DBS_NORMAL_SAMPLE) {
- dbs_check_cpu(dbs_info);
- if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = DBS_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
- } else {
- /* We want all CPUs to do sampling nearly on
- * same jiffy
- */
- delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
- }
- } else {
- __cpufreq_driver_target(dbs_info->cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
- delay = dbs_info->freq_lo_jiffies;
- }
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
- mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+static struct od_ops od_ops = {
+ .io_busy = should_io_be_busy,
+ .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
+ .powersave_bias_target = powersave_bias_target,
+ .freq_increase = dbs_freq_increase,
+};
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct dbs_data od_dbs_data = {
+ .governor = GOV_ONDEMAND,
+ .attr_group = &od_attr_group,
+ .tuners = &od_tuners,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = od_dbs_timer,
+ .gov_check_cpu = od_check_cpu,
+ .gov_ops = &od_ops,
+};
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
{
- cancel_delayed_work_sync(&dbs_info->work);
+ return cpufreq_governor_dbs(&od_dbs_data, policy, event);
}
-/*
- * Not all CPUs want IO time to be accounted as busy; this dependson how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
#endif
- return 0;
-}
-
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
-{
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info_s *this_dbs_info;
- unsigned int j;
- int rc;
-
- this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
- return -EINVAL;
-
- mutex_lock(&dbs_mutex);
-
- dbs_enable++;
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- j_dbs_info->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- this_dbs_info->cpu = cpu;
- this_dbs_info->rate_mult = 1;
- ondemand_powersave_bias_init_cpu(cpu);
- /*
- * Start the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 1) {
- unsigned int latency;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &dbs_attr_group);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
- /* Bring kernel and HW constraints together */
- min_sampling_rate = max(min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_tuners_ins.sampling_rate =
- max(min_sampling_rate,
- latency * LATENCY_MULTIPLIER);
- dbs_tuners_ins.io_is_busy = should_io_be_busy();
- }
- mutex_unlock(&dbs_mutex);
-
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_timer_init(this_dbs_info);
- break;
-
- case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
-
- mutex_lock(&dbs_mutex);
- mutex_destroy(&this_dbs_info->timer_mutex);
- dbs_enable--;
- mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
- sysfs_remove_group(cpufreq_global_kobject,
- &dbs_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&this_dbs_info->timer_mutex);
- if (policy->max < this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(this_dbs_info);
- mutex_unlock(&this_dbs_info->timer_mutex);
- break;
- }
- return 0;
-}
+struct cpufreq_governor cpufreq_gov_ondemand = {
+ .name = "ondemand",
+ .governor = od_cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
static int __init cpufreq_gov_dbs_init(void)
{
u64 idle_time;
int cpu = get_cpu();
+ mutex_init(&od_dbs_data.mutex);
idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
- dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- dbs_tuners_ins.down_differential =
- MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
* timer might skip some samples if idle/sleeping as needed.
*/
- min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
/* For correct statistics, we need 10 ticks for each measure */
- min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+ od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
}
return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -766,7 +519,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
-
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "