From 2c906b317b2d9c7e32b0d513e102bd68a2c49112 Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Wed, 22 Mar 2006 09:54:10 +0000 Subject: [PATCH] cpufreq_conservative: aligning codebase with ondemand Since the conservative governor was released, its codebase has drifted from the direction and updates that have been applied to the ondemand governor. This patch addresses the lack of updates in that period and brings conservative back up to date. The resulting diff between cpufreq_ondemand.c and cpufreq_conservative.c is now much smaller and shows the differences between the two more clearly. Another reason to do this: ages ago I knowingly made a poor attempt at making conservative less responsive by bumping DEF_SAMPLING_RATE_LATENCY_MULTIPLIER up by two orders of magnitude. I fixed this ages ago, but in my disorganisation I must have lost the diff and left things the way they were. About two weeks ago a user contacted me saying he was having problems with the conservative governor on his AMD Athlon XP-M 2800+, as /sys/devices/system/cpu/cpu0/cpufreq/conservative showed sampling_rate_min 9950000 sampling_rate_max 1360065408 (both values are in microseconds). Nine seconds to decide about changing the frequency... not too responsive :) Signed-off-by: Alexander Clouter Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_conservative.c | 53 +++++++++++++++++----------------- 1 file changed, 27 insertions(+), 26 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index ac38766b2583..adecd31f6156 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -35,12 +35,7 @@ */ #define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MIN_FREQUENCY_UP_THRESHOLD (0) -#define MAX_FREQUENCY_UP_THRESHOLD (100) - #define DEF_FREQUENCY_DOWN_THRESHOLD (20) -#define MIN_FREQUENCY_DOWN_THRESHOLD (0) -#define MAX_FREQUENCY_DOWN_THRESHOLD (100) /* * The polling frequency of this governor depends on the capability of @@ -53,10 +48,14 @@ * All times here are in uS. 
*/ static unsigned int def_sampling_rate; -#define MIN_SAMPLING_RATE (def_sampling_rate / 2) +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) +#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) #define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000) -#define DEF_SAMPLING_DOWN_FACTOR (5) +#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) #define TRANSITION_LATENCY_LIMIT (10 * 1000) static void do_dbs_timer(void *data); @@ -136,7 +135,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, unsigned int input; int ret; ret = sscanf (buf, "%u", &input); - if (ret != 1 ) + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; mutex_lock(&dbs_mutex); @@ -173,8 +172,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused, ret = sscanf (buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD || + if (ret != 1 || input > 100 || input < 0 || input <= dbs_tuners_ins.down_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -194,8 +192,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused, ret = sscanf (buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || - input < MIN_FREQUENCY_DOWN_THRESHOLD || + if (ret != 1 || input > 100 || input < 0 || input >= dbs_tuners_ins.up_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -337,7 +334,6 @@ static void dbs_check_cpu(int cpu) */ /* Check for frequency increase */ - idle_ticks = UINT_MAX; for_each_cpu_mask(j, policy->cpus) { unsigned int tmp_idle_ticks, total_idle_ticks; @@ -357,7 +353,7 @@ static void dbs_check_cpu(int cpu) /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); if (idle_ticks < up_idle_ticks) { down_skip[cpu] = 0; @@ -398,6 +394,7 @@ static void dbs_check_cpu(int cpu) struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); + /* Check for frequency decrease */ total_idle_ticks = j_dbs_info->prev_cpu_idle_up; tmp_idle_ticks = total_idle_ticks - j_dbs_info->prev_cpu_idle_down; @@ -414,12 +411,14 @@ static void dbs_check_cpu(int cpu) freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * dbs_tuners_ins.sampling_down_factor; down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); + usecs_to_jiffies(freq_down_sampling_rate); if (idle_ticks > down_idle_ticks) { - /* if we are already at the lowest speed then break out early + /* + * if we are already at the lowest speed then break out early * or if we 'cannot' reduce the speed as the user might want - * freq_step to be zero */ + * freq_step to be zero + */ if (requested_freq[cpu] == policy->min || dbs_tuners_ins.freq_step == 0) return; @@ -434,9 +433,8 @@ static void dbs_check_cpu(int cpu) if (requested_freq[cpu] < policy->min) requested_freq[cpu] = policy->min; - __cpufreq_driver_target(policy, - requested_freq[cpu], - CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, requested_freq[cpu], + CPUFREQ_RELATION_H); return; } } @@ -507,13 
+505,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (dbs_enable == 1) { unsigned int latency; /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; - latency = policy->cpuinfo.transition_latency; - if (latency < 1000) - latency = 1000; - - def_sampling_rate = (latency / 1000) * + def_sampling_rate = latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = def_sampling_rate; dbs_tuners_ins.ignore_nice = 0; dbs_tuners_ins.freq_step = 5; -- cgit v1.2.3 From e8a02572252f9115c2b8296c40fd8b985f06f872 Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Wed, 22 Mar 2006 09:56:23 +0000 Subject: [PATCH] cpufreq_conservative: alter default responsiveness The sensible approach to making conservative less responsive than ondemand, as mentioned in patch [1/4] :) We do not want conservative to shoot through all the frequencies; its point (by default) is to move through them slowly. By default it is ten times less responsive. Signed-off-by: Alexander Clouter Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_conservative.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index adecd31f6156..3ca3cf061642 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -509,7 +509,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (latency == 0) latency = 1; - def_sampling_rate = latency * + def_sampling_rate = 10 * latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) -- cgit v1.2.3 From 08a28e2e98aa821cf6f15f8a267beb2f33377bb9 Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Wed, 22 Mar 2006 09:59:16 +0000 Subject: [PATCH] cpufreq_conservative: make for_each_cpu() safe All these changes should make cpufreq_conservative safe with regard to the x86 for_each_cpu cpumask.h changes. Whilst making it safe, a number of pointless for loops relating to the cpu masks were removed. I was never comfortable with all those for loops, especially as the iteration is over the same data again and again for each CPU in a single poll: an O(n^2) cost for frequency scaling. The approach I use is to assume by default that no CPUs exist, setting requested_freq to zero as a kind of flag; the reasoning is in the source ;) If the CPU is queried and requested_freq is zero, it initialises the variable to the current frequency and then continues as if nothing happened, which should have the same net effect as before. 
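In outline, the lazy-initialisation idea looks like this (an illustrative sketch distilled from the hunks below, not the literal patch; the surrounding sampling logic is omitted):

	static unsigned int requested_freq[NR_CPUS];	/* zeroed at load time: 0 means "not seen yet" */

	static void dbs_check_cpu(int cpu)
	{
		struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

		/* no CPU runs at 0 Hz, so zero is a safe "uninitialised" marker */
		if (requested_freq[cpu] == 0)
			requested_freq[cpu] = this_dbs_info->cur_policy->cur;

		/* ... from here on, only this CPU's own idle ticks are sampled ... */
	}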
Signed-off-by: Alexander Clouter Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_conservative.c | 91 ++++++++++++++++------------------ 1 file changed, 42 insertions(+), 49 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 3ca3cf061642..7498f2506ade 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -294,31 +294,40 @@ static struct attribute_group dbs_attr_group = { static void dbs_check_cpu(int cpu) { unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; unsigned int freq_step; unsigned int freq_down_sampling_rate; - static int down_skip[NR_CPUS]; - static int requested_freq[NR_CPUS]; - static unsigned short init_flag = 0; - struct cpu_dbs_info_s *this_dbs_info; - struct cpu_dbs_info_s *dbs_info; - + static unsigned short down_skip[NR_CPUS]; + static unsigned int requested_freq[NR_CPUS]; + static unsigned int init_flag = NR_CPUS; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); struct cpufreq_policy *policy; - unsigned int j; - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); if (!this_dbs_info->enable) return; - policy = this_dbs_info->cur_policy; - - if ( init_flag == 0 ) { - for_each_online_cpu(j) { - dbs_info = &per_cpu(cpu_dbs_info, j); - requested_freq[j] = dbs_info->cur_policy->cur; + if ( init_flag != 0 ) { + for_each_cpu(init_flag) { + down_skip[init_flag] = 0; + /* I doubt a CPU exists with a freq of 0hz :) */ + requested_freq[init_flag] = 0; } - init_flag = 1; + init_flag = 0; } + /* + * If its a freshly initialised cpu we setup requested_freq. This + * check could be avoided if we did not care about a first time + * stunted increase in CPU speed when there is a load. I feel we + * should be initialising this to something. The removal of a CPU + * is not a problem, after a short time the CPU should settle down + * to a 'natural' frequency. 
+ */ if (requested_freq[cpu] == 0) requested_freq[cpu] = this_dbs_info->cur_policy->cur; + + policy = this_dbs_info->cur_policy; + /* * The default safe range is 20% to 80% * Every sampling_rate, we check @@ -335,20 +344,15 @@ static void dbs_check_cpu(int cpu) /* Check for frequency increase */ idle_ticks = UINT_MAX; - for_each_cpu_mask(j, policy->cpus) { - unsigned int tmp_idle_ticks, total_idle_ticks; - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(j); - tmp_idle_ticks = total_idle_ticks - - j_dbs_info->prev_cpu_idle_up; - j_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - } + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; @@ -357,13 +361,9 @@ static void dbs_check_cpu(int cpu) if (idle_ticks < up_idle_ticks) { down_skip[cpu] = 0; - for_each_cpu_mask(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_down = - j_dbs_info->prev_cpu_idle_up; - } /* if we are already at full speed then break out early */ if (requested_freq[cpu] == policy->max) return; @@ -388,21 +388,14 @@ static void dbs_check_cpu(int cpu) if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) return; - idle_ticks = UINT_MAX; - for_each_cpu_mask(j, policy->cpus) { - unsigned int tmp_idle_ticks, total_idle_ticks; - struct cpu_dbs_info_s *j_dbs_info; + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - /* Check for frequency decrease */ - total_idle_ticks = j_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - j_dbs_info->prev_cpu_idle_down; - j_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - } + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; @@ -491,7 +484,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; } -- cgit v1.2.3 From a159b82770ab84e1b5e0306fa65e158188492b16 Mon Sep 17 00:00:00 2001 From: Alexander Clouter Date: Wed, 22 Mar 2006 10:00:18 +0000 Subject: [PATCH] cpufreq_conservative: alternative initialise approach Venki, the author of cpufreq_ondemand, came up with a neater way to remove the initialiser code from the main loop of my code and move it out to the point where the governor is actually initialised. Not only does it look cleaner, it also feels cleaner, plus it is simpler to understand. It also saves a bunch of pointless conditional statements in the main loop. 
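The essence of the change, reduced to a sketch (the real hunks follow; the field names are taken from the patch itself): the per-governor state moves out of static NR_CPUS arrays into the per-CPU info structure, and is initialised once when the governor is started, at which point policy->cur is already known:

	struct cpu_dbs_info_s {
		/* ... existing fields ... */
		unsigned int down_skip;
		unsigned int requested_freq;	/* replaces the static per-governor arrays */
	};

	/* in cpufreq_governor_dbs(), when the governor is started: */
	this_dbs_info->down_skip = 0;
	this_dbs_info->requested_freq = policy->cur;	/* no zero-flag check needed in the hot path */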
Signed-off-by: Alexander Clouter Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_conservative.c | 55 +++++++++++----------------------- 1 file changed, 18 insertions(+), 37 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 7498f2506ade..a152d2c46be7 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -65,6 +65,8 @@ struct cpu_dbs_info_s { unsigned int prev_cpu_idle_up; unsigned int prev_cpu_idle_down; unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); @@ -297,35 +299,12 @@ static void dbs_check_cpu(int cpu) unsigned int tmp_idle_ticks, total_idle_ticks; unsigned int freq_step; unsigned int freq_down_sampling_rate; - static unsigned short down_skip[NR_CPUS]; - static unsigned int requested_freq[NR_CPUS]; - static unsigned int init_flag = NR_CPUS; struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); struct cpufreq_policy *policy; if (!this_dbs_info->enable) return; - if ( init_flag != 0 ) { - for_each_cpu(init_flag) { - down_skip[init_flag] = 0; - /* I doubt a CPU exists with a freq of 0hz :) */ - requested_freq[init_flag] = 0; - } - init_flag = 0; - } - - /* - * If its a freshly initialised cpu we setup requested_freq. This - * check could be avoided if we did not care about a first time - * stunted increase in CPU speed when there is a load. I feel we - * should be initialising this to something. The removal of a CPU - * is not a problem, after a short time the CPU should settle down - * to a 'natural' frequency. - */ - if (requested_freq[cpu] == 0) - requested_freq[cpu] = this_dbs_info->cur_policy->cur; - policy = this_dbs_info->cur_policy; /* @@ -360,12 +339,12 @@ static void dbs_check_cpu(int cpu) usecs_to_jiffies(dbs_tuners_ins.sampling_rate); if (idle_ticks < up_idle_ticks) { - down_skip[cpu] = 0; + this_dbs_info->down_skip = 0; this_dbs_info->prev_cpu_idle_down = this_dbs_info->prev_cpu_idle_up; /* if we are already at full speed then break out early */ - if (requested_freq[cpu] == policy->max) + if (this_dbs_info->requested_freq == policy->max) return; freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; @@ -374,18 +353,18 @@ static void dbs_check_cpu(int cpu) if (unlikely(freq_step == 0)) freq_step = 5; - requested_freq[cpu] += freq_step; - if (requested_freq[cpu] > policy->max) - requested_freq[cpu] = policy->max; + this_dbs_info->requested_freq += freq_step; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; - __cpufreq_driver_target(policy, requested_freq[cpu], + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } /* Check for frequency decrease */ - down_skip[cpu]++; - if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) + this_dbs_info->down_skip++; + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) return; /* Check for frequency decrease */ @@ -399,7 +378,7 @@ static void dbs_check_cpu(int cpu) /* Scale idle ticks by 100 and compare with up and down ticks */ idle_ticks *= 100; - down_skip[cpu] = 0; + this_dbs_info->down_skip = 0; freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * dbs_tuners_ins.sampling_down_factor; @@ -412,7 +391,7 @@ static void dbs_check_cpu(int cpu) * or if we 'cannot' reduce the speed as the user might want * freq_step to be zero */ - if (requested_freq[cpu] == policy->min + 
if (this_dbs_info->requested_freq == policy->min || dbs_tuners_ins.freq_step == 0) return; @@ -422,11 +401,11 @@ static void dbs_check_cpu(int cpu) if (unlikely(freq_step == 0)) freq_step = 5; - requested_freq[cpu] -= freq_step; - if (requested_freq[cpu] < policy->min) - requested_freq[cpu] = policy->min; + this_dbs_info->requested_freq -= freq_step; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; - __cpufreq_driver_target(policy, requested_freq[cpu], + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } @@ -489,6 +468,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, = j_dbs_info->prev_cpu_idle_up; } this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; sysfs_create_group(&policy->kobj, &dbs_attr_group); dbs_enable++; /* -- cgit v1.2.3 From ff8c288d7d1a368b663058cdee1ea0adcdef2fa2 Mon Sep 17 00:00:00 2001 From: Eric Piel Date: Fri, 10 Mar 2006 11:34:16 +0200 Subject: [PATCH] cpufreq_ondemand: Warn if it cannot run due to too long transition latency Display a warning if the ondemand governor cannot be selected because the transition latency of the cpufreq driver is too long. Signed-off-by: Eric Piel Acked-by: Venkatesh Pallipadi Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_ondemand.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 69aa1db8336c..6430489db6f4 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -395,8 +395,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return -EINVAL; if (policy->cpuinfo.transition_latency > - (TRANSITION_LATENCY_LIMIT * 1000)) + (TRANSITION_LATENCY_LIMIT * 1000)) { + printk(KERN_WARNING "ondemand governor failed to load " + "due to too long transition latency\n"); return -EINVAL; + } if (this_dbs_info->enable) /* Already enabled */ break; -- cgit v1.2.3 From 9cbad61b41f0b6f0a4c600fe96d8292ffd592b50 Mon Sep 17 00:00:00 2001 From: Eric Piel Date: Fri, 10 Mar 2006 11:35:27 +0200 Subject: [PATCH] cpufreq_ondemand: keep ignore_nice_load value when it is reselected Keep the value of ignore_nice_load of the ondemand governor even after the governor has been deselected and selected again. This matches the behavior of the governor's other exported values and is much more user-friendly. 
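Concretely, the default moves out of the governor-start path and into the static initialiser, so ignore_nice is set once at load time and survives the governor being deselected and reselected (a sketch of the net effect; the hunks are below):

	static struct dbs_tuners dbs_tuners_ins = {
		.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
		.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
		.ignore_nice = 0,	/* default lives here now, no longer reset on every start */
	};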
Signed-off-by: Eric Piel Acked-by: Venkatesh Pallipadi Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_ondemand.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 6430489db6f4..cd846f57147e 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -84,6 +84,7 @@ struct dbs_tuners { static struct dbs_tuners dbs_tuners_ins = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, }; static inline unsigned int get_cpu_idle_time(unsigned int cpu) @@ -434,8 +435,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, def_sampling_rate = MIN_STAT_SAMPLING_RATE; dbs_tuners_ins.sampling_rate = def_sampling_rate; - dbs_tuners_ins.ignore_nice = 0; - dbs_timer_init(); } -- cgit v1.2.3 From 7c9d8c0e84d395a01289ebd1597758939a875a86 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sun, 26 Mar 2006 11:11:03 +0200 Subject: [PATCH] cpufreq_ondemand: add range check Ensure that cpufreq_target is called with, at the least, the minimum frequency allowed by this policy, never something lower. Requests below the policy minimum triggered problems on ARM. Signed-off-by: Dominik Brodowski --- drivers/cpufreq/cpufreq_ondemand.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index cd846f57147e..956d121cb161 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -351,6 +351,9 @@ static void dbs_check_cpu(int cpu) freq_next = (freq_next * policy->cur) / (dbs_tuners_ins.up_threshold - 10); + if (freq_next < policy->min) + freq_next = policy->min; + if (freq_next <= ((policy->cur * 95) / 100)) __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } -- cgit v1.2.3
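A closing note on why this clamp matters (numbers invented for illustration): with policy->min at 200000 kHz, a near-idle load can scale freq_next down to well below that, and without the check the out-of-range value goes straight to __cpufreq_driver_target(), which is what broke on ARM. The added two lines pin the request into the policy's range before the target call:

	if (freq_next < policy->min)
		freq_next = policy->min;

	/* CPUFREQ_RELATION_L then selects the lowest frequency at or above freq_next */
	__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);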