-rw-r--r--  arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c         |  4
-rw-r--r--  drivers/cpufreq/cpufreq.c                     |  4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c            | 42
4 files changed, 46 insertions, 6 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 733093d60436..141abebc4516 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -393,7 +393,7 @@ static struct cpufreq_driver nforce2_driver = {
  * Detects nForce2 A2 and C1 stepping
  *
  */
-static unsigned int nforce2_detect_chipset(void)
+static int nforce2_detect_chipset(void)
 {
 	nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
 				     PCI_DEVICE_ID_NVIDIA_NFORCE2,
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index fc09f142d94d..d9f51367666b 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -35,7 +35,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
  * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
  * and MSR_TMTA_LONGRUN_CTRL
  */
-static void __init longrun_get_policy(struct cpufreq_policy *policy)
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
 {
 	u32 msr_lo, msr_hi;
 
@@ -165,7 +165,7 @@ static unsigned int longrun_get(unsigned int cpu)
  * TMTA rules:
  * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
  */
-static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
 						      unsigned int *high_freq)
 {
 	u32 msr_lo, msr_hi;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199dcb9f0b83..c63a43823744 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,8 +918,8 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
-		if (!cpu_online(j))
-			continue;
+		if (!cpu_online(j))
+			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 	}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7b5093664e49..c631f27a3dcc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,6 +30,8 @@
 
 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define MAX_SAMPLING_DOWN_FACTOR		(100000)
 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
@@ -82,6 +84,7 @@ struct cpu_dbs_info_s {
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
+	unsigned int rate_mult;
 	int cpu;
 	unsigned int sample_type:1;
 	/*
@@ -108,10 +111,12 @@ static struct dbs_tuners {
 	unsigned int up_threshold;
 	unsigned int down_differential;
 	unsigned int ignore_nice;
+	unsigned int sampling_down_factor;
 	unsigned int powersave_bias;
 	unsigned int io_is_busy;
 } dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
 	.ignore_nice = 0,
 	.powersave_bias = 0,
@@ -259,6 +264,7 @@ static ssize_t show_##file_name				\
 show_one(sampling_rate, sampling_rate);
 show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
+show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
@@ -340,6 +346,29 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 	return count;
 }
 
+static ssize_t store_sampling_down_factor(struct kobject *a,
+			struct attribute *b, const char *buf, size_t count)
+{
+	unsigned int input, j;
+	int ret;
+	ret = sscanf(buf, "%u", &input);
+
+	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+		return -EINVAL;
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.sampling_down_factor = input;
+
+	/* Reset down sampling multiplier in case it was active */
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
+		dbs_info->rate_mult = 1;
+	}
+	mutex_unlock(&dbs_mutex);
+
+	return count;
+}
+
 static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 				      const char *buf, size_t count)
 {
@@ -401,6 +430,7 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 define_one_global_rw(sampling_rate);
 define_one_global_rw(io_is_busy);
 define_one_global_rw(up_threshold);
+define_one_global_rw(sampling_down_factor);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
 
@@ -409,6 +439,7 @@ static struct attribute *dbs_attributes[] = {
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
 	&up_threshold.attr,
+	&sampling_down_factor.attr,
 	&ignore_nice_load.attr,
 	&powersave_bias.attr,
 	&io_is_busy.attr,
@@ -562,6 +593,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	/* Check for frequency increase */
 	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+		/* If switching to max speed, apply sampling_down_factor */
+		if (policy->cur < policy->max)
+			this_dbs_info->rate_mult =
+				dbs_tuners_ins.sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
 	}
@@ -584,6 +619,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				(dbs_tuners_ins.up_threshold -
 				 dbs_tuners_ins.down_differential);
 
+		/* No longer fully busy, reset rate_mult */
+		this_dbs_info->rate_mult = 1;
+
 		if (freq_next < policy->min)
 			freq_next = policy->min;
 
@@ -607,7 +645,8 @@ static void do_dbs_timer(struct work_struct *work)
 	int sample_type = dbs_info->sample_type;
 
 	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+		* dbs_info->rate_mult);
 
 	if (num_online_cpus() > 1)
 		delay -= jiffies % delay;
@@ -711,6 +750,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			}
 		}
 		this_dbs_info->cpu = cpu;
+		this_dbs_info->rate_mult = 1;
 		ondemand_powersave_bias_init_cpu(cpu);
 		/*
 		 * Start the timerschedule work, when this governor
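
Most of the ondemand hunks above are sysfs plumbing for the new sampling_down_factor tunable; the behavioural change is concentrated in dbs_check_cpu() and do_dbs_timer(), where the timer delay is scaled by rate_mult. As a rough illustration only -- a standalone user-space C sketch, not kernel code, with hypothetical values standing in for sampling_rate and sampling_down_factor -- the following shows how the effective re-evaluation interval stretches once the governor has pushed the policy to its maximum frequency:

/*
 * Illustration only -- not part of the patch and not kernel code.
 * SAMPLING_RATE_US and SAMPLING_DOWN_FACTOR are assumed values.
 */
#include <stdio.h>

#define SAMPLING_RATE_US     10000  /* assumed base sampling_rate, in microseconds */
#define SAMPLING_DOWN_FACTOR 10     /* assumed value of the new sysfs tunable */

/* Mirrors the patch's logic: rate_mult equals sampling_down_factor only
 * while the governor holds the CPU at its maximum frequency, and is reset
 * to 1 otherwise, so only the "fully busy" case is sampled less often. */
static unsigned int effective_interval_us(int at_max_freq)
{
	unsigned int rate_mult = at_max_freq ? SAMPLING_DOWN_FACTOR : 1;

	return SAMPLING_RATE_US * rate_mult;
}

int main(void)
{
	printf("interval below max frequency: %u us\n", effective_interval_us(0));
	printf("interval at max frequency:    %u us\n", effective_interval_us(1));
	return 0;
}

The intent, as reflected in dbs_check_cpu(), is that a fully loaded CPU is re-inspected less often once it reaches policy->max (so it is not dropped back down prematurely under sustained load), while rate_mult snaps back to 1 as soon as the load falls below the up threshold, leaving behaviour below the maximum frequency unchanged.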