author     Elaine K. Tam <etam@nvidia.com>    2013-07-03 17:18:15 -0700
committer  Riham Haidar <rhaidar@nvidia.com>  2013-07-26 13:49:55 -0700
commit     640399b2b583f3f66113bdfba2d7d89f1a517979 (patch)
tree       ac68d5cd4a0d8772945cd486fe02124c3c20e1ff /drivers/cpufreq
parent     24966f43ec2c871b8d03db8aa247c63b33725833 (diff)
Cpufreq: interactive: Add fresh google interactive gov, etc.
PART 1:

Author: Puneet Saxena <puneets@nvidia.com>
Date:   Mon May 13 19:41:00 2013 +0530

Cpufreq: interactive: change min_sample_time

Reduce min_sample_time so that frequency can be ramped down quickly.

Bug 1281679

Change-Id: I1585c30a6bb4c2cb6e5567c8e472e03af3596971
Signed-off-by: Puneet Saxena <puneets@nvidia.com>
Reviewed-on: http://git-master/r/227977
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Reviewed-by: Peter Boonstoppel <pboonstoppel@nvidia.com>

PART 2:

Merge: 92354f2 857c6ca
Author: Puneet Saxena <puneets@nvidia.com>
Date:   Fri May 31 12:37:53 2013 +0530

Cpufreq: interactive: Add fresh google interactive gov

The existing interactive governor code diverged significantly from the version maintained by Google. This change ports the current version of the Google interactive governor code maintained in the "android-3.4" branch.

Merge branch 'android-3.4' into android-tegra-nv-3.4

Conflicts:
	drivers/cpufreq/cpufreq_interactive.c
	include/trace/events/cpufreq_interactive.h

Bug 1281679

Signed-off-by: Puneet Saxena <puneets@nvidia.com>
Change-Id: I10883f2c0ec29c4f06d70c3637b5fcbb9d8aa8bd
Signed-off-by: Elaine K. Tam <etam@nvidia.com>
Reviewed-on: http://git-master/r/251579
Reviewed-by: Jay Cheng <jacheng@nvidia.com>
Reviewed-by: Venkata (Muni) Anda <vanda@nvidia.com>
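Background for the diff below: the ported governor drops the single go_maxspeed_load tunable in favour of a target_loads table, written to sysfs as alternating load and frequency tokens (for example "85 1000000:90"). The following stand-alone userspace sketch is illustrative only and not part of the commit; the example_* names and table contents are hypothetical, but the lookup mirrors the freq_to_targetload() walk added in the diff, where even indices hold target loads and odd indices hold the (kHz) frequency breakpoints at which the next load takes effect.

/*
 * Minimal sketch of the target_loads lookup, assuming the string
 * "85 1000000:90" was tokenized into the flat array below
 * (as get_tokenized_data() in the diff would do), locking omitted.
 */
#include <stdio.h>

static unsigned int example_target_loads[] = { 85, 1000000, 90 };
static int n_example = sizeof(example_target_loads) /
		       sizeof(example_target_loads[0]);

static unsigned int example_freq_to_targetload(unsigned int freq)
{
	int i;

	/* Step over (load, freq-breakpoint) pairs until freq is below the next breakpoint. */
	for (i = 0; i < n_example - 1 && freq >= example_target_loads[i + 1];
	     i += 2)
		;

	return example_target_loads[i];
}

int main(void)
{
	/* With the table above: 700 MHz targets 85% load, 1.2 GHz targets 90%. */
	printf("%u %u\n", example_freq_to_targetload(700000),
	       example_freq_to_targetload(1200000));
	return 0;
}

With such a table, frequencies below the 1 GHz breakpoint are run toward an 85% target load and higher frequencies toward 90%; as the diff's comment notes, lower target loads result in higher chosen CPU speeds in choose_freq().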
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  1188
1 file changed, 815 insertions(+), 373 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 42a9ad59ea46..45e7f3f75164 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -2,7 +2,7 @@
* drivers/cpufreq/cpufreq_interactive.c
*
* Copyright (C) 2010 Google, Inc.
- * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -21,32 +21,38 @@
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
+#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
-#include <linux/mutex.h>
-
+#include <linux/slab.h>
+#include <linux/kernel_stat.h>
#include <asm/cputime.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+static int active_count;
struct cpufreq_interactive_cpuinfo {
struct timer_list cpu_timer;
- int timer_idlecancel;
+ struct timer_list cpu_slack_timer;
+ spinlock_t load_lock; /* protects the next 4 fields */
u64 time_in_idle;
- u64 time_in_iowait;
- u64 idle_exit_time;
- u64 timer_run_time;
- int idling;
- u64 freq_change_time;
- u64 freq_change_time_in_idle;
- u64 freq_change_time_in_iowait;
- u64 last_high_freq_time;
+ u64 time_in_idle_timestamp;
+ u64 cputime_speedadj;
+ u64 cputime_speedadj_timestamp;
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *freq_table;
unsigned int target_freq;
+ unsigned int floor_freq;
+ u64 floor_validate_time;
+ u64 hispeed_validate_time;
+ struct rw_semaphore enable_sem;
int governor_enabled;
};
@@ -56,70 +62,60 @@ static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
-/* Go to max speed when CPU load at or above this value. */
-#define DEFAULT_GO_MAXSPEED_LOAD 85
-static unsigned long go_maxspeed_load;
-
-/* Base of exponential raise to max speed; if 0 - jump to maximum */
-static unsigned long boost_factor;
+/* Hi speed to bump to from lo speed when load burst (default max) */
+static unsigned int hispeed_freq;
-/* Max frequency boost in Hz; if 0 - no max is enforced */
-static unsigned long max_boost;
+/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
-/* Consider IO as busy */
-static unsigned long io_is_busy;
-
-/*
- * Targeted sustainable load relatively to current frequency.
- * If 0, target is set realtively to the max speed
- */
-static unsigned long sustain_load;
+/* Target load. Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+static spinlock_t target_loads_lock;
+static unsigned int *target_loads = default_target_loads;
+static int ntarget_loads = ARRAY_SIZE(default_target_loads);
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
-#define DEFAULT_MIN_SAMPLE_TIME 30000;
-static unsigned long min_sample_time;
+#define DEFAULT_MIN_SAMPLE_TIME (30 * USEC_PER_MSEC)
+static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
/*
* The sample rate of the timer used to increase frequency
*/
-#define DEFAULT_TIMER_RATE 20000;
-static unsigned long timer_rate;
-
-/*
- * The minimum delay before frequency is allowed to raise over normal rate.
- * Since it must remain at high frequency for a minimum of MIN_SAMPLE_TIME
- * once it rises, setting this delay to a multiple of MIN_SAMPLE_TIME
- * becomes the best way to enforce a square wave.
- * e.g. 5*MIN_SAMPLE_TIME = 20% high freq duty cycle
- */
-#define DEFAULT_HIGH_FREQ_MIN_DELAY 5*DEFAULT_MIN_SAMPLE_TIME
-static unsigned long high_freq_min_delay;
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+static unsigned long timer_rate = DEFAULT_TIMER_RATE;
/*
- * The maximum frequency CPUs are allowed to run normally
- * 0 if disabled
+ * Wait this long before raising speed above hispeed, by default a single
+ * timer interval.
*/
-#define DEFAULT_MAX_NORMAL_FREQ 0
-static unsigned long max_normal_freq;
-
-
-/* Defines to control mid-range frequencies */
-#define DEFAULT_MID_RANGE_GO_MAXSPEED_LOAD 95
-
-static unsigned long midrange_freq;
-static unsigned long midrange_go_maxspeed_load;
-static unsigned long midrange_max_boost;
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+ DEFAULT_ABOVE_HISPEED_DELAY };
+static spinlock_t above_hispeed_delay_lock;
+static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
+static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);
+
+/* Non-zero means indefinite speed boost active */
+static int boost_val;
+/* Duration of a boot pulse in usecs */
+static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+/* End time of boost pulse in ktime converted to usecs */
+static u64 boostpulse_endtime;
/*
- * gov_state_lock protects interactive node creation in governor start/stop.
+ * Max additional time to wait in idle, beyond timer_rate, at speeds above
+ * minimum before wakeup to reduce speed, or -1 if unnecessary.
*/
-static DEFINE_MUTEX(gov_state_lock);
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+static int timer_slack_val = DEFAULT_TIMER_SLACK;
-static struct mutex gov_state_lock;
-static unsigned int active_count;
+static bool io_is_busy;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -134,140 +130,264 @@ struct cpufreq_governor cpufreq_gov_interactive = {
.owner = THIS_MODULE,
};
-static unsigned int cpufreq_interactive_get_target(
- int cpu_load, int load_since_change, struct cpufreq_policy *policy)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+ cputime64_t *wall)
{
- unsigned int target_freq;
- unsigned int maxspeed_load = go_maxspeed_load;
- unsigned int mboost = max_boost;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
- /*
- * Choose greater of short-term load (since last idle timer
- * started or timer function re-armed itself) or long-term load
- * (since last frequency change).
- */
- if (load_since_change > cpu_load)
- cpu_load = load_since_change;
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- if (midrange_freq && policy->cur > midrange_freq) {
- maxspeed_load = midrange_go_maxspeed_load;
- mboost = midrange_max_boost;
- }
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- if (cpu_load >= maxspeed_load) {
- if (!boost_factor)
- return policy->max;
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = jiffies_to_usecs(cur_wall_time);
- target_freq = policy->cur * boost_factor;
+ return jiffies_to_usecs(idle_time);
+}
- if (mboost && target_freq > policy->cur + mboost)
- target_freq = policy->cur + mboost;
- }
- else {
- if (!sustain_load)
- return policy->max * cpu_load / 100;
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
+ cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+ if (idle_time == -1ULL)
+ idle_time = get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_is_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
- target_freq = policy->cur * cpu_load / sustain_load;
+ return idle_time;
+}
+
+static void cpufreq_interactive_timer_resched(
+ struct cpufreq_interactive_cpuinfo *pcpu)
+{
+ unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+ unsigned long flags;
+
+ mod_timer_pinned(&pcpu->cpu_timer, expires);
+ if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
}
- target_freq = min(target_freq, policy->max);
- return target_freq;
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ pcpu->time_in_idle =
+ get_cpu_idle_time(smp_processor_id(),
+ &pcpu->time_in_idle_timestamp);
+ pcpu->cputime_speedadj = 0;
+ pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
-static inline cputime64_t get_cpu_iowait_time(
- unsigned int cpu, cputime64_t *wall)
+static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
- if (iowait_time == -1ULL)
- return 0;
+ for (i = 0; i < nabove_hispeed_delay - 1 &&
+ freq >= above_hispeed_delay[i+1]; i += 2)
+ ;
- return iowait_time;
+ ret = above_hispeed_delay[i];
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return ret;
}
-static void cpufreq_interactive_timer(unsigned long data)
+static unsigned int freq_to_targetload(unsigned int freq)
+{
+ int i;
+ unsigned int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
+ ;
+
+ ret = target_loads[i];
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+
+static unsigned int choose_freq(
+ struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
+{
+ unsigned int freq = pcpu->policy->cur;
+ unsigned int prevfreq, freqmin, freqmax;
+ unsigned int tl;
+ int index;
+
+ freqmin = 0;
+ freqmax = UINT_MAX;
+
+ do {
+ prevfreq = freq;
+ tl = freq_to_targetload(freq);
+
+ /*
+ * Find the lowest frequency where the computed load is less
+ * than or equal to the target load.
+ */
+
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+ CPUFREQ_RELATION_L, &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq > prevfreq) {
+ /* The previous frequency is too low. */
+ freqmin = prevfreq;
+
+ if (freq >= freqmax) {
+ /*
+ * Find the highest frequency that is less
+ * than freqmax.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmax - 1, CPUFREQ_RELATION_H,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ if (freq == freqmin) {
+ /*
+ * The first frequency below freqmax
+ * has already been found to be too
+ * low. freqmax is the lowest speed
+ * we found that is fast enough.
+ */
+ freq = freqmax;
+ break;
+ }
+ }
+ } else if (freq < prevfreq) {
+ /* The previous frequency is high enough. */
+ freqmax = prevfreq;
+
+ if (freq <= freqmin) {
+ /*
+ * Find the lowest frequency that is higher
+ * than freqmin.
+ */
+ cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ freqmin + 1, CPUFREQ_RELATION_L,
+ &index);
+ freq = pcpu->freq_table[index].frequency;
+
+ /*
+ * If freqmax is the first frequency above
+ * freqmin then we have already found that
+ * this speed is fast enough.
+ */
+ if (freq == freqmax)
+ break;
+ }
+ }
+
+ /* If same frequency chosen as previous then done. */
+ } while (freq != prevfreq);
+
+ return freq;
+}
+
+static u64 update_load(int cpu)
{
+ struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+ u64 now;
+ u64 now_idle;
unsigned int delta_idle;
- unsigned int delta_iowait;
unsigned int delta_time;
+ u64 active_time;
+
+ now_idle = get_cpu_idle_time(cpu, &now);
+ delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+ delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+ active_time = delta_time - delta_idle;
+ pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+ pcpu->time_in_idle = now_idle;
+ pcpu->time_in_idle_timestamp = now;
+ return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+ u64 now;
+ unsigned int delta_time;
+ u64 cputime_speedadj;
int cpu_load;
- int load_since_change;
- u64 time_in_idle;
- u64 time_in_iowait;
- u64 idle_exit_time;
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, data);
- u64 now_idle;
- u64 now_iowait;
unsigned int new_freq;
+ unsigned int loadadjfreq;
unsigned int index;
unsigned long flags;
+ bool boosted;
- smp_rmb();
-
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return;
if (!pcpu->governor_enabled)
goto exit;
- /*
- * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
- * this lets idle exit know the current idle time sample has
- * been processed, and idle exit can generate a new sample and
- * re-arm the timer. This prevents a concurrent idle
- * exit on that CPU from writing a new set of info at the same time
- * the timer function runs (the timer function can't use that info
- * until more time passes).
- */
- time_in_idle = pcpu->time_in_idle;
- time_in_iowait = pcpu->time_in_iowait;
- idle_exit_time = pcpu->idle_exit_time;
- now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
- now_iowait = get_cpu_iowait_time(data, NULL);
- smp_wmb();
-
- /* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time)
- goto exit;
-
- delta_idle = (unsigned int)(now_idle - time_in_idle);
- delta_iowait = (unsigned int)(now_iowait - time_in_iowait);
- delta_time = (unsigned int)(pcpu->timer_run_time - idle_exit_time);
+ spin_lock_irqsave(&pcpu->load_lock, flags);
+ now = update_load(data);
+ delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+ cputime_speedadj = pcpu->cputime_speedadj;
+ spin_unlock_irqrestore(&pcpu->load_lock, flags);
- /*
- * If timer ran less than 1ms after short-term sample started, retry.
- */
- if (delta_time < 1000)
+ if (WARN_ON_ONCE(!delta_time))
goto rearm;
- if (!io_is_busy)
- delta_idle += delta_iowait;
+ do_div(cputime_speedadj, delta_time);
+ loadadjfreq = (unsigned int)cputime_speedadj * 100;
+ cpu_load = loadadjfreq / pcpu->target_freq;
+ boosted = boost_val || now < boostpulse_endtime;
- if (delta_idle > delta_time)
- cpu_load = 0;
- else
- cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+ if (cpu_load >= go_hispeed_load || boosted) {
+ if (pcpu->target_freq < hispeed_freq) {
+ new_freq = hispeed_freq;
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
- delta_idle = (unsigned int)(now_idle - pcpu->freq_change_time_in_idle);
- delta_iowait = (unsigned int)(now_iowait - pcpu->freq_change_time_in_iowait);
- delta_time = (unsigned int)(pcpu->timer_run_time - pcpu->freq_change_time);
+ if (new_freq < hispeed_freq)
+ new_freq = hispeed_freq;
+ }
+ } else {
+ new_freq = choose_freq(pcpu, loadadjfreq);
+ }
- if (!io_is_busy)
- delta_idle += delta_iowait;
+ if (pcpu->target_freq >= hispeed_freq &&
+ new_freq > pcpu->target_freq &&
+ now - pcpu->hispeed_validate_time <
+ freq_to_above_hispeed_delay(pcpu->target_freq)) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm;
+ }
- if ((delta_time == 0) || (delta_idle > delta_time))
- load_since_change = 0;
- else
- load_since_change =
- 100 * (delta_time - delta_idle) / delta_time;
- /*
- * Combine short-term load (since last idle timer started or timer
- * function re-armed itself) and long-term load (since last frequency
- * change) to determine new target frequency
- */
- new_freq = cpufreq_interactive_get_target(cpu_load, load_since_change,
- pcpu->policy);
+ pcpu->hispeed_validate_time = now;
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_H,
+ new_freq, CPUFREQ_RELATION_L,
&index)) {
pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
(int) data);
@@ -276,33 +396,42 @@ static void cpufreq_interactive_timer(unsigned long data)
new_freq = pcpu->freq_table[index].frequency;
- if (pcpu->target_freq == new_freq)
- goto rearm_if_notmax;
-
/*
- * Do not scale down unless we have been at this frequency for the
- * minimum sample time.
+ * Do not scale below floor_freq unless we have been at or above the
+ * floor frequency for the minimum sample time since last validated.
*/
- if (new_freq < pcpu->target_freq) {
- if (pcpu->timer_run_time - pcpu->freq_change_time
- < min_sample_time)
+ if (new_freq < pcpu->floor_freq) {
+ if (now - pcpu->floor_validate_time < min_sample_time) {
+ trace_cpufreq_interactive_notyet(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
goto rearm;
+ }
}
/*
- * Can only overclock if the delay is satisfy. Otherwise, cap it to
- * maximum allowed normal frequency
+ * Update the timestamp for checking whether speed has been held at
+ * or above the selected frequency for a minimum of min_sample_time,
+ * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
+ * allow the speed to drop as soon as the boostpulse duration expires
+ * (or the indefinite boost is turned off).
*/
- if (max_normal_freq && (new_freq > max_normal_freq)) {
- if ((pcpu->timer_run_time - pcpu->last_high_freq_time)
- < high_freq_min_delay) {
- new_freq = max_normal_freq;
- }
- else {
- pcpu->last_high_freq_time = pcpu->timer_run_time;
- }
+
+ if (!boosted || new_freq > hispeed_freq) {
+ pcpu->floor_freq = new_freq;
+ pcpu->floor_validate_time = now;
}
+ if (pcpu->target_freq == new_freq) {
+ trace_cpufreq_interactive_already(
+ data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+ goto rearm_if_notmax;
+ }
+
+ trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+ pcpu->policy->cur, new_freq);
+
pcpu->target_freq = new_freq;
spin_lock_irqsave(&speedchange_cpumask_lock, flags);
cpumask_set_cpu(data, &speedchange_cpumask);
@@ -318,31 +447,11 @@ rearm_if_notmax:
goto exit;
rearm:
- if (!timer_pending(&pcpu->cpu_timer)) {
- /*
- * If already at min: if that CPU is idle, don't set timer.
- * Else cancel the timer if that CPU goes idle. We don't
- * need to re-evaluate speed until the next idle exit.
- */
- if (pcpu->target_freq == pcpu->policy->min) {
- smp_rmb();
-
- if (pcpu->idling)
- goto exit;
-
- pcpu->timer_idlecancel = 1;
- }
-
- pcpu->time_in_idle = get_cpu_idle_time_us(
- data, &pcpu->idle_exit_time);
- pcpu->time_in_iowait = get_cpu_iowait_time(
- data, NULL);
-
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
+ if (!timer_pending(&pcpu->cpu_timer))
+ cpufreq_interactive_timer_resched(pcpu);
exit:
+ up_read(&pcpu->enable_sem);
return;
}
@@ -352,15 +461,16 @@ static void cpufreq_interactive_idle_start(void)
&per_cpu(cpuinfo, smp_processor_id());
int pending;
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
- pcpu->idling = 1;
- smp_wmb();
pending = timer_pending(&pcpu->cpu_timer);
if (pcpu->target_freq != pcpu->policy->min) {
-#ifdef CONFIG_SMP
/*
* Entering idle while not at lowest speed. On some
* platforms this can hold the other CPU(s) at that speed
@@ -369,35 +479,11 @@ static void cpufreq_interactive_idle_start(void)
* min indefinitely. This should probably be a quirk of
* the CPUFreq driver.
*/
- if (!pending) {
- pcpu->time_in_idle = get_cpu_idle_time_us(
- smp_processor_id(), &pcpu->idle_exit_time);
- pcpu->time_in_iowait = get_cpu_iowait_time(
- smp_processor_id(), NULL);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
- }
-#endif
- } else {
- /*
- * If at min speed and entering idle after load has
- * already been evaluated, and a timer has been set just in
- * case the CPU suddenly goes busy, cancel that timer. The
- * CPU didn't go busy; we'll recheck things upon idle exit.
- */
- if (pending && pcpu->timer_idlecancel) {
- del_timer_sync(&pcpu->cpu_timer);
- /*
- * Ensure last timer run time is after current idle
- * sample start time, so next idle exit will always
- * start a new idle sampling period.
- */
- pcpu->idle_exit_time = 0;
- pcpu->timer_idlecancel = 0;
- }
+ if (!pending)
+ cpufreq_interactive_timer_resched(pcpu);
}
+ up_read(&pcpu->enable_sem);
}
static void cpufreq_interactive_idle_end(void)
@@ -405,37 +491,23 @@ static void cpufreq_interactive_idle_end(void)
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, smp_processor_id());
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
return;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return;
+ }
- pcpu->idling = 0;
- smp_wmb();
-
- /*
- * Arm the timer for 1-2 ticks later if not already, and if the timer
- * function has already processed the previous load sampling
- * interval. (If the timer is not pending but has not processed
- * the previous interval, it is probably racing with us on another
- * CPU. Let it compute load based on the previous sample and then
- * re-arm the timer for another interval when it's done, rather
- * than updating the interval start time to be "now", which doesn't
- * give the timer function enough time to make a decision on this
- * run.)
- */
- if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time >= pcpu->idle_exit_time &&
- pcpu->governor_enabled) {
- pcpu->time_in_idle =
- get_cpu_idle_time_us(smp_processor_id(),
- &pcpu->idle_exit_time);
- pcpu->time_in_iowait =
- get_cpu_iowait_time(smp_processor_id(),
- NULL);
- pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
+ /* Arm the timer for 1-2 ticks later if not already. */
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ cpufreq_interactive_timer_resched(pcpu);
+ } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+ del_timer(&pcpu->cpu_timer);
+ del_timer(&pcpu->cpu_slack_timer);
+ cpufreq_interactive_timer(smp_processor_id());
}
+ up_read(&pcpu->enable_sem);
}
static int cpufreq_interactive_speedchange_task(void *data)
@@ -470,10 +542,12 @@ static int cpufreq_interactive_speedchange_task(void *data)
unsigned int max_freq = 0;
pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
+ if (!down_read_trylock(&pcpu->enable_sem))
continue;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ continue;
+ }
for_each_cpu(j, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
@@ -483,72 +557,441 @@ static int cpufreq_interactive_speedchange_task(void *data)
max_freq = pjcpu->target_freq;
}
- __cpufreq_driver_target(pcpu->policy,
- max_freq,
- CPUFREQ_RELATION_H);
+ if (max_freq != pcpu->policy->cur)
+ __cpufreq_driver_target(pcpu->policy,
+ max_freq,
+ CPUFREQ_RELATION_H);
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
+ pcpu->policy->cur);
- pcpu->freq_change_time_in_idle =
- get_cpu_idle_time_us(cpu,
- &pcpu->freq_change_time);
- pcpu->freq_change_time_in_iowait =
- get_cpu_iowait_time(cpu, NULL);
+ up_read(&pcpu->enable_sem);
}
}
return 0;
}
-#define DECL_CPUFREQ_INTERACTIVE_ATTR(name) \
-static ssize_t show_##name(struct kobject *kobj, \
- struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%lu\n", name); \
-} \
-\
-static ssize_t store_##name(struct kobject *kobj,\
- struct attribute *attr, const char *buf, size_t count) \
-{ \
- int ret; \
- unsigned long val; \
-\
- ret = strict_strtoul(buf, 0, &val); \
- if (ret < 0) \
- return ret; \
- name = val; \
- return count; \
-} \
-\
-static struct global_attr name##_attr = __ATTR(name, 0644, \
- show_##name, store_##name);
-
-DECL_CPUFREQ_INTERACTIVE_ATTR(go_maxspeed_load)
-DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_freq)
-DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_go_maxspeed_load)
-DECL_CPUFREQ_INTERACTIVE_ATTR(boost_factor)
-DECL_CPUFREQ_INTERACTIVE_ATTR(io_is_busy)
-DECL_CPUFREQ_INTERACTIVE_ATTR(max_boost)
-DECL_CPUFREQ_INTERACTIVE_ATTR(midrange_max_boost)
-DECL_CPUFREQ_INTERACTIVE_ATTR(sustain_load)
-DECL_CPUFREQ_INTERACTIVE_ATTR(min_sample_time)
-DECL_CPUFREQ_INTERACTIVE_ATTR(timer_rate)
-DECL_CPUFREQ_INTERACTIVE_ATTR(high_freq_min_delay)
-DECL_CPUFREQ_INTERACTIVE_ATTR(max_normal_freq)
-
-#undef DECL_CPUFREQ_INTERACTIVE_ATTR
+static void cpufreq_interactive_boost(void)
+{
+ int i;
+ int anyboost = 0;
+ unsigned long flags;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+ for_each_online_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+
+ if (pcpu->target_freq < hispeed_freq) {
+ pcpu->target_freq = hispeed_freq;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->hispeed_validate_time =
+ ktime_to_us(ktime_get());
+ anyboost = 1;
+ }
+
+ /*
+ * Set floor freq and (re)start timer for when last
+ * validated.
+ */
+
+ pcpu->floor_freq = hispeed_freq;
+ pcpu->floor_validate_time = ktime_to_us(ktime_get());
+ }
+
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+ if (anyboost)
+ wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+ struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ int cpu;
+ unsigned long flags;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ pcpu = &per_cpu(cpuinfo, freq->cpu);
+ if (!down_read_trylock(&pcpu->enable_sem))
+ return 0;
+ if (!pcpu->governor_enabled) {
+ up_read(&pcpu->enable_sem);
+ return 0;
+ }
+
+ for_each_cpu(cpu, pcpu->policy->cpus) {
+ struct cpufreq_interactive_cpuinfo *pjcpu =
+ &per_cpu(cpuinfo, cpu);
+ spin_lock_irqsave(&pjcpu->load_lock, flags);
+ update_load(cpu);
+ spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+ }
+
+ up_read(&pcpu->enable_sem);
+ }
+ return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+ .notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+ const char *cp;
+ int i;
+ int ntokens = 1;
+ unsigned int *tokenized_data;
+ int err = -EINVAL;
+
+ cp = buf;
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ if (!(ntokens & 0x1))
+ goto err;
+
+ tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+ if (!tokenized_data) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cp = buf;
+ i = 0;
+ while (i < ntokens) {
+ if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+ goto err_kfree;
+
+ cp = strpbrk(cp, " :");
+ if (!cp)
+ break;
+ cp++;
+ }
+
+ if (i != ntokens)
+ goto err_kfree;
+
+ *num_tokens = ntokens;
+ return tokenized_data;
+
+err_kfree:
+ kfree(tokenized_data);
+err:
+ return ERR_PTR(err);
+}
+
+static ssize_t show_target_loads(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+
+ for (i = 0; i < ntarget_loads; i++)
+ ret += sprintf(buf + ret, "%u%s", target_loads[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + ret, "\n");
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return ret;
+}
+
+static ssize_t store_target_loads(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ntokens;
+ unsigned int *new_target_loads = NULL;
+ unsigned long flags;
+
+ new_target_loads = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_target_loads))
+ return PTR_RET(new_target_loads);
+
+ spin_lock_irqsave(&target_loads_lock, flags);
+ if (target_loads != default_target_loads)
+ kfree(target_loads);
+ target_loads = new_target_loads;
+ ntarget_loads = ntokens;
+ spin_unlock_irqrestore(&target_loads_lock, flags);
+ return count;
+}
+
+static struct global_attr target_loads_attr =
+ __ATTR(target_loads, S_IRUGO | S_IWUSR,
+ show_target_loads, store_target_loads);
+
+static ssize_t show_above_hispeed_delay(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ int i;
+ ssize_t ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
+
+ for (i = 0; i < nabove_hispeed_delay; i++)
+ ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
+ i & 0x1 ? ":" : " ");
+
+ ret += sprintf(buf + ret, "\n");
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ntokens;
+ unsigned int *new_above_hispeed_delay = NULL;
+ unsigned long flags;
+
+ new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+ if (IS_ERR(new_above_hispeed_delay))
+ return PTR_RET(new_above_hispeed_delay);
+
+ spin_lock_irqsave(&above_hispeed_delay_lock, flags);
+ if (above_hispeed_delay != default_above_hispeed_delay)
+ kfree(above_hispeed_delay);
+ above_hispeed_delay = new_above_hispeed_delay;
+ nabove_hispeed_delay = ntokens;
+ spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
+ return count;
+
+}
+
+static struct global_attr above_hispeed_delay_attr =
+ __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
+ show_above_hispeed_delay, store_above_hispeed_delay);
+
+static ssize_t show_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ long unsigned int val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ hispeed_freq = val;
+ return count;
+}
+
+static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
+ show_hispeed_freq, store_hispeed_freq);
+
+
+static ssize_t show_go_hispeed_load(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ go_hispeed_load = val;
+ return count;
+}
+
+static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
+ show_go_hispeed_load, store_go_hispeed_load);
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ min_sample_time = val;
+ return count;
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+ show_min_sample_time, store_min_sample_time);
+
+static ssize_t show_timer_rate(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", timer_rate);
+}
+
+static ssize_t store_timer_rate(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ timer_rate = val;
+ return count;
+}
+
+static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
+ show_timer_rate, store_timer_rate);
+
+static ssize_t show_timer_slack(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", timer_slack_val);
+}
+
+static ssize_t store_timer_slack(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ timer_slack_val = val;
+ return count;
+}
+
+define_one_global_rw(timer_slack);
+
+static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", boost_val);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boost_val = val;
+
+ if (boost_val) {
+ trace_cpufreq_interactive_boost("on");
+ cpufreq_interactive_boost();
+ } else {
+ trace_cpufreq_interactive_unboost("off");
+ }
+
+ return count;
+}
+
+define_one_global_rw(boost);
+
+static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
+ trace_cpufreq_interactive_boost("pulse");
+ cpufreq_interactive_boost();
+ return count;
+}
+
+static struct global_attr boostpulse =
+ __ATTR(boostpulse, 0200, NULL, store_boostpulse);
+
+static ssize_t show_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(
+ struct kobject *kobj, struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ boostpulse_duration_val = val;
+ return count;
+}
+
+define_one_global_rw(boostpulse_duration);
+
+static ssize_t show_io_is_busy(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ io_is_busy = val;
+ return count;
+}
+
+static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
+ show_io_is_busy, store_io_is_busy);
static struct attribute *interactive_attributes[] = {
- &go_maxspeed_load_attr.attr,
- &midrange_freq_attr.attr,
- &midrange_go_maxspeed_load_attr.attr,
- &boost_factor_attr.attr,
- &max_boost_attr.attr,
- &midrange_max_boost_attr.attr,
- &io_is_busy_attr.attr,
- &sustain_load_attr.attr,
+ &target_loads_attr.attr,
+ &above_hispeed_delay_attr.attr,
+ &hispeed_freq_attr.attr,
+ &go_hispeed_load_attr.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
- &high_freq_min_delay_attr.attr,
- &max_normal_freq_attr.attr,
+ &timer_slack.attr,
+ &boost.attr,
+ &boostpulse.attr,
+ &boostpulse_duration.attr,
+ &io_is_busy_attr.attr,
NULL,
};
@@ -590,79 +1033,82 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
if (!cpu_online(policy->cpu))
return -EINVAL;
+ mutex_lock(&gov_lock);
+
freq_table =
cpufreq_frequency_get_table(policy->cpu);
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
for_each_cpu(j, policy->cpus) {
+ unsigned long expires;
+
pcpu = &per_cpu(cpuinfo, j);
pcpu->policy = policy;
pcpu->target_freq = policy->cur;
pcpu->freq_table = freq_table;
- pcpu->freq_change_time_in_idle =
- get_cpu_idle_time_us(j,
- &pcpu->freq_change_time);
- pcpu->time_in_idle = pcpu->freq_change_time_in_idle;
- pcpu->idle_exit_time = pcpu->freq_change_time;
- pcpu->freq_change_time_in_iowait =
- get_cpu_iowait_time(j, NULL);
- pcpu->time_in_iowait = pcpu->freq_change_time_in_iowait;
- if (!pcpu->last_high_freq_time)
- pcpu->last_high_freq_time = pcpu->freq_change_time;
- pcpu->timer_idlecancel = 1;
+ pcpu->floor_freq = pcpu->target_freq;
+ pcpu->floor_validate_time =
+ ktime_to_us(ktime_get());
+ pcpu->hispeed_validate_time =
+ pcpu->floor_validate_time;
+ down_write(&pcpu->enable_sem);
+ expires = jiffies + usecs_to_jiffies(timer_rate);
+ pcpu->cpu_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_timer, j);
+ if (timer_slack_val >= 0) {
+ expires += usecs_to_jiffies(timer_slack_val);
+ pcpu->cpu_slack_timer.expires = expires;
+ add_timer_on(&pcpu->cpu_slack_timer, j);
+ }
pcpu->governor_enabled = 1;
- smp_wmb();
-
- if (!timer_pending(&pcpu->cpu_timer))
- mod_timer(&pcpu->cpu_timer, jiffies + 2);
+ up_write(&pcpu->enable_sem);
}
- mutex_lock(&gov_state_lock);
- active_count++;
-
/*
* Do not register the idle hook and create sysfs
* entries if we have already done so.
*/
- if (active_count == 1) {
- rc = sysfs_create_group(cpufreq_global_kobject,
- &interactive_attr_group);
+ if (++active_count > 1) {
+ mutex_unlock(&gov_lock);
+ return 0;
+ }
- if (rc) {
- mutex_unlock(&gov_state_lock);
- return rc;
- }
- idle_notifier_register(&cpufreq_interactive_idle_nb);
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ if (rc) {
+ mutex_unlock(&gov_lock);
+ return rc;
}
- mutex_unlock(&gov_state_lock);
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
+ cpufreq_register_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ mutex_unlock(&gov_lock);
break;
case CPUFREQ_GOV_STOP:
+ mutex_lock(&gov_lock);
for_each_cpu(j, policy->cpus) {
pcpu = &per_cpu(cpuinfo, j);
+ down_write(&pcpu->enable_sem);
pcpu->governor_enabled = 0;
- smp_wmb();
del_timer_sync(&pcpu->cpu_timer);
-
- /*
- * Reset idle exit time since we may cancel the timer
- * before it can run after the last idle exit time,
- * to avoid tripping the check in idle exit for a timer
- * that is trying to run.
- */
- pcpu->idle_exit_time = 0;
+ del_timer_sync(&pcpu->cpu_slack_timer);
+ up_write(&pcpu->enable_sem);
}
- mutex_lock(&gov_state_lock);
- active_count--;
-
- if (active_count == 0) {
- idle_notifier_unregister(&cpufreq_interactive_idle_nb);
-
- sysfs_remove_group(cpufreq_global_kobject,
- &interactive_attr_group);
+ if (--active_count > 0) {
+ mutex_unlock(&gov_lock);
+ return 0;
}
- mutex_unlock(&gov_state_lock);
+
+ cpufreq_unregister_notifier(
+ &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
+ idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactive_attr_group);
+ mutex_unlock(&gov_lock);
break;
@@ -673,41 +1119,37 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
else if (policy->min > policy->cur)
__cpufreq_driver_target(policy,
policy->min, CPUFREQ_RELATION_L);
-
- /* reschedule the timer if we stopped it */
- pcpu = &per_cpu(cpuinfo, policy->cpu);
-
- if (pcpu && !timer_pending(&pcpu->cpu_timer))
- mod_timer(&pcpu->cpu_timer,
- jiffies + usecs_to_jiffies(timer_rate));
-
break;
}
return 0;
}
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
static int __init cpufreq_interactive_init(void)
{
unsigned int i;
struct cpufreq_interactive_cpuinfo *pcpu;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
- midrange_go_maxspeed_load = DEFAULT_MID_RANGE_GO_MAXSPEED_LOAD;
- min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- timer_rate = DEFAULT_TIMER_RATE;
- high_freq_min_delay = DEFAULT_HIGH_FREQ_MIN_DELAY;
- max_normal_freq = DEFAULT_MAX_NORMAL_FREQ;
-
/* Initalize per-cpu timers */
for_each_possible_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
- init_timer(&pcpu->cpu_timer);
+ init_timer_deferrable(&pcpu->cpu_timer);
pcpu->cpu_timer.function = cpufreq_interactive_timer;
pcpu->cpu_timer.data = i;
+ init_timer(&pcpu->cpu_slack_timer);
+ pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+ spin_lock_init(&pcpu->load_lock);
+ init_rwsem(&pcpu->enable_sem);
}
+ spin_lock_init(&target_loads_lock);
spin_lock_init(&speedchange_cpumask_lock);
+ spin_lock_init(&above_hispeed_delay_lock);
+ mutex_init(&gov_lock);
speedchange_task =
kthread_create(cpufreq_interactive_speedchange_task, NULL,
"cfinteractive");