author     Todd Poynor <toddpoynor@google.com>    2010-12-03 11:20:09 -0800
committer  Todd Poynor <toddpoynor@google.com>    2010-12-03 11:58:11 -0800
commit     1b7360b23f167a295bd92289b7aade7a1517fb03
tree       1cc32da17eecd64e4125931e95e677d98d018b56 /drivers/cpufreq
parent     d301f2563a60a0fe0369dcf9ccce2062919bffff
cpufreq interactive governor save/restore IRQs around cpumask spinlocks
Use irqsave/irqrestore spin locking for the cpumask locks, since the
cpumasks are accessed both from timer (interrupt) context and from
thread context.
Change-Id: I4a53eaf0ced7e73b445feddba90ec11482de9126
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/cpufreq_interactive.c | 23 |
1 file changed, 13 insertions, 10 deletions
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 6069ca20a014..4909c7bb741b 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -180,6 +180,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 	u64 now_idle;
 	unsigned int new_freq;
 	unsigned int index;
+	unsigned long flags;
 
 	/*
 	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
@@ -280,18 +281,18 @@ static void cpufreq_interactive_timer(unsigned long data)
 
 	if (new_freq < pcpu->target_freq) {
 		pcpu->target_freq = new_freq;
-		spin_lock(&down_cpumask_lock);
+		spin_lock_irqsave(&down_cpumask_lock, flags);
 		cpumask_set_cpu(data, &down_cpumask);
-		spin_unlock(&down_cpumask_lock);
+		spin_unlock_irqrestore(&down_cpumask_lock, flags);
 		queue_work(down_wq, &freq_scale_down_work);
 	} else {
 		pcpu->target_freq = new_freq;
 #if DEBUG
 		up_request_time = ktime_to_us(ktime_get());
 #endif
-		spin_lock(&up_cpumask_lock);
+		spin_lock_irqsave(&up_cpumask_lock, flags);
 		cpumask_set_cpu(data, &up_cpumask);
-		spin_unlock(&up_cpumask_lock);
+		spin_unlock_irqrestore(&up_cpumask_lock, flags);
 		wake_up_process(up_task);
 	}
 
@@ -423,6 +424,7 @@ static int cpufreq_interactive_up_task(void *data)
 {
 	unsigned int cpu;
 	cpumask_t tmp_mask;
+	unsigned long flags;
 	struct cpufreq_interactive_cpuinfo *pcpu;
 
 #if DEBUG
@@ -433,16 +435,16 @@ static int cpufreq_interactive_up_task(void *data)
 
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		spin_lock(&up_cpumask_lock);
+		spin_lock_irqsave(&up_cpumask_lock, flags);
 
 		if (cpumask_empty(&up_cpumask)) {
-			spin_unlock(&up_cpumask_lock);
+			spin_unlock_irqrestore(&up_cpumask_lock, flags);
 			schedule();
 
 			if (kthread_should_stop())
 				break;
 
-			spin_lock(&up_cpumask_lock);
+			spin_lock_irqsave(&up_cpumask_lock, flags);
 		}
 
 		set_current_state(TASK_RUNNING);
@@ -461,7 +463,7 @@ static int cpufreq_interactive_up_task(void *data)
 
 		tmp_mask = up_cpumask;
 		cpumask_clear(&up_cpumask);
-		spin_unlock(&up_cpumask_lock);
+		spin_unlock_irqrestore(&up_cpumask_lock, flags);
 
 		for_each_cpu(cpu, &tmp_mask) {
 			pcpu = &per_cpu(cpuinfo, cpu);
@@ -488,12 +490,13 @@ static void cpufreq_interactive_freq_down(struct work_struct *work)
 {
 	unsigned int cpu;
 	cpumask_t tmp_mask;
+	unsigned long flags;
 	struct cpufreq_interactive_cpuinfo *pcpu;
 
-	spin_lock(&down_cpumask_lock);
+	spin_lock_irqsave(&down_cpumask_lock, flags);
 	tmp_mask = down_cpumask;
 	cpumask_clear(&down_cpumask);
-	spin_unlock(&down_cpumask_lock);
+	spin_unlock_irqrestore(&down_cpumask_lock, flags);
 
 	for_each_cpu(cpu, &tmp_mask) {
 		pcpu = &per_cpu(cpuinfo, cpu);
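For context, the pattern the patch adopts is the standard spin_lock_irqsave()/spin_unlock_irqrestore() pairing. Below is a minimal sketch (not taken from the driver; the example_* names are hypothetical) of why it is needed here: the same lock guards a cpumask that is touched from the governor's per-CPU timer, which runs in interrupt context, and from the speed-change thread and workqueue, which run in process context. If the thread side took the lock with a plain spin_lock() and the timer fired on that CPU while the lock was held, the timer handler would spin forever on a lock its own CPU already owns.

```c
/* Minimal sketch of the irqsave locking pattern, assuming a lock shared
 * between timer (interrupt) context and thread context. Names are
 * illustrative only, not from cpufreq_interactive.c. */
#include <linux/spinlock.h>
#include <linux/cpumask.h>

static DEFINE_SPINLOCK(example_cpumask_lock);
static cpumask_t example_cpumask;

/* Timer side: runs in interrupt context, records a CPU in the mask. */
static void example_timer_side(unsigned int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&example_cpumask_lock, flags);
	cpumask_set_cpu(cpu, &example_cpumask);
	spin_unlock_irqrestore(&example_cpumask_lock, flags);
}

/* Thread side: snapshots and clears the mask. Disabling local interrupts
 * while holding the lock prevents the timer from preempting this section
 * on the same CPU and deadlocking on the held lock. */
static void example_thread_side(cpumask_t *snapshot)
{
	unsigned long flags;

	spin_lock_irqsave(&example_cpumask_lock, flags);
	*snapshot = example_cpumask;
	cpumask_clear(&example_cpumask);
	spin_unlock_irqrestore(&example_cpumask_lock, flags);
}
```

The irqsave/irqrestore variants save the caller's interrupt state in `flags` rather than unconditionally re-enabling interrupts on unlock, which is why the patch also adds a local `unsigned long flags` in each function that takes the locks.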