Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/Kconfig            |  19
-rw-r--r--  drivers/cpuidle/Makefile           |   2
-rw-r--r--  drivers/cpuidle/coupled.c          |   2
-rw-r--r--  drivers/cpuidle/cpuidle-calxeda.c  | 161
-rw-r--r--  drivers/cpuidle/cpuidle.c          |  72
-rw-r--r--  drivers/cpuidle/cpuidle.h          |  13
-rw-r--r--  drivers/cpuidle/driver.c           | 222
-rw-r--r--  drivers/cpuidle/governors/menu.c   | 174
-rw-r--r--  drivers/cpuidle/sysfs.c            | 199
9 files changed, 725 insertions(+), 139 deletions(-)
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index a76b689e553b..c4cc27e5c8a5 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -9,6 +9,15 @@ config CPU_IDLE
If you're using an ACPI-enabled platform, you should say Y here.
+config CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Support multiple cpuidle drivers"
+ depends on CPU_IDLE
+ default n
+ help
+ Allows the cpuidle framework to use different drivers for each CPU.
+ This is useful if you have a system with different CPU latencies and
+ states. If unsure say N.
+
config CPU_IDLE_GOV_LADDER
bool
depends on CPU_IDLE
@@ -21,3 +30,13 @@ config CPU_IDLE_GOV_MENU
config ARCH_NEEDS_CPU_IDLE_COUPLED
def_bool n
+
+if CPU_IDLE
+
+config CPU_IDLE_CALXEDA
+ bool "CPU Idle Driver for Calxeda processors"
+ depends on ARCH_HIGHBANK
+ help
+ Select this to enable cpuidle on Calxeda processors.
+
+endif
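
The new CPU_IDLE_MULTIPLE_DRIVERS option lets a platform bind a distinct cpuidle driver to each CPU through the cpuidle_register_cpu_driver() helper added in driver.c further down. Below is a minimal sketch, not part of this patch, of what a platform init could look like with the option enabled; cpu_is_big(), big_cluster_idle_driver and little_cluster_idle_driver are placeholders, and the per-CPU cpuidle_device registration (done for real in cpuidle-calxeda.c) plus error unwinding are omitted for brevity.

static int __init example_asymmetric_idle_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		/* Pick a driver per CPU; both driver structs are hypothetical. */
		struct cpuidle_driver *drv = cpu_is_big(cpu) ?
			&big_cluster_idle_driver : &little_cluster_idle_driver;

		ret = cpuidle_register_cpu_driver(drv, cpu);
		if (ret)
			return ret;
	}

	return 0;
}
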
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 38c8f69f30cf..03ee87482c71 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,3 +4,5 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
+
+obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 3265844839bf..2a297f86dbad 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
int all;
int ret;
- all = coupled->online_count || (coupled->online_count << WAITING_BITS);
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
ret = atomic_add_unless(&coupled->ready_waiting_counts,
-MAX_WAITING_CPUS, all);
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
new file mode 100644
index 000000000000..e1aab38c5a8d
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * Based on arch/arm/plat-mxc/cpuidle.c:
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/smp_scu.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+
+extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
+extern void *scu_base_addr;
+
+static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
+
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+static noinline void calxeda_idle_restore(void)
+{
+ set_cr(get_cr() | CR_C);
+ set_auxcr(get_auxcr() | 0x40);
+ scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
+}
+
+static int calxeda_idle_finish(unsigned long val)
+{
+ /* Already flushed cache, but do it again as the outer cache functions
+ * dirty the cache with spinlocks */
+ flush_cache_all();
+
+ set_auxcr(get_auxcr() & ~0x40);
+ set_cr(get_cr() & ~CR_C);
+
+ scu_power_mode(scu_base_addr, SCU_PM_DORMANT);
+
+ cpu_do_idle();
+
+ /* Restore things if we didn't enter power-gating */
+ calxeda_idle_restore();
+ return 1;
+}
+
+static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ highbank_set_cpu_jump(smp_processor_id(), cpu_resume);
+ cpu_suspend(0, calxeda_idle_finish);
+ return index;
+}
+
+static void calxeda_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(calxeda_idle_cpuidle_devices);
+}
+
+static struct cpuidle_driver calxeda_idle_driver = {
+ .name = "calxeda_idle",
+ .en_core_tk_irqen = 1,
+ .states = {
+ ARM_CPUIDLE_WFI_STATE,
+ {
+ .name = "PG",
+ .desc = "Power Gate",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 30,
+ .power_usage = 50,
+ .target_residency = 200,
+ .enter = calxeda_pwrdown_idle,
+ },
+ },
+ .state_count = 2,
+};
+
+static int __init calxeda_cpuidle_init(void)
+{
+ int cpu_id;
+ int ret;
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv = &calxeda_idle_driver;
+
+ if (!of_machine_is_compatible("calxeda,highbank"))
+ return -ENODEV;
+
+ ret = cpuidle_register_driver(drv);
+ if (ret)
+ return ret;
+
+ calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (calxeda_idle_cpuidle_devices == NULL) {
+ ret = -ENOMEM;
+ goto unregister_drv;
+ }
+
+ /* initialize state data for each cpuidle_device */
+ for_each_possible_cpu(cpu_id) {
+ dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
+ dev->cpu = cpu_id;
+ dev->state_count = drv->state_count;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpu %u, error: %d\n",
+ cpu_id, ret);
+ goto uninit;
+ }
+ }
+
+ return 0;
+
+uninit:
+ calxeda_idle_cpuidle_devices_uninit();
+unregister_drv:
+ cpuidle_unregister_driver(drv);
+ return ret;
+}
+module_init(calxeda_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f15b8514a18..e1f6860e069c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -68,25 +68,16 @@ static cpuidle_enter_t cpuidle_enter_ops;
int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
- int i, dead_state = -1;
- int power_usage = -1;
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
-
- if (s->power_usage < power_usage && s->enter_dead) {
- power_usage = s->power_usage;
- dead_state = i;
- }
- }
-
- if (dead_state != -1)
- return drv->states[dead_state].enter_dead(dev, dead_state);
+ for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ if (drv->states[i].enter_dead)
+ return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}
@@ -109,8 +100,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* This can be moved to within driver enter routine
* but that results in multiple copies of same code.
*/
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {
dev->last_residency = 0;
@@ -128,7 +118,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
int next_state, entered_state;
if (off)
@@ -141,9 +131,15 @@ int cpuidle_idle_call(void)
if (!dev || !dev->enabled)
return -EBUSY;
+ drv = cpuidle_get_cpu_driver(dev);
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
+ dev->last_residency = 0;
+ /* give the governor an opportunity to reflect on the outcome */
+ if (cpuidle_curr_governor->reflect)
+ cpuidle_curr_governor->reflect(dev, next_state);
local_irq_enable();
return 0;
}
@@ -308,15 +304,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
int cpuidle_enable_device(struct cpuidle_device *dev)
{
int ret, i;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
if (!dev)
return -EINVAL;
if (dev->enabled)
return 0;
+
+ drv = cpuidle_get_cpu_driver(dev);
+
if (!drv || !cpuidle_curr_governor)
return -EIO;
+
if (!dev->state_count)
dev->state_count = drv->state_count;
@@ -331,7 +331,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
poll_idle_init(drv);
- if ((ret = cpuidle_add_state_sysfs(dev)))
+ ret = cpuidle_add_device_sysfs(dev);
+ if (ret)
return ret;
if (cpuidle_curr_governor->enable &&
@@ -352,7 +353,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return 0;
fail_sysfs:
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
return ret;
}
@@ -368,17 +369,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device);
*/
void cpuidle_disable_device(struct cpuidle_device *dev)
{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
if (!dev || !dev->enabled)
return;
- if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+
+ if (!drv || !cpuidle_curr_governor)
return;
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
- cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
+ cpuidle_curr_governor->disable(drv, dev);
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
enabled_devices--;
}
@@ -394,17 +398,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- if (!try_module_get(cpuidle_driver->owner))
+ if (!try_module_get(drv->owner))
return -EINVAL;
- init_completion(&dev->kobj_unregister);
-
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- ret = cpuidle_add_sysfs(cpu_dev);
+ ret = cpuidle_add_sysfs(dev);
if (ret)
goto err_sysfs;
@@ -416,12 +417,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
return 0;
err_coupled:
- cpuidle_remove_sysfs(cpu_dev);
- wait_for_completion(&dev->kobj_unregister);
+ cpuidle_remove_sysfs(dev);
err_sysfs:
list_del(&dev->device_list);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
return ret;
}
@@ -460,8 +460,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
if (dev->registered == 0)
return;
@@ -470,16 +469,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(cpu_dev);
+ cpuidle_remove_sysfs(dev);
list_del(&dev->device_list);
- wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
cpuidle_coupled_unregister_device(dev);
cpuidle_resume_and_unlock();
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 76e7f696ad8c..ee97e9672ecf 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,8 +5,6 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/device.h>
-
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct list_head cpuidle_governors;
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
+
+struct device;
+
extern int cpuidle_add_interface(struct device *dev);
extern void cpuidle_remove_interface(struct device *dev);
-extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
-extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct device *dev);
-extern void cpuidle_remove_sysfs(struct device *dev);
+extern int cpuidle_add_device_sysfs(struct cpuidle_device *device);
+extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device);
+extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
+extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 87db3877fead..422c7b69ba7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,37 +14,17 @@
#include "cpuidle.h"
-static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
-int cpuidle_driver_refcount;
-
-static void set_power_states(struct cpuidle_driver *drv)
-{
- int i;
-
- /*
- * cpuidle driver should set the drv->power_specified bit
- * before registering if the driver provides
- * power_usage numbers.
- *
- * If power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
- drv->states[i].power_usage = -1 - i;
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
+static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
+
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ drv->refcnt = 0;
}
-/**
- * cpuidle_register_driver - registers a driver
- * @drv: the driver
- */
-int cpuidle_register_driver(struct cpuidle_driver *drv)
+static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
{
if (!drv || !drv->state_count)
return -EINVAL;
@@ -52,31 +32,101 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver) {
- spin_unlock(&cpuidle_driver_lock);
+ if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY;
+
+ __cpuidle_driver_init(drv);
+
+ __cpuidle_set_cpu_driver(drv, cpu);
+
+ return 0;
+}
+
+static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
+{
+ if (drv != __cpuidle_get_cpu_driver(cpu))
+ return;
+
+ if (!WARN_ON(drv->refcnt > 0))
+ __cpuidle_set_cpu_driver(NULL, cpu);
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+
+static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers);
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ per_cpu(cpuidle_drivers, cpu) = drv;
+}
+
+static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
+{
+ return per_cpu(cpuidle_drivers, cpu);
+}
+
+static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ __cpuidle_unregister_driver(drv, cpu);
+}
+
+static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int ret = 0;
+ int i, cpu;
+
+ for_each_present_cpu(cpu) {
+ ret = __cpuidle_register_driver(drv, cpu);
+ if (ret)
+ break;
}
- if (!drv->power_specified)
- set_power_states(drv);
+ if (ret)
+ for_each_present_cpu(i) {
+ if (i == cpu)
+ break;
+ __cpuidle_unregister_driver(drv, i);
+ }
- cpuidle_curr_driver = drv;
+ return ret;
+}
+
+int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
- return 0;
+ return ret;
+}
+
+void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
}
-EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
- * cpuidle_get_driver - return the current driver
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
*/
-struct cpuidle_driver *cpuidle_get_driver(void)
+int cpuidle_register_driver(struct cpuidle_driver *drv)
{
- return cpuidle_curr_driver;
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
@@ -84,20 +134,88 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver);
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (drv != cpuidle_curr_driver) {
- WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
- drv->name);
- return;
- }
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+#else
+
+static struct cpuidle_driver *cpuidle_curr_driver;
+
+static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ cpuidle_curr_driver = drv;
+}
+
+static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
+{
+ return cpuidle_curr_driver;
+}
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+ int ret, cpu;
+ cpu = get_cpu();
spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
- if (!WARN_ON(cpuidle_driver_refcount > 0))
- cpuidle_curr_driver = NULL;
+/**
+ * cpuidle_unregister_driver - unregisters a driver
+ * @drv: the driver
+ */
+void cpuidle_unregister_driver(struct cpuidle_driver *drv)
+{
+ int cpu;
+ cpu = get_cpu();
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+#endif
+
+/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ struct cpuidle_driver *drv;
+ int cpu;
+
+ cpu = get_cpu();
+ drv = __cpuidle_get_cpu_driver(cpu);
+ put_cpu();
+
+ return drv;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
+ * cpuidle_get_cpu_driver - return the driver tied with a cpu
+ */
+struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
+{
+ if (!dev)
+ return NULL;
+
+ return __cpuidle_get_cpu_driver(dev->cpu);
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
struct cpuidle_driver *cpuidle_driver_ref(void)
{
@@ -105,8 +223,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
spin_lock(&cpuidle_driver_lock);
- drv = cpuidle_curr_driver;
- cpuidle_driver_refcount++;
+ drv = cpuidle_get_driver();
+ drv->refcnt++;
spin_unlock(&cpuidle_driver_lock);
return drv;
@@ -114,10 +232,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
void cpuidle_driver_unref(void)
{
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+
spin_lock(&cpuidle_driver_lock);
- if (!WARN_ON(cpuidle_driver_refcount <= 0))
- cpuidle_driver_refcount--;
+ if (drv && !WARN_ON(drv->refcnt <= 0))
+ drv->refcnt--;
spin_unlock(&cpuidle_driver_lock);
}
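
With the refactoring above, driver lookup is always per CPU: __cpuidle_get_cpu_driver() reads a per-cpu pointer when CPU_IDLE_MULTIPLE_DRIVERS is set and falls back to the single cpuidle_curr_driver otherwise, so cpuidle_get_cpu_driver(dev) behaves the same in both configurations and the refcount now lives in the driver itself (drv->refcnt). A small sketch of how core code is expected to use the new accessor; the function name is hypothetical.

static void example_show_cpu_driver(struct cpuidle_device *dev)
{
	/* Resolve the driver bound to this device's CPU (may differ per CPU). */
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return;

	pr_info("cpu%d: cpuidle driver %s, %d states, refcnt %d\n",
		dev->cpu, drv->name, drv->state_count, drv->refcnt);
}
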
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 5b1f2c372c1f..fe343a06b7da 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,6 +28,13 @@
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
+/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
+#define MAX_DEVIATION 60
+
+static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
+static DEFINE_PER_CPU(int, hrtimer_status);
+/* menu hrtimer mode */
+enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/*
* Concepts and ideas behind the menu governor
@@ -109,6 +116,13 @@
*
*/
+/*
+ * The C-state residency is so long that it is worthwhile to exit
+ * from the shallow C-state and re-enter into a deeper C-state.
+ */
+static unsigned int perfect_cstate_ms __read_mostly = 30;
+module_param(perfect_cstate_ms, uint, 0000);
+
struct menu_device {
int last_state_idx;
int needs_update;
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/* Cancel the hrtimer if it is not triggered yet */
+void menu_hrtimer_cancel(void)
+{
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
+
+ /* The timer has not expired yet */
+ if (per_cpu(hrtimer_status, cpu)) {
+ hrtimer_cancel(hrtmr);
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+ }
+}
+EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
+
+/* Call back for hrtimer is triggered */
+static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ int cpu = smp_processor_id();
+ struct menu_device *data = &per_cpu(menu_devices, cpu);
+
+ /* In the general case, the expected residency is much larger than
+ * the deepest C-state's target residency, but the prediction logic
+ * still predicted a short residency, so the prediction history is
+ * clearly broken if the timer fires.
+ * Reset the correction factor.
+ */
+ if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
+
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+
+ return HRTIMER_NORESTART;
+}
+
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static void detect_repeating_patterns(struct menu_device *data)
+static u32 get_typical_interval(struct menu_device *data)
{
- int i;
- uint64_t avg = 0;
- uint64_t stddev = 0; /* contains the square of the std deviation */
-
- /* first calculate average and standard deviation of the past */
- for (i = 0; i < INTERVALS; i++)
- avg += data->intervals[i];
- avg = avg / INTERVALS;
-
- /* if the avg is beyond the known next tick, it's worthless */
- if (avg > data->expected_us)
- return;
+ int i = 0, divisor = 0;
+ uint64_t max = 0, avg = 0, stddev = 0;
+ int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
+ unsigned int ret = 0;
- for (i = 0; i < INTERVALS; i++)
- stddev += (data->intervals[i] - avg) *
- (data->intervals[i] - avg);
+again:
- stddev = stddev / INTERVALS;
+ /* first calculate average and standard deviation of the past */
+ max = avg = divisor = stddev = 0;
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ avg += value;
+ divisor++;
+ if (value > max)
+ max = value;
+ }
+ }
+ do_div(avg, divisor);
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ int64_t diff = value - avg;
+ stddev += diff * diff;
+ }
+ }
+ do_div(stddev, divisor);
+ stddev = int_sqrt(stddev);
/*
- * now.. if stddev is small.. then assume we have a
- * repeating pattern and predict we keep doing this.
+ * If we have outliers to the upside in our distribution, discard
+ * those by setting the threshold to exclude these outliers, then
+ * calculate the average and standard deviation again. Once we get
+ * down to the bottom 3/4 of our samples, stop excluding samples.
+ *
+ * This can deal with workloads that have long pauses interspersed
+ * with sporadic activity with a bunch of short pauses.
+ *
+ * The typical interval is obtained when standard deviation is small
+ * or standard deviation is small compared to the average interval.
*/
-
- if (avg && stddev < STDDEV_THRESH)
+ if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
+ || stddev <= 20) {
data->predicted_us = avg;
+ ret = 1;
+ return ret;
+
+ } else if ((divisor * 4) > INTERVALS * 3) {
+ /* Exclude the max interval */
+ thresh = max - 1;
+ goto again;
+ }
+
+ return ret;
}
/**
@@ -236,10 +312,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- int power_usage = -1;
int i;
int multiplier;
struct timespec t;
+ int repeat = 0, low_predicted = 0;
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) {
menu_update(drv, dev);
@@ -274,7 +352,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- detect_repeating_patterns(data);
+ repeat = get_typical_interval(data);
/*
* We want to default to C1 (hlt), not to busy polling
@@ -295,18 +373,55 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
- if (s->target_residency > data->predicted_us)
+ if (s->target_residency > data->predicted_us) {
+ low_predicted = 1;
continue;
+ }
if (s->exit_latency > latency_req)
continue;
if (s->exit_latency * multiplier > data->predicted_us)
continue;
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
+ }
+
+ /* The deepest C-state was not chosen because of a low predicted residency */
+ if (low_predicted) {
+ unsigned int timer_us = 0;
+ unsigned int perfect_us = 0;
+
+ /*
+ * Set a timer to detect whether this sleep is much
+ * longer than repeat mode predicted. If the timer
+ * triggers, the code will evaluate whether to put
+ * the CPU into a deeper C-state.
+ * The timer is cancelled on CPU wakeup.
+ */
+ timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
+
+ perfect_us = perfect_cstate_ms * 1000;
+
+ if (repeat && (4 * timer_us < data->expected_us)) {
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+ /* In repeat case, menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
+ } else if (perfect_us < data->expected_us) {
+ /*
+ * The next timer is long. This could be because
+ * we did not make a useful prediction.
+ * In that case, it makes sense to re-enter
+ * into a deeper C-state after some time.
+ */
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+ /* In general case, menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
}
+
}
return data->last_state_idx;
@@ -399,6 +514,9 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
+ struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device));
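
The new get_typical_interval() repeatedly discards the largest sample until either the remaining points cluster tightly (standard deviation small in absolute terms or relative to the mean) or fewer than 3/4 of the samples remain. The following standalone illustration, not part of the patch, applies the same filtering to eight invented intervals, using plain libm math instead of the kernel's do_div()/int_sqrt() helpers.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

#define INTERVALS 8

/* Invented sample: seven short sleeps plus one long outlier (microseconds). */
static const int64_t intervals[INTERVALS] = {
	180, 210, 195, 200, 20000, 205, 190, 185
};

int main(void)
{
	int64_t thresh = INT64_MAX;

	for (;;) {
		int64_t max = 0, avg = 0, stddev = 0;
		int divisor = 0, i;

		/* Average and maximum of the samples below the threshold. */
		for (i = 0; i < INTERVALS; i++) {
			if (intervals[i] > thresh)
				continue;
			avg += intervals[i];
			divisor++;
			if (intervals[i] > max)
				max = intervals[i];
		}
		avg /= divisor;

		/* Standard deviation of the same subset. */
		for (i = 0; i < INTERVALS; i++) {
			if (intervals[i] <= thresh) {
				int64_t diff = intervals[i] - avg;
				stddev += diff * diff;
			}
		}
		stddev = (int64_t)sqrt((double)(stddev / divisor));

		if ((avg > 6 * stddev && 4 * divisor >= 3 * INTERVALS) ||
		    stddev <= 20) {
			printf("typical interval: %lld us (%d samples kept)\n",
			       (long long)avg, divisor);
			return 0;
		}
		if (4 * divisor <= 3 * INTERVALS) {
			printf("no stable pattern\n");
			return 0;
		}
		/* Drop the largest remaining sample and try again. */
		thresh = max - 1;
	}
}

With the invented samples above, the first pass is rejected because the 20000 us outlier blows up the deviation; once it is discarded, the remaining seven points average 195 us with a standard deviation of 10, which the governor would then use as predicted_us.
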
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 5f809e337b89..428754af6236 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/capability.h>
+#include <linux/device.h>
#include "cpuidle.h"
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = {
NULL
};
+struct cpuidle_state_kobj {
+ struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
@@ -356,14 +364,14 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
}
/**
- * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes
+ * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes
* @device: the target device
*/
-int cpuidle_add_state_sysfs(struct cpuidle_device *device)
+static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
for (i = 0; i < device->state_count; i++) {
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
- ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
- "state%d", i);
+ ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
+ &device->kobj, "state%d", i);
if (ret) {
kfree(kobj);
goto error_state;
@@ -393,10 +401,10 @@ error_state:
}
/**
- * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes
+ * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
* @device: the target device
*/
-void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
+static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
int i;
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
cpuidle_free_state_kobj(device, i);
}
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+#define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj)
+#define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr)
+
+#define define_one_driver_ro(_name, show) \
+ static struct cpuidle_driver_attr attr_driver_##_name = \
+ __ATTR(_name, 0644, show, NULL)
+
+struct cpuidle_driver_kobj {
+ struct cpuidle_driver *drv;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+struct cpuidle_driver_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_driver *, char *);
+ ssize_t (*store)(struct cpuidle_driver *, const char *, size_t);
+};
+
+static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
+{
+ ssize_t ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
+}
+
+static void cpuidle_driver_sysfs_release(struct kobject *kobj)
+{
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ complete(&driver_kobj->kobj_unregister);
+}
+
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->show)
+ ret = dattr->show(driver_kobj->drv, buf);
+
+ return ret;
+}
+
+static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->store)
+ ret = dattr->store(driver_kobj->drv, buf, size);
+
+ return ret;
+}
+
+define_one_driver_ro(name, show_driver_name);
+
+static const struct sysfs_ops cpuidle_driver_sysfs_ops = {
+ .show = cpuidle_driver_show,
+ .store = cpuidle_driver_store,
+};
+
+static struct attribute *cpuidle_driver_default_attrs[] = {
+ &attr_driver_name.attr,
+ NULL
+};
+
+static struct kobj_type ktype_driver_cpuidle = {
+ .sysfs_ops = &cpuidle_driver_sysfs_ops,
+ .default_attrs = cpuidle_driver_default_attrs,
+ .release = cpuidle_driver_sysfs_release,
+};
+
+/**
+ * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
+ * @device: the target device
+ */
+static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv;
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int ret;
+
+ kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL);
+ if (!kdrv)
+ return -ENOMEM;
+
+ kdrv->drv = drv;
+ init_completion(&kdrv->kobj_unregister);
+
+ ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
+ &dev->kobj, "driver");
+ if (ret) {
+ kfree(kdrv);
+ return ret;
+ }
+
+ kobject_uevent(&kdrv->kobj, KOBJ_ADD);
+ dev->kobj_driver = kdrv;
+
+ return ret;
+}
+
+/**
+ * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
+ * @device: the target device
+ */
+static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv = dev->kobj_driver;
+ kobject_put(&kdrv->kobj);
+ wait_for_completion(&kdrv->kobj_unregister);
+ kfree(kdrv);
+}
+#else
+static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ ;
+}
+#endif
+
+/**
+ * cpuidle_add_device_sysfs - adds device specific sysfs attributes
+ * @device: the target device
+ */
+int cpuidle_add_device_sysfs(struct cpuidle_device *device)
+{
+ int ret;
+
+ ret = cpuidle_add_state_sysfs(device);
+ if (ret)
+ return ret;
+
+ ret = cpuidle_add_driver_sysfs(device);
+ if (ret)
+ cpuidle_remove_state_sysfs(device);
+ return ret;
+}
+
+/**
+ * cpuidle_remove_device_sysfs - removes device specific sysfs attributes
+ * @device: the target device
+ */
+void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
+{
+ cpuidle_remove_driver_sysfs(device);
+ cpuidle_remove_state_sysfs(device);
+}
+
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
* @dev: the target device
*/
-int cpuidle_add_sysfs(struct device *cpu_dev)
+int cpuidle_add_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
- dev = per_cpu(cpuidle_devices, cpu);
+ init_completion(&dev->kobj_unregister);
+
error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev)
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
* @dev: the target device
*/
-void cpuidle_remove_sysfs(struct device *cpu_dev)
+void cpuidle_remove_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
-
- dev = per_cpu(cpuidle_devices, cpu);
kobject_put(&dev->kobj);
+ wait_for_completion(&dev->kobj_unregister);
}
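
When CPU_IDLE_MULTIPLE_DRIVERS is set, the new "driver" kobject under each CPU's cpuidle directory exposes the name of the driver bound to that CPU via the cpuidle_driver_attr machinery above. As a sketch only, a second read-only attribute could be built on the same define_one_driver_ro() helper; the "state_count" attribute below is hypothetical, only "name" exists in this patch.

static ssize_t show_driver_state_count(struct cpuidle_driver *drv, char *buf)
{
	ssize_t ret;

	spin_lock(&cpuidle_driver_lock);
	ret = sprintf(buf, "%d\n", drv ? drv->state_count : 0);
	spin_unlock(&cpuidle_driver_lock);

	return ret;
}

define_one_driver_ro(state_count, show_driver_state_count);

static struct attribute *cpuidle_driver_example_attrs[] = {
	&attr_driver_name.attr,
	&attr_driver_state_count.attr,
	NULL
};

Wired into ktype_driver_cpuidle's default_attrs, such an attribute would appear next to /sys/devices/system/cpu/cpuN/cpuidle/driver/name.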