Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig              |  57
-rw-r--r--  drivers/cpufreq/Kconfig.arm          |   8
-rw-r--r--  drivers/cpufreq/Makefile             |   4
-rw-r--r--  drivers/cpufreq/arm_big_little.c     |   7
-rw-r--r--  drivers/cpufreq/arm_big_little.h     |   5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c  |   1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c         |  72
-rw-r--r--  drivers/cpufreq/cpufreq.c            |  56
-rw-r--r--  drivers/cpufreq/exynos5440-cpufreq.c |   5
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c      |  11
-rw-r--r--  drivers/cpufreq/intel_pstate.c       | 146
-rw-r--r--  drivers/cpufreq/ls1x-cpufreq.c       | 223
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c        |   7
13 files changed, 510 insertions(+), 92 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..29b2ef5a68b9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
config CPU_FREQ_DEFAULT_GOV_POWERSAVE
bool "powersave"
- depends on EXPERT
select CPU_FREQ_GOV_POWERSAVE
help
Use the CPUFreq governor 'powersave' as default. This sets
@@ -183,6 +182,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
+comment "CPU frequency scaling drivers"
+
config CPUFREQ_DT
tristate "Generic DT based cpufreq driver"
depends on HAVE_CLK && OF
@@ -196,19 +197,19 @@ config CPUFREQ_DT
If in doubt, say N.
-menu "x86 CPU frequency scaling drivers"
-depends on X86
+if X86
source "drivers/cpufreq/Kconfig.x86"
-endmenu
+endif
-menu "ARM CPU frequency scaling drivers"
-depends on ARM || ARM64
+if ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
-endmenu
+endif
-menu "AVR32 CPU frequency scaling drivers"
-depends on AVR32
+if PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endif
+if AVR32
config AVR32_AT32AP_CPUFREQ
bool "CPU frequency driver for AT32AP"
depends on PLATFORM_AT32AP
@@ -216,12 +217,9 @@ config AVR32_AT32AP_CPUFREQ
help
This enables the CPU frequency driver for AT32AP processors.
If in doubt, say N.
+endif
-endmenu
-
-menu "CPUFreq processor drivers"
-depends on IA64
-
+if IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
depends on ACPI_PROCESSOR
@@ -232,12 +230,9 @@ config IA64_ACPI_CPUFREQ
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
+endif
-endmenu
-
-menu "MIPS CPUFreq processor drivers"
-depends on MIPS
-
+if MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
help
@@ -250,15 +245,18 @@ config LOONGSON2_CPUFREQ
If in doubt, say N.
-endmenu
+config LOONGSON1_CPUFREQ
+ tristate "Loongson1 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson1 processors which
+ support software-configurable CPU frequency.
-menu "PowerPC CPU frequency scaling drivers"
-depends on PPC32 || PPC64
-source "drivers/cpufreq/Kconfig.powerpc"
-endmenu
+ For details, take a look at <file:Documentation/cpu-freq/>.
-menu "SPARC CPU frequency scaling drivers"
-depends on SPARC64
+ If in doubt, say N.
+endif
+
+if SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
help
@@ -276,10 +274,9 @@ config SPARC_US2E_CPUFREQ
For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
-endmenu
+endif
-menu "SH CPU Frequency scaling"
-depends on SUPERH
+if SUPERH
config SH_CPU_FREQ
tristate "SuperH CPU Frequency driver"
help
@@ -293,7 +290,7 @@ config SH_CPU_FREQ
For details, take a look at <file:Documentation/cpu-freq>.
If unsure, say N.
-endmenu
+endif
endif
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 83a75dc84761..0f9a2c3c0e0d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -247,3 +247,11 @@ config ARM_TEGRA_CPUFREQ
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_PXA2xx_CPUFREQ
+ tristate "Intel PXA2xx CPUfreq driver"
+ depends on PXA27x || PXA25x
+ help
+ This adds the CPUFreq driver support for Intel PXA2xx SOCs.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..b3ca7b0b2c33 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -61,8 +61,7 @@ obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
-obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
-obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
+obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
@@ -98,6 +97,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index a46c223c2506..e1a6ba66a7f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -289,6 +289,8 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (arm_bL_ops->free_opp_table)
+ arm_bL_ops->free_opp_table(cpu_dev);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
@@ -337,7 +339,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
if (ret) {
dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto out;
+ goto free_opp_table;
}
name[12] = cluster + '0';
@@ -354,6 +356,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
ret = PTR_ERR(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+free_opp_table:
+ if (arm_bL_ops->free_opp_table)
+ arm_bL_ops->free_opp_table(cpu_dev);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..a211f7db9d32 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -25,13 +25,16 @@
struct cpufreq_arm_bL_ops {
char name[CPUFREQ_NAME_LEN];
- int (*get_transition_latency)(struct device *cpu_dev);
/*
* This must set opp table for cpu_dev in a similar way as done by
* of_init_opp_table().
*/
int (*init_opp_table)(struct device *cpu_dev);
+
+ /* Optional */
+ int (*get_transition_latency)(struct device *cpu_dev);
+ void (*free_opp_table)(struct device *cpu_dev);
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index efa8efae50d1..36d91dba2965 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -82,6 +82,7 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
.init_opp_table = dt_init_opp_table,
+ .free_opp_table = of_free_opp_table,
};
static int generic_bL_probe(struct platform_device *pdev)
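
The arm_big_little changes above split the ops into one mandatory hook (init_opp_table) and two optional ones (get_transition_latency and the new free_opp_table). A minimal sketch of how a platform glue driver could fill in the reworked structure, using a hypothetical "foo" platform (the foo_* names are illustrative, not part of the patch):

#include <linux/pm_opp.h>

#include "arm_big_little.h"

static int foo_init_opp_table(struct device *cpu_dev)
{
        /* Mandatory hook: populate the OPP table for this CPU device. */
        return of_init_opp_table(cpu_dev);
}

static struct cpufreq_arm_bL_ops foo_bL_ops = {
        .name           = "foo-bl",
        .init_opp_table = foo_init_opp_table,
        /* Optional hook; undoes whatever init_opp_table() added. */
        .free_opp_table = of_free_opp_table,
};

The platform's probe path would then call bL_cpufreq_register(&foo_bL_ops), exactly as arm_big_little_dt.c does.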
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 1d2990d7f365..f56147a1daed 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
+ unsigned long opp_freq;
+
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
@@ -67,13 +69,16 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
+ opp_freq = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
tol = volt * priv->voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
+ dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
+ opp_freq / 1000, volt);
}
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
@@ -89,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg))
+ if (!IS_ERR(cpu_reg) && volt_old > 0)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
return ret;
}
@@ -166,8 +171,8 @@ try_again:
if (ret == -EPROBE_DEFER)
dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
- cpu);
+ dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+ ret);
} else {
*cdev = cpu_dev;
*creg = cpu_reg;
@@ -181,7 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_dt_platform_data *pd;
struct cpufreq_frequency_table *freq_table;
- struct thermal_cooling_device *cdev;
struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
@@ -193,7 +197,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
if (ret) {
- pr_err("%s: Failed to allocate resources\n: %d", __func__, ret);
+ pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
return ret;
}
@@ -210,7 +214,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- goto out_put_node;
+ goto out_free_opp;
}
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -264,20 +268,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_priv;
}
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
- if (IS_ERR(cdev))
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
- else
- priv->cdev = cdev;
- }
-
priv->cpu_dev = cpu_dev;
priv->cpu_reg = cpu_reg;
policy->driver_data = priv;
@@ -287,7 +277,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (ret) {
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
ret);
- goto out_cooling_unregister;
+ goto out_free_cpufreq_table;
}
policy->cpuinfo.transition_latency = transition_latency;
@@ -300,12 +290,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
return 0;
-out_cooling_unregister:
- cpufreq_cooling_unregister(priv->cdev);
+out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
kfree(priv);
-out_put_node:
+out_free_opp:
+ of_free_opp_table(cpu_dev);
of_node_put(np);
out_put_reg_clk:
clk_put(cpu_clk);
@@ -319,8 +309,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- cpufreq_cooling_unregister(priv->cdev);
+ if (priv->cdev)
+ cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ of_free_opp_table(priv->cpu_dev);
clk_put(policy->clk);
if (!IS_ERR(priv->cpu_reg))
regulator_put(priv->cpu_reg);
@@ -329,6 +321,33 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
+static void cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ struct device_node *np = of_node_get(priv->cpu_dev->of_node);
+
+ if (WARN_ON(!np))
+ return;
+
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ priv->cdev = of_cpufreq_cooling_register(np,
+ policy->related_cpus);
+ if (IS_ERR(priv->cdev)) {
+ dev_err(priv->cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(priv->cdev));
+
+ priv->cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+}
+
static struct cpufreq_driver dt_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
@@ -336,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = cpufreq_init,
.exit = cpufreq_exit,
+ .ready = cpufreq_ready,
.name = "cpufreq-dt",
.attr = cpufreq_generic_attr,
};
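
The .ready callback wired up here is new core infrastructure (added in the cpufreq.c hunk that follows): the core invokes it only after the policy has been fully initialised and its kobject published, which is why the cooling-device registration moved out of cpufreq_init(). A minimal sketch of the hook from a driver's point of view, with a hypothetical "foo" driver whose foo_init() and foo_target_index() are assumed to exist elsewhere:

#include <linux/cpu.h>
#include <linux/cpufreq.h>

static void foo_cpufreq_ready(struct cpufreq_policy *policy)
{
        /* policy->related_cpus and the policy sysfs directory are valid here. */
        dev_info(get_cpu_device(policy->cpu), "cpufreq policy ready\n");
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .name           = "foo-cpufreq",
        .flags          = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = foo_target_index,
        .get            = cpufreq_generic_get,
        .init           = foo_init,
        .ready          = foo_cpufreq_ready,    /* optional */
        .attr           = cpufreq_generic_attr,
};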
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 644b54e1e7d1..a09a29c312a9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
- int ret; \
+ int ret, temp; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -546,8 +546,10 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
+ temp = new_policy.object; \
ret = cpufreq_set_policy(policy, &new_policy); \
- policy->user_policy.object = policy->object; \
+ if (!ret) \
+ policy->user_policy.object = temp; \
\
return ret ? ret : count; \
}
@@ -898,46 +900,31 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
struct freq_attr **drv_attr;
int ret = 0;
- /* prepare interface data */
- ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &dev->kobj, "cpufreq");
- if (ret)
- return ret;
-
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
if (ret)
- goto err_out_kobj_put;
+ return ret;
drv_attr++;
}
if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
if (ret)
- goto err_out_kobj_put;
+ return ret;
}
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
- goto err_out_kobj_put;
+ return ret;
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
if (ret)
- goto err_out_kobj_put;
+ return ret;
}
- ret = cpufreq_add_dev_symlink(policy);
- if (ret)
- goto err_out_kobj_put;
-
- return ret;
-
-err_out_kobj_put:
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
- return ret;
+ return cpufreq_add_dev_symlink(policy);
}
static void cpufreq_init_policy(struct cpufreq_policy *policy)
@@ -1022,7 +1009,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy->governor = NULL;
+ if (policy)
+ policy->governor = NULL;
return policy;
}
@@ -1195,6 +1183,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_set_policy_cpu;
}
+ down_write(&policy->rwsem);
+
/* related cpus should atleast have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1207,9 +1197,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
if (!recover_policy) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
+
+ /* prepare interface data */
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ &dev->kobj, "cpufreq");
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n",
+ __func__, ret);
+ goto err_init_policy_kobj;
+ }
}
- down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1287,8 +1285,13 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
+
up_read(&cpufreq_rwsem);
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
pr_debug("initialization complete\n");
return 0;
@@ -1300,6 +1303,11 @@ err_get_freq:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ if (!recover_policy) {
+ kobject_put(&policy->kobj);
+ wait_for_completion(&policy->kobj_unregister);
+ }
+err_init_policy_kobj:
up_write(&policy->rwsem);
if (cpufreq_driver->exit)
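
The store_##file_name change at the top of this hunk fixes an ordering bug: user_policy was updated from the live policy even when cpufreq_set_policy() had failed. Roughly what the fixed macro expands to for scaling_max_freq (a sketch of the expansion, not literal preprocessor output):

static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        int ret, temp;
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return -EINVAL;

        ret = sscanf(buf, "%u", &new_policy.max);
        if (ret != 1)
                return -EINVAL;

        /* Commit to user_policy only if the policy update succeeded. */
        temp = new_policy.max;
        ret = cpufreq_set_policy(policy, &new_policy);
        if (!ret)
                policy->user_policy.max = temp;

        return ret ? ret : count;
}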
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 3e7a03620f37..21a90ed7f3d8 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -371,7 +371,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
if (ret) {
dev_err(dvfs_info->dev,
"failed to init cpufreq table: %d\n", ret);
- goto err_put_node;
+ goto err_free_opp;
}
dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
exynos_sort_descend_freq_table();
@@ -423,6 +423,8 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
err_free_table:
dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+err_free_opp:
+ of_free_opp_table(dvfs_info->dev);
err_put_node:
of_node_put(np);
dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -433,6 +435,7 @@ static int exynos_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&exynos_driver);
dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ of_free_opp_table(dvfs_info->dev);
return 0;
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b2d0bc137f34..380a90d3c57e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -31,6 +31,7 @@ static struct clk *step_clk;
static struct clk *pll2_pfd2_396m_clk;
static struct device *cpu_dev;
+static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
@@ -207,11 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
- goto put_reg;
+ goto out_free_opp;
}
}
@@ -306,6 +310,9 @@ soc_opp_out:
free_freq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_opp:
+ if (free_opp)
+ of_free_opp_table(cpu_dev);
put_reg:
if (!IS_ERR(arm_reg))
regulator_put(arm_reg);
@@ -332,6 +339,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+ if (free_opp)
+ of_free_opp_table(cpu_dev);
regulator_put(arm_reg);
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
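
The exynos5440 and imx6q hunks above follow the same pattern: OPPs that the driver itself added from DT must be dropped again on every error path and in remove(), so a failed or deferred probe does not leak the table. The general shape, as a sketch with a hypothetical foo_probe() (cpu_dev and freq_table are assumed to be set up elsewhere in the driver):

static int foo_probe(struct platform_device *pdev)
{
        int ret;

        ret = of_init_opp_table(cpu_dev);
        if (ret)
                return ret;

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret)
                goto out_free_opp;

        return 0;

out_free_opp:
        of_free_opp_table(cpu_dev);     /* undo what we added above */
        return ret;
}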
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..1405b393c93d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
+static int hwp_active;
struct perf_limits {
int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
+#define PCT_TO_HWP(x) (x * 255 / 100)
+static void intel_pstate_hwp_set(void)
+{
+ int min, max, cpu;
+ u64 value, freq;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ min = PCT_TO_HWP(limits.min_perf_pct);
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(min);
+
+ max = PCT_TO_HWP(limits.max_perf_pct);
+ if (limits.no_turbo) {
+ rdmsrl(MSR_HWP_CAPABILITIES, freq);
+ max = HWP_GUARANTEED_PERF(freq);
+ }
+
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(max);
+ wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ }
+
+ put_online_cpus();
+}
+
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
struct dentry *debugfs_parent;
int i = 0;
+ if (hwp_active)
+ return;
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
return -EPERM;
}
+
limits.no_turbo = clamp_t(int, input, 0, 1);
+ if (hwp_active)
+ intel_pstate_hwp_set();
+
return count;
}
@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+ if (hwp_active)
+ intel_pstate_hwp_set();
return count;
}
@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits.min_perf_pct = clamp_t(int, input, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+ if (hwp_active)
+ intel_pstate_hwp_set();
return count;
}
@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
BUG_ON(rc);
}
-
/************************** sysfs end ************************/
+
+static void intel_pstate_hwp_enable(void)
+{
+ hwp_active++;
+ pr_info("intel_pstate HWP enabled\n");
+
+ wrmsrl(MSR_PM_ENABLE, 0x1);
+}
+
static int byt_get_min_pstate(void)
{
u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
cpu->prev_mperf = mperf;
}
+static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
+{
+ int delay;
+
+ delay = msecs_to_jiffies(50);
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
+static void intel_hwp_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+
+ intel_pstate_sample(cpu);
+ intel_hwp_set_sample_time(cpu);
+}
+
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
@@ -730,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x3f, core_params),
ICPU(0x45, core_params),
ICPU(0x46, core_params),
+ ICPU(0x47, core_params),
ICPU(0x4c, byt_params),
ICPU(0x4f, core_params),
ICPU(0x56, core_params),
@@ -737,6 +801,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
+ ICPU(0x56, core_params),
+ {}
+};
+
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -753,9 +822,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu);
init_timer_deferrable(&cpu->timer);
- cpu->timer.function = intel_pstate_timer_func;
cpu->timer.data = (unsigned long)cpu;
cpu->timer.expires = jiffies + HZ/100;
+
+ if (!hwp_active)
+ cpu->timer.function = intel_pstate_timer_func;
+ else
+ cpu->timer.function = intel_hwp_timer_func;
+
intel_pstate_busy_pid_reset(cpu);
intel_pstate_sample(cpu);
@@ -792,6 +866,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.no_turbo = 0;
return 0;
}
+
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +876,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+ if (hwp_active)
+ intel_pstate_hwp_set();
+
return 0;
}
@@ -823,6 +901,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_info("intel_pstate CPU %d exiting\n", cpu_num);
del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ if (hwp_active)
+ return;
+
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
@@ -866,6 +947,7 @@ static struct cpufreq_driver intel_pstate_driver = {
};
static int __initdata no_load;
+static int __initdata no_hwp;
static int intel_pstate_msrs_not_valid(void)
{
@@ -943,15 +1025,46 @@ static bool intel_pstate_no_acpi_pss(void)
return true;
}
+static bool intel_pstate_has_acpi_ppc(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+ if (acpi_has_method(pr->handle, "_PPC"))
+ return true;
+ }
+ return false;
+}
+
+enum {
+ PSS,
+ PPC,
+};
+
struct hw_vendor_info {
u16 valid;
char oem_id[ACPI_OEM_ID_SIZE];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+ int oem_pwr_table;
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
- {1, "HP ", "ProLiant"},
+ {1, "HP ", "ProLiant", PSS},
+ {1, "ORACLE", "X4-2 ", PPC},
+ {1, "ORACLE", "X4-2L ", PPC},
+ {1, "ORACLE", "X4-2B ", PPC},
+ {1, "ORACLE", "X3-2 ", PPC},
+ {1, "ORACLE", "X3-2L ", PPC},
+ {1, "ORACLE", "X3-2B ", PPC},
+ {1, "ORACLE", "X4470M2 ", PPC},
+ {1, "ORACLE", "X4270M3 ", PPC},
+ {1, "ORACLE", "X4270M2 ", PPC},
+ {1, "ORACLE", "X4170M2 ", PPC},
{0, "", ""},
};
@@ -959,6 +1072,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
+ const struct x86_cpu_id *id;
+ u64 misc_pwr;
+
+ id = x86_match_cpu(intel_pstate_cpu_oob_ids);
+ if (id) {
+ rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ if (misc_pwr & (1 << 8))
+ return true;
+ }
if (acpi_disabled ||
ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -966,15 +1088,21 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
for (v_info = vendor_info; v_info->valid; v_info++) {
if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
- !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- intel_pstate_no_acpi_pss())
- return true;
+ !strncmp(hdr.oem_table_id, v_info->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE))
+ switch (v_info->oem_pwr_table) {
+ case PSS:
+ return intel_pstate_no_acpi_pss();
+ case PPC:
+ return intel_pstate_has_acpi_ppc();
+ }
}
return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
@@ -982,6 +1110,7 @@ static int __init intel_pstate_init(void)
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_info;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
if (no_load)
return -ENODEV;
@@ -1011,6 +1140,9 @@ static int __init intel_pstate_init(void)
if (!all_cpu_data)
return -ENOMEM;
+ if (cpu_has(c, X86_FEATURE_HWP) && !no_hwp)
+ intel_pstate_hwp_enable();
+
rc = cpufreq_register_driver(&intel_pstate_driver);
if (rc)
goto out;
@@ -1041,6 +1173,8 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable"))
no_load = 1;
+ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
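
For the HWP additions, the central conversion is PCT_TO_HWP(): the sysfs percentages are mapped onto the 0..255 HWP performance scale (40% becomes 40 * 255 / 100 = 102, 100% becomes 255) before being shifted into the min/max fields of MSR_HWP_REQUEST. A sketch of that update as a pure helper; hwp_request_for_limits() is hypothetical, while PCT_TO_HWP(), HWP_MIN_PERF() and HWP_MAX_PERF() are the macros the patch itself uses:

static u64 hwp_request_for_limits(u64 value, int min_pct, int max_pct)
{
        /* e.g. min_pct = 40 -> 102, max_pct = 100 -> 255 */
        value &= ~(HWP_MIN_PERF(~0L) | HWP_MAX_PERF(~0L));
        value |= HWP_MIN_PERF(PCT_TO_HWP(min_pct));
        value |= HWP_MAX_PERF(PCT_TO_HWP(max_pct));
        return value;
}

intel_pstate_hwp_set() above does the equivalent per CPU: it reads the current MSR_HWP_REQUEST, rewrites the two fields and writes the value back.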
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/mach-loongson1/cpufreq.h>
+#include <asm/mach-loongson1/loongson1.h>
+
+static struct {
+ struct device *dev;
+ struct clk *clk; /* CPU clk */
+ struct clk *mux_clk; /* MUX of CPU clk */
+ struct clk *pll_clk; /* PLL clk */
+ struct clk *osc_clk; /* OSC clk */
+ unsigned int max_freq;
+ unsigned int min_freq;
+} ls1x_cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+ .notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = policy->freq_table[index].frequency;
+
+ /*
+ * The procedure of reconfiguring CPU clk is as below.
+ *
+ * - Reparent CPU clk to OSC clk
+ * - Reset CPU clock (very important)
+ * - Reconfigure CPU DIV
+ * - Reparent CPU clk back to CPU DIV clk
+ */
+
+ dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
+ clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+ LS1X_CLK_PLL_DIV);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+ LS1X_CLK_PLL_DIV);
+ clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
+ clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_tbl;
+ unsigned int pll_freq, freq;
+ int steps, i, ret;
+
+ pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
+
+ steps = 1 << DIV_CPU_WIDTH;
+ freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
+ if (!freq_tbl) {
+ dev_err(ls1x_cpufreq.dev,
+ "failed to alloc cpufreq_frequency_table\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < (steps - 1); i++) {
+ freq = pll_freq / (i + 1);
+ if ((freq < ls1x_cpufreq.min_freq) ||
+ (freq > ls1x_cpufreq.max_freq))
+ freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_tbl[i].frequency = freq;
+ dev_dbg(ls1x_cpufreq.dev,
+ "cpufreq table: index %d: frequency %d\n", i,
+ freq_tbl[i].frequency);
+ }
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ policy->clk = ls1x_cpufreq.clk;
+ ret = cpufreq_generic_init(policy, freq_tbl, 0);
+ if (ret)
+ kfree(freq_tbl);
+out:
+ return ret;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+ return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+ .name = "cpufreq-ls1x",
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ls1x_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = ls1x_cpufreq_init,
+ .exit = ls1x_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+ struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
+ struct clk *clk;
+ int ret;
+
+ if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
+ return -EINVAL;
+
+ ls1x_cpufreq.dev = &pdev->dev;
+
+ clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ pdata->clk_name);
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+ __clk_get_name(ls1x_cpufreq.clk));
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.mux_clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+ __clk_get_name(ls1x_cpufreq.mux_clk));
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.pll_clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ pdata->osc_clk_name);
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.osc_clk = clk;
+
+ ls1x_cpufreq.max_freq = pdata->max_freq;
+ ls1x_cpufreq.min_freq = pdata->min_freq;
+
+ ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+ if (ret) {
+ dev_err(ls1x_cpufreq.dev,
+ "failed to register cpufreq driver: %d\n", ret);
+ goto out;
+ }
+
+ ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ if (!ret)
+ goto out;
+
+ dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
+ ret);
+
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+out:
+ return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+ .driver = {
+ .name = "ls1x-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = ls1x_cpufreq_probe,
+ .remove = ls1x_cpufreq_remove,
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
+MODULE_LICENSE("GPL");
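
The ls1x driver takes its clock names and frequency limits from platform data rather than from DT. A sketch of the board-side registration it expects; the clock names and frequency bounds below are illustrative placeholders, not values from a real Loongson1 board file:

#include <linux/platform_device.h>

#include <asm/mach-loongson1/cpufreq.h>

static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
        .clk_name       = "cpu_clk",    /* CPU clock, as named by the clk driver */
        .osc_clk_name   = "osc_clk",    /* oscillator clock */
        .max_freq       = 266 * 1000,   /* kHz */
        .min_freq       = 33 * 1000,    /* kHz */
};

static struct platform_device ls1x_cpufreq_pdev = {
        .name   = "ls1x-cpufreq",
        .id     = -1,
        .dev    = {
                .platform_data = &ls1x_cpufreq_pdata,
        },
};

Board setup code would then call platform_device_register(&ls1x_cpufreq_pdev) to bind it to the platform driver above.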
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)
free_percpu(pcc_cpu_info);
}
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
MODULE_VERSION(PCC_VERSION);
MODULE_DESCRIPTION("Processor Clocking Control interface driver");