Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/acpi/Kconfig | 20
-rw-r--r-- drivers/acpi/Makefile | 1
-rw-r--r-- drivers/acpi/ac.c | 256
-rw-r--r-- drivers/acpi/acpi_ipmi.c | 580
-rw-r--r-- drivers/acpi/acpi_lpss.c | 12
-rw-r--r-- drivers/acpi/acpi_memhotplug.c | 7
-rw-r--r-- drivers/acpi/acpi_platform.c | 7
-rw-r--r-- drivers/acpi/acpi_processor.c | 28
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 8
-rw-r--r-- drivers/acpi/acpica/acevents.h | 9
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 20
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 11
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 31
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 6
-rw-r--r-- drivers/acpi/acpica/acutils.h | 17
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 2
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 2
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 5
-rw-r--r-- drivers/acpi/acpica/dsobject.c | 2
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/dsutils.c | 10
-rw-r--r-- drivers/acpi/acpica/dswexec.c | 6
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/evglock.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 3
-rw-r--r-- drivers/acpi/acpica/evgpeblk.c | 6
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeutil.c | 4
-rw-r--r-- drivers/acpi/acpica/evhandler.c | 4
-rw-r--r-- drivers/acpi/acpica/evmisc.c | 14
-rw-r--r-- drivers/acpi/acpica/evregion.c | 29
-rw-r--r-- drivers/acpi/acpica/evsci.c | 79
-rw-r--r-- drivers/acpi/acpica/evxface.c | 148
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 3
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 9
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 7
-rw-r--r-- drivers/acpi/acpica/excreate.c | 8
-rw-r--r-- drivers/acpi/acpica/exfield.c | 2
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 8
-rw-r--r-- drivers/acpi/acpica/exmisc.c | 4
-rw-r--r-- drivers/acpi/acpica/exoparg1.c | 8
-rw-r--r-- drivers/acpi/acpica/exoparg2.c | 10
-rw-r--r-- drivers/acpi/acpica/exoparg3.c | 4
-rw-r--r-- drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r-- drivers/acpi/acpica/exregion.c | 1
-rw-r--r-- drivers/acpi/acpica/exresolv.c | 2
-rw-r--r-- drivers/acpi/acpica/exresop.c | 2
-rw-r--r-- drivers/acpi/acpica/hwregs.c | 2
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 3
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 43
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 7
-rw-r--r-- drivers/acpi/acpica/nsaccess.c | 7
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 143
-rw-r--r-- drivers/acpi/acpica/nsdumpdv.c | 7
-rw-r--r-- drivers/acpi/acpica/nseval.c | 4
-rw-r--r-- drivers/acpi/acpica/nsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 2
-rw-r--r-- drivers/acpi/acpica/nspredef.c | 2
-rw-r--r-- drivers/acpi/acpica/nsprepkg.c | 4
-rw-r--r-- drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r-- drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r-- drivers/acpi/acpica/nssearch.c | 3
-rw-r--r-- drivers/acpi/acpica/nsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfeval.c | 23
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 7
-rw-r--r-- drivers/acpi/acpica/nsxfobj.c | 7
-rw-r--r-- drivers/acpi/acpica/psparse.c | 2
-rw-r--r-- drivers/acpi/acpica/psxface.c | 6
-rw-r--r-- drivers/acpi/acpica/rsmisc.c | 4
-rw-r--r-- drivers/acpi/acpica/rsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/rsxface.c | 3
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 18
-rw-r--r-- drivers/acpi/acpica/tbprint.c | 18
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 5
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 16
-rw-r--r-- drivers/acpi/acpica/tbxfload.c | 11
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 5
-rw-r--r-- drivers/acpi/acpica/utalloc.c | 117
-rw-r--r-- drivers/acpi/acpica/utcache.c | 2
-rw-r--r-- drivers/acpi/acpica/utcopy.c | 4
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 5
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 1
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 2
-rw-r--r-- drivers/acpi/acpica/uteval.c | 2
-rw-r--r-- drivers/acpi/acpica/utexcep.c | 3
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 20
-rw-r--r-- drivers/acpi/acpica/utids.c | 2
-rw-r--r-- drivers/acpi/acpica/utobject.c | 26
-rw-r--r-- drivers/acpi/acpica/utownerid.c | 2
-rw-r--r-- drivers/acpi/acpica/utresrc.c | 4
-rw-r--r-- drivers/acpi/acpica/utstate.c | 1
-rw-r--r-- drivers/acpi/acpica/utstring.c | 66
-rw-r--r-- drivers/acpi/acpica/uttrack.c | 31
-rw-r--r-- drivers/acpi/acpica/utxface.c | 45
-rw-r--r-- drivers/acpi/acpica/utxferror.c | 3
-rw-r--r-- drivers/acpi/acpica/utxfinit.c | 18
-rw-r--r-- drivers/acpi/battery.c | 328
-rw-r--r-- drivers/acpi/blacklist.c | 63
-rw-r--r-- drivers/acpi/button.c | 9
-rw-r--r-- drivers/acpi/cm_sbs.c | 105
-rw-r--r-- drivers/acpi/device_pm.c | 8
-rw-r--r-- drivers/acpi/dock.c | 31
-rw-r--r-- drivers/acpi/ec.c | 49
-rw-r--r-- drivers/acpi/event.c | 5
-rw-r--r-- drivers/acpi/fan.c | 2
-rw-r--r-- drivers/acpi/internal.h | 10
-rw-r--r-- drivers/acpi/numa.c | 4
-rw-r--r-- drivers/acpi/osl.c | 144
-rw-r--r-- drivers/acpi/pci_root.c | 51
-rw-r--r-- drivers/acpi/proc.c | 305
-rw-r--r-- drivers/acpi/processor_core.c | 26
-rw-r--r-- drivers/acpi/processor_driver.c | 4
-rw-r--r-- drivers/acpi/processor_idle.c | 15
-rw-r--r-- drivers/acpi/processor_perflib.c | 22
-rw-r--r-- drivers/acpi/sbs.c | 325
-rw-r--r-- drivers/acpi/scan.c | 176
-rw-r--r-- drivers/acpi/sysfs.c | 18
-rw-r--r-- drivers/acpi/thermal.c | 43
-rw-r--r-- drivers/acpi/utils.c | 21
-rw-r--r-- drivers/acpi/video.c | 461
-rw-r--r-- drivers/acpi/video_detect.c | 12
-rw-r--r-- drivers/base/cpu.c | 39
-rw-r--r-- drivers/base/dd.c | 2
-rw-r--r-- drivers/base/power/main.c | 73
-rw-r--r-- drivers/base/power/opp.c | 115
-rw-r--r-- drivers/base/power/runtime.c | 5
-rw-r--r-- drivers/cpufreq/Kconfig | 11
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 19
-rw-r--r-- drivers/cpufreq/Kconfig.powerpc | 6
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 13
-rw-r--r-- drivers/cpufreq/Makefile | 6
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 52
-rw-r--r-- drivers/cpufreq/arm_big_little.c | 453
-rw-r--r-- drivers/cpufreq/arm_big_little.h | 5
-rw-r--r-- drivers/cpufreq/arm_big_little_dt.c | 2
-rw-r--r-- drivers/cpufreq/at32ap-cpufreq.c | 106
-rw-r--r-- drivers/cpufreq/blackfin-cpufreq.c | 54
-rw-r--r-- drivers/cpufreq/cpufreq-cpu0.c | 119
-rw-r--r-- drivers/cpufreq/cpufreq-nforce2.c | 5
-rw-r--r-- drivers/cpufreq/cpufreq.c | 322
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 7
-rw-r--r-- drivers/cpufreq/cpufreq_governor.h | 5
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 1
-rw-r--r-- drivers/cpufreq/cpufreq_userspace.c | 11
-rw-r--r-- drivers/cpufreq/cris-artpec3-cpufreq.c | 64
-rw-r--r-- drivers/cpufreq/cris-etraxfs-cpufreq.c | 61
-rw-r--r-- drivers/cpufreq/davinci-cpufreq.c | 77
-rw-r--r-- drivers/cpufreq/dbx500-cpufreq.c | 78
-rw-r--r-- drivers/cpufreq/e_powersaver.c | 59
-rw-r--r-- drivers/cpufreq/elanfreq.c | 88
-rw-r--r-- drivers/cpufreq/exynos-cpufreq.c | 85
-rw-r--r-- drivers/cpufreq/exynos4210-cpufreq.c | 67
-rw-r--r-- drivers/cpufreq/exynos4x12-cpufreq.c | 69
-rw-r--r-- drivers/cpufreq/exynos5440-cpufreq.c | 67
-rw-r--r-- drivers/cpufreq/freq_table.c | 59
-rw-r--r-- drivers/cpufreq/gx-suspmod.c | 5
-rw-r--r-- drivers/cpufreq/highbank-cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/ia64-acpi-cpufreq.c | 71
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 117
-rw-r--r-- drivers/cpufreq/integrator-cpufreq.c | 14
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 255
-rw-r--r-- drivers/cpufreq/kirkwood-cpufreq.c | 107
-rw-r--r-- drivers/cpufreq/longhaul.c | 45
-rw-r--r-- drivers/cpufreq/longrun.c | 4
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 57
-rw-r--r-- drivers/cpufreq/maple-cpufreq.c | 56
-rw-r--r-- drivers/cpufreq/omap-cpufreq.c | 143
-rw-r--r-- drivers/cpufreq/p4-clockmod.c | 53
-rw-r--r-- drivers/cpufreq/pasemi-cpufreq.c | 51
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 15
-rw-r--r-- drivers/cpufreq/pmac32-cpufreq.c | 53
-rw-r--r-- drivers/cpufreq/pmac64-cpufreq.c | 70
-rw-r--r-- drivers/cpufreq/powernow-k6.c | 67
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 42
-rw-r--r-- drivers/cpufreq/powernow-k8.c | 52
-rw-r--r-- drivers/cpufreq/ppc-corenet-cpufreq.c | 54
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq.c | 50
-rw-r--r-- drivers/cpufreq/pxa2xx-cpufreq.c | 70
-rw-r--r-- drivers/cpufreq/pxa3xx-cpufreq.c | 46
-rw-r--r-- drivers/cpufreq/s3c2416-cpufreq.c | 67
-rw-r--r-- drivers/cpufreq/s3c24xx-cpufreq.c | 27
-rw-r--r-- drivers/cpufreq/s3c64xx-cpufreq.c | 81
-rw-r--r-- drivers/cpufreq/s5pv210-cpufreq.c | 86
-rw-r--r-- drivers/cpufreq/sa1100-cpufreq.c | 49
-rw-r--r-- drivers/cpufreq/sa1110-cpufreq.c | 46
-rw-r--r-- drivers/cpufreq/sc520_freq.c | 64
-rw-r--r-- drivers/cpufreq/sh-cpufreq.c | 22
-rw-r--r-- drivers/cpufreq/sparc-us2e-cpufreq.c | 42
-rw-r--r-- drivers/cpufreq/sparc-us3-cpufreq.c | 44
-rw-r--r-- drivers/cpufreq/spear-cpufreq.c | 64
-rw-r--r-- drivers/cpufreq/speedstep-centrino.c | 84
-rw-r--r-- drivers/cpufreq/speedstep-ich.c | 85
-rw-r--r-- drivers/cpufreq/speedstep-smi.c | 76
-rw-r--r-- drivers/cpufreq/tegra-cpufreq.c | 70
-rw-r--r-- drivers/cpufreq/unicore2-cpufreq.c | 5
-rw-r--r-- drivers/cpufreq/vexpress-spc-cpufreq.c | 70
-rw-r--r-- drivers/cpuidle/Kconfig.arm | 25
-rw-r--r-- drivers/cpuidle/Makefile | 3
-rw-r--r-- drivers/cpuidle/coupled.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-at91.c | 69
-rw-r--r-- drivers/cpuidle/cpuidle-ux500.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-zynq.c | 17
-rw-r--r-- drivers/cpuidle/cpuidle.c | 78
-rw-r--r-- drivers/cpuidle/driver.c | 67
-rw-r--r-- drivers/cpuidle/governor.c | 43
-rw-r--r-- drivers/cpuidle/sysfs.c | 7
-rw-r--r-- drivers/devfreq/devfreq.c | 29
-rw-r--r-- drivers/devfreq/exynos/exynos4_bus.c | 29
-rw-r--r-- drivers/devfreq/exynos/exynos5_bus.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_acpi.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 7
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid.c | 18
-rw-r--r-- drivers/i2c/i2c-core.c | 5
-rw-r--r-- drivers/idle/intel_idle.c | 18
-rw-r--r-- drivers/iommu/dmar.c | 4
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 8
-rw-r--r-- drivers/pci/hotplug/acpi_pcihp.c | 7
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 18
-rw-r--r-- drivers/pci/pci-acpi.c | 3
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 8
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 44
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 344
-rw-r--r-- drivers/platform/x86/intel-rst.c | 48
-rw-r--r-- drivers/platform/x86/intel-smartconnect.c | 27
-rw-r--r-- drivers/platform/x86/intel_menlow.c | 8
-rw-r--r-- drivers/platform/x86/sony-laptop.c | 28
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 31
-rw-r--r-- drivers/platform/x86/topstar-laptop.c | 8
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 44
-rw-r--r-- drivers/platform/x86/wmi.c | 30
-rw-r--r-- drivers/pnp/pnpacpi/core.c | 11
-rw-r--r-- drivers/powercap/Kconfig | 32
-rw-r--r-- drivers/powercap/Makefile | 2
-rw-r--r-- drivers/powercap/intel_rapl.c | 1395
-rw-r--r-- drivers/powercap/powercap_sys.c | 685
-rw-r--r-- drivers/spi/spi.c | 18
-rw-r--r-- drivers/thermal/Kconfig | 1
-rw-r--r-- drivers/video/backlight/backlight.c | 31
241 files changed, 6291 insertions, 6076 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8f451449abd3..b3138fbb46a4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -168,4 +168,6 @@ source "drivers/fmc/Kconfig"
source "drivers/phy/Kconfig"
+source "drivers/powercap/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 687da899cadb..3cc8214f9b26 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -154,3 +154,4 @@ obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_FMC) += fmc/
+obj-$(CONFIG_POWERCAP) += powercap/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e11faae81ed9..c95df0b8c880 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -56,23 +56,6 @@ config ACPI_PROCFS
Say N to delete /proc/acpi/ files that have moved to /sys/
-config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi directories"
- depends on PROC_FS
- help
- For backwards compatibility, this option allows
- deprecated power /proc/acpi/ directories to exist, even when
- they have been replaced by functions in /sys.
- The deprecated directories (and their replacements) include:
- /proc/acpi/battery/* (/sys/class/power_supply/*)
- /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
- This option has no effect on /proc/acpi/ directories
- and functions, which do not yet exist in /sys
- This option, together with the proc directories, will be
- deleted in 2.6.39.
-
- Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
To compile this driver as a module, choose M here:
the module will be called processor.
+
config ACPI_IPMI
tristate "IPMI"
- depends on IPMI_SI && IPMI_HANDLER
+ depends on IPMI_SI
default n
help
This driver enables the ACPI to access the BMC controller. And it
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce34afadcd0..0331f91d56e6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,7 +47,6 @@ acpi-y += sysfs.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index f37beaa32750..b9f0d5f4bba5 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,10 +30,7 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
- {"ACPI0003", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
static int ac_sleep_before_get_state_ms;
-static struct acpi_driver acpi_ac_driver = {
- .name = "ac",
- .class = ACPI_AC_CLASS,
- .ids = ac_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = acpi_ac_add,
- .remove = acpi_ac_remove,
- .notify = acpi_ac_notify,
- },
- .drv.pm = &acpi_ac_pm,
-};
-
struct acpi_ac {
struct power_supply charger;
- struct acpi_device * device;
+ struct acpi_device *adev;
+ struct platform_device *pdev;
unsigned long long state;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
- .owner = THIS_MODULE,
- .open = acpi_ac_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
/* --------------------------------------------------------------------------
AC Adapter Management
-------------------------------------------------------------------------- */
static int acpi_ac_get_state(struct acpi_ac *ac)
{
- acpi_status status = AE_OK;
-
-
- if (!ac)
- return -EINVAL;
+ acpi_status status;
- status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+ status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+ &ac->state);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Error reading AC Adapter state"));
ac->state = ACPI_AC_STATUS_UNKNOWN;
return -ENODEV;
}
@@ -160,91 +112,13 @@ static enum power_supply_property ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
- struct acpi_ac *ac = seq->private;
-
-
- if (!ac)
- return 0;
-
- if (acpi_ac_get_state(ac)) {
- seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
- return 0;
- }
-
- seq_puts(seq, "state: ");
- switch (ac->state) {
- case ACPI_AC_STATUS_OFFLINE:
- seq_puts(seq, "off-line\n");
- break;
- case ACPI_AC_STATUS_ONLINE:
- seq_puts(seq, "on-line\n");
- break;
- default:
- seq_puts(seq, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_ac_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- /* 'state' [R] */
- entry = proc_create_data(ACPI_AC_FILE_STATE,
- S_IRUGO, acpi_device_dir(device),
- &acpi_ac_fops, acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
- if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
- acpi_device_dir(device) = NULL;
- }
-
- return 0;
-}
-#endif
-
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct acpi_ac *ac = acpi_driver_data(device);
-
+ struct acpi_ac *ac = data;
if (!ac)
return;
@@ -267,10 +141,10 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event,
- (u32) ac->state);
- acpi_notifier_call_chain(device, event, (u32) ac->state);
+ acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+ dev_name(&ac->pdev->dev),
+ event, (u32) ac->state);
+ acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
}
@@ -295,53 +169,55 @@ static struct dmi_system_id ac_dmi_table[] = {
{},
};
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
{
int result = 0;
struct acpi_ac *ac = NULL;
+ struct acpi_device *adev;
-
- if (!device)
+ if (!pdev)
return -EINVAL;
+ result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (result)
+ return -ENODEV;
+
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
- ac->device = device;
- strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_AC_CLASS);
- device->driver_data = ac;
+ strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+ strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+ ac->adev = adev;
+ ac->pdev = pdev;
+ platform_set_drvdata(pdev, ac);
result = acpi_ac_get_state(ac);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_ac_add_fs(device);
-#endif
- if (result)
- goto end;
- ac->charger.name = acpi_device_bid(device);
+ ac->charger.name = acpi_device_bid(adev);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- result = power_supply_register(&ac->device->dev, &ac->charger);
+ result = power_supply_register(&pdev->dev, &ac->charger);
if (result)
goto end;
+ result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+ if (result) {
+ power_supply_unregister(&ac->charger);
+ goto end;
+ }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
+ acpi_device_name(adev), acpi_device_bid(adev),
ac->state ? "on-line" : "off-line");
- end:
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
+end:
+ if (result)
kfree(ac);
- }
dmi_check_system(ac_dmi_table);
return result;
@@ -356,7 +232,7 @@ static int acpi_ac_resume(struct device *dev)
if (!dev)
return -EINVAL;
- ac = acpi_driver_data(to_acpi_device(dev));
+ ac = platform_get_drvdata(to_platform_device(dev));
if (!ac)
return -EINVAL;
@@ -368,28 +244,44 @@ static int acpi_ac_resume(struct device *dev)
return 0;
}
#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
{
- struct acpi_ac *ac = NULL;
-
+ struct acpi_ac *ac;
- if (!device || !acpi_driver_data(device))
+ if (!pdev)
return -EINVAL;
- ac = acpi_driver_data(device);
+ acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+ ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_remove_fs(device);
-#endif
kfree(ac);
return 0;
}
+static const struct acpi_device_id acpi_ac_match[] = {
+ { "ACPI0003", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+ .probe = acpi_ac_probe,
+ .remove = acpi_ac_remove,
+ .driver = {
+ .name = "acpi-ac",
+ .owner = THIS_MODULE,
+ .pm = &acpi_ac_pm_ops,
+ .acpi_match_table = ACPI_PTR(acpi_ac_match),
+ },
+};
+
static int __init acpi_ac_init(void)
{
int result;
@@ -397,34 +289,16 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
+ result = platform_driver_register(&acpi_ac_driver);
+ if (result < 0)
return -ENODEV;
-#endif
-
- result = acpi_bus_register_driver(&acpi_ac_driver);
- if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
- return -ENODEV;
- }
return 0;
}
static void __exit acpi_ac_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
- return;
+ platform_driver_unregister(&acpi_ac_driver);
}
-
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
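
The ac.c hunks above replace the old struct acpi_driver registration with an ACPI-enumerated platform driver whose probe resolves the companion ACPI device and installs an explicit notify handler. For reference, a minimal sketch of that registration pattern is shown below; the "foo" names are hypothetical and the sketch is not code from this commit.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>

static int foo_probe(struct platform_device *pdev)
{
	struct acpi_device *adev;

	/* Resolve the ACPI companion behind the platform device. */
	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return -ENODEV;

	/* Device-specific setup (notify handler, power_supply, ...) goes here. */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* Undo whatever foo_probe() set up. */
	return 0;
}

static const struct acpi_device_id foo_acpi_match[] = {
	{ "ACPI0003", 0 },	/* matched against the device _HID */
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name = "foo",
		.owner = THIS_MODULE,
		.acpi_match_table = ACPI_PTR(foo_acpi_match),
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

Note that acpi_ac_probe()/acpi_ac_remove() in the hunks above follow this shape, with the addition of an explicit acpi_install_notify_handler()/acpi_remove_notify_handler() pair and an acpi_disabled check in the init path.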
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index a6977e12d574..ac0f52f6df2b 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -1,8 +1,9 @@
/*
* acpi_ipmi.c - ACPI IPMI opregion
*
- * Copyright (C) 2010 Intel Corporation
- * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ * Copyright (C) 2010, 2013 Intel Corporation
+ * Author: Zhao Yakui <yakui.zhao@intel.com>
+ * Lv Zheng <lv.zheng@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -23,60 +24,58 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
#include <linux/spinlock.h>
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");
-#define IPMI_FLAGS_HANDLER_INSTALL 0
-
#define ACPI_IPMI_OK 0
#define ACPI_IPMI_TIMEOUT 0x10
#define ACPI_IPMI_UNKNOWN 0x07
/* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT (5 * HZ)
+#define IPMI_TIMEOUT (5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH 64
struct acpi_ipmi_device {
/* the device list attached to driver_data.ipmi_devices */
struct list_head head;
+
/* the IPMI request message list */
struct list_head tx_msg_list;
- spinlock_t tx_msg_lock;
+
+ spinlock_t tx_msg_lock;
acpi_handle handle;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user_interface;
+ struct device *dev;
+ ipmi_user_t user_interface;
int ipmi_ifnum; /* IPMI interface number */
long curr_msgid;
- unsigned long flags;
- struct ipmi_smi_info smi_data;
+ bool dead;
+ struct kref kref;
};
struct ipmi_driver_data {
- struct list_head ipmi_devices;
- struct ipmi_smi_watcher bmc_events;
- struct ipmi_user_hndl ipmi_hndlrs;
- struct mutex ipmi_lock;
+ struct list_head ipmi_devices;
+ struct ipmi_smi_watcher bmc_events;
+ struct ipmi_user_hndl ipmi_hndlrs;
+ struct mutex ipmi_lock;
+
+ /*
+ * NOTE: IPMI System Interface Selection
+ * There is no system interface specified by the IPMI operation
+ * region access. We try to select one system interface with ACPI
+ * handle set. IPMI messages passed from the ACPI codes are sent
+ * to this selected global IPMI system interface.
+ */
+ struct acpi_ipmi_device *selected_smi;
};
struct acpi_ipmi_msg {
struct list_head head;
+
/*
* General speaking the addr type should be SI_ADDR_TYPE. And
* the addr channel should be BMC.
@@ -86,30 +85,31 @@ struct acpi_ipmi_msg {
*/
struct ipmi_addr addr;
long tx_msgid;
+
/* it is used to track whether the IPMI message is finished */
struct completion tx_complete;
+
struct kernel_ipmi_msg tx_message;
- int msg_done;
- /* tx data . And copy it from ACPI object buffer */
- u8 tx_data[64];
- int tx_len;
- u8 rx_data[64];
- int rx_len;
+ int msg_done;
+
+ /* tx/rx data . And copy it from/to ACPI object buffer */
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+ u8 rx_len;
+
struct acpi_ipmi_device *device;
+ struct kref kref;
};
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
u8 status;
u8 length;
- u8 data[64];
+ u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
static struct ipmi_driver_data driver_data = {
.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -121,29 +121,142 @@ static struct ipmi_driver_data driver_data = {
.ipmi_hndlrs = {
.ipmi_recv_hndl = ipmi_msg_handler,
},
+ .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
+{
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
+ ipmi_user_t user;
+
+ ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+ if (!ipmi_device)
+ return NULL;
+
+ kref_init(&ipmi_device->kref);
+ INIT_LIST_HEAD(&ipmi_device->head);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ spin_lock_init(&ipmi_device->tx_msg_lock);
+ ipmi_device->handle = handle;
+ ipmi_device->dev = get_device(dev);
+ ipmi_device->ipmi_ifnum = iface;
+
+ err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+ ipmi_device, &user);
+ if (err) {
+ put_device(dev);
+ kfree(ipmi_device);
+ return NULL;
+ }
+ ipmi_device->user_interface = user;
+
+ return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+ ipmi_destroy_user(ipmi_device->user_interface);
+ put_device(ipmi_device->dev);
+ kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_device *ipmi =
+ container_of(kref, struct acpi_ipmi_device, kref);
+
+ ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+ list_del(&ipmi_device->head);
+ if (driver_data.selected_smi == ipmi_device)
+ driver_data.selected_smi = NULL;
+
+ /*
+ * Always setting dead flag after deleting from the list or
+ * list_for_each_entry() codes must get changed.
+ */
+ ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+ struct acpi_ipmi_device *ipmi_device = NULL;
+
+ mutex_lock(&driver_data.ipmi_lock);
+ if (driver_data.selected_smi) {
+ ipmi_device = driver_data.selected_smi;
+ kref_get(&ipmi_device->kref);
+ }
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+ kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
+ struct acpi_ipmi_device *ipmi;
struct acpi_ipmi_msg *ipmi_msg;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+ ipmi = acpi_ipmi_dev_get();
+ if (!ipmi)
+ return NULL;
ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
- if (!ipmi_msg) {
- dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+ if (!ipmi_msg) {
+ acpi_ipmi_dev_put(ipmi);
return NULL;
}
+
+ kref_init(&ipmi_msg->kref);
init_completion(&ipmi_msg->tx_complete);
INIT_LIST_HEAD(&ipmi_msg->head);
ipmi_msg->device = ipmi;
+ ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
return ipmi_msg;
}
-#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
-#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
- acpi_physical_address address,
- acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+ acpi_ipmi_dev_put(tx_msg->device);
+ kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+ struct acpi_ipmi_msg *tx_msg =
+ container_of(kref, struct acpi_ipmi_msg, kref);
+
+ ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_get(&tx_msg->kref);
+
+ return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+ kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+ acpi_physical_address address,
+ acpi_integer *value)
{
struct kernel_ipmi_msg *msg;
struct acpi_ipmi_buffer *buffer;
@@ -151,21 +264,31 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
unsigned long flags;
msg = &tx_msg->tx_message;
+
/*
* IPMI network function and command are encoded in the address
* within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
*/
msg->netfn = IPMI_OP_RGN_NETFN(address);
msg->cmd = IPMI_OP_RGN_CMD(address);
- msg->data = tx_msg->tx_data;
+ msg->data = tx_msg->data;
+
/*
* value is the parameter passed by the IPMI opregion space handler.
* It points to the IPMI request message buffer
*/
buffer = (struct acpi_ipmi_buffer *)value;
+
/* copy the tx message data */
+ if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(tx_msg->device->dev, true,
+ "Unexpected request (msg len %d).\n",
+ buffer->length);
+ return -EINVAL;
+ }
msg->data_len = buffer->length;
- memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+ memcpy(tx_msg->data, buffer->data, msg->data_len);
+
/*
* now the default type is SYSTEM_INTERFACE and channel type is BMC.
* If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -179,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
/* Get the msgid */
device = tx_msg->device;
+
spin_lock_irqsave(&device->tx_msg_lock, flags);
device->curr_msgid++;
tx_msg->tx_msgid = device->curr_msgid;
spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+ return 0;
}
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
- acpi_integer *value, int rem_time)
+ acpi_integer *value)
{
struct acpi_ipmi_buffer *buffer;
@@ -195,110 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
* IPMI message returned by IPMI command.
*/
buffer = (struct acpi_ipmi_buffer *)value;
- if (!rem_time && !msg->msg_done) {
- buffer->status = ACPI_IPMI_TIMEOUT;
- return;
- }
+
/*
- * If the flag of msg_done is not set or the recv length is zero, it
- * means that the IPMI command is not executed correctly.
- * The status code will be ACPI_IPMI_UNKNOWN.
+ * If the flag of msg_done is not set, it means that the IPMI command is
+ * not executed correctly.
*/
- if (!msg->msg_done || !msg->rx_len) {
- buffer->status = ACPI_IPMI_UNKNOWN;
+ buffer->status = msg->msg_done;
+ if (msg->msg_done != ACPI_IPMI_OK)
return;
- }
+
/*
* If the IPMI response message is obtained correctly, the status code
* will be ACPI_IPMI_OK
*/
- buffer->status = ACPI_IPMI_OK;
buffer->length = msg->rx_len;
- memcpy(buffer->data, msg->rx_data, msg->rx_len);
+ memcpy(buffer->data, msg->data, msg->rx_len);
}
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
- struct acpi_ipmi_msg *tx_msg, *temp;
- int count = HZ / 10;
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+ struct acpi_ipmi_msg *tx_msg;
+ unsigned long flags;
+
+ /*
+ * NOTE: On-going ipmi_recv_msg
+ * ipmi_msg_handler() may still be invoked by ipmi_si after
+ * flushing. But it is safe to do a fast flushing on module_exit()
+ * without waiting for all ipmi_recv_msg(s) to complete from
+ * ipmi_msg_handler() as it is ensured by ipmi_si that all
+ * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
+ */
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ while (!list_empty(&ipmi->tx_msg_list)) {
+ tx_msg = list_first_entry(&ipmi->tx_msg_list,
+ struct acpi_ipmi_msg,
+ head);
+ list_del(&tx_msg->head);
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
- list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
/* wake up the sleep thread on the Tx msg */
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
}
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
+
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+ struct acpi_ipmi_msg *msg)
+{
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ bool msg_found = false;
+ unsigned long flags;
- /* wait for about 100ms to flush the tx message list */
- while (count--) {
- if (list_empty(&ipmi->tx_msg_list))
+ spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+ list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+ if (msg == tx_msg) {
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
- schedule_timeout(1);
+ }
}
- if (!list_empty(&ipmi->tx_msg_list))
- dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+ spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+ if (msg_found)
+ acpi_ipmi_msg_put(tx_msg);
}
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
struct acpi_ipmi_device *ipmi_device = user_msg_data;
- int msg_found = 0;
- struct acpi_ipmi_msg *tx_msg;
- struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+ bool msg_found = false;
+ struct acpi_ipmi_msg *tx_msg, *temp;
+ struct device *dev = ipmi_device->dev;
unsigned long flags;
if (msg->user != ipmi_device->user_interface) {
- dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
- "returned user %p, expected user %p\n",
- msg->user, ipmi_device->user_interface);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response is returned. returned user %p, expected user %p\n",
+ msg->user, ipmi_device->user_interface);
+ goto out_msg;
}
+
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
if (msg->msgid == tx_msg->tx_msgid) {
- msg_found = 1;
+ msg_found = true;
+ list_del(&tx_msg->head);
break;
}
}
-
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+
if (!msg_found) {
- dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
- "returned.\n", msg->msgid);
- ipmi_free_recv_msg(msg);
- return;
+ dev_warn(dev,
+ "Unexpected response (msg id %ld) is returned.\n",
+ msg->msgid);
+ goto out_msg;
}
- if (msg->msg.data_len) {
- /* copy the response data to Rx_data buffer */
- memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
- tx_msg->rx_len = msg->msg.data_len;
- tx_msg->msg_done = 1;
+ /* copy the response data to Rx_data buffer */
+ if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (msg len %d).\n",
+ msg->msg.data_len);
+ goto out_comp;
}
+
+ /* response msg is an error msg */
+ msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+ msg->msg.data_len == 1) {
+ if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+ dev_WARN_ONCE(dev, true,
+ "Unexpected response (timeout).\n");
+ tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+ }
+ goto out_comp;
+ }
+
+ tx_msg->rx_len = msg->msg.data_len;
+ memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+ tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
complete(&tx_msg->tx_complete);
+ acpi_ipmi_msg_put(tx_msg);
+out_msg:
ipmi_free_recv_msg(msg);
-};
+}
static void ipmi_register_bmc(int iface, struct device *dev)
{
struct acpi_ipmi_device *ipmi_device, *temp;
- struct pnp_dev *pnp_dev;
- ipmi_user_t user;
int err;
struct ipmi_smi_info smi_data;
acpi_handle handle;
err = ipmi_get_smi_info(iface, &smi_data);
-
if (err)
return;
- if (smi_data.addr_src != SI_ACPI) {
- put_device(smi_data.dev);
- return;
- }
-
+ if (smi_data.addr_src != SI_ACPI)
+ goto err_ref;
handle = smi_data.addr_info.acpi_info.acpi_handle;
+ if (!handle)
+ goto err_ref;
+
+ ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+ if (!ipmi_device) {
+ dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+ goto err_ref;
+ }
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -307,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
* to the device list, don't add it again.
*/
if (temp->handle == handle)
- goto out;
+ goto err_lock;
}
-
- ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
- if (!ipmi_device)
- goto out;
-
- pnp_dev = to_pnp_dev(smi_data.dev);
- ipmi_device->handle = handle;
- ipmi_device->pnp_dev = pnp_dev;
-
- err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
- ipmi_device, &user);
- if (err) {
- dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
- kfree(ipmi_device);
- goto out;
- }
- acpi_add_ipmi_device(ipmi_device);
- ipmi_device->user_interface = user;
- ipmi_device->ipmi_ifnum = iface;
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = ipmi_device;
+ list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
mutex_unlock(&driver_data.ipmi_lock);
- memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+ put_device(smi_data.dev);
return;
-out:
+err_lock:
mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_dev_release(ipmi_device);
+err_ref:
put_device(smi_data.dev);
return;
}
@@ -342,23 +502,29 @@ out:
static void ipmi_bmc_gone(int iface)
{
struct acpi_ipmi_device *ipmi_device, *temp;
+ bool dev_found = false;
mutex_lock(&driver_data.ipmi_lock);
list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- if (ipmi_device->ipmi_ifnum != iface)
- continue;
-
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
- break;
+ &driver_data.ipmi_devices, head) {
+ if (ipmi_device->ipmi_ifnum != iface) {
+ dev_found = true;
+ __ipmi_dev_kill(ipmi_device);
+ break;
+ }
}
+ if (!driver_data.selected_smi)
+ driver_data.selected_smi = list_first_entry_or_null(
+ &driver_data.ipmi_devices,
+ struct acpi_ipmi_device, head);
mutex_unlock(&driver_data.ipmi_lock);
+
+ if (dev_found) {
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+ }
}
-/* --------------------------------------------------------------------------
- * Address Space Management
- * -------------------------------------------------------------------------- */
+
/*
* This is the IPMI opregion space handler.
* @function: indicates the read/write. In fact as the IPMI message is driven
@@ -371,17 +537,17 @@ static void ipmi_bmc_gone(int iface)
* the response IPMI message returned by IPMI command.
* @handler_context: IPMI device context.
*/
-
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
- u32 bits, acpi_integer *value,
- void *handler_context, void *region_context)
+ u32 bits, acpi_integer *value,
+ void *handler_context, void *region_context)
{
struct acpi_ipmi_msg *tx_msg;
- struct acpi_ipmi_device *ipmi_device = handler_context;
- int err, rem_time;
+ struct acpi_ipmi_device *ipmi_device;
+ int err;
acpi_status status;
unsigned long flags;
+
/*
* IPMI opregion message.
* IPMI message is firstly written to the BMC and system software
@@ -391,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
if ((function & ACPI_IO_MASK) == ACPI_READ)
return AE_TYPE;
- if (!ipmi_device->user_interface)
+ tx_msg = ipmi_msg_alloc();
+ if (!tx_msg)
return AE_NOT_EXIST;
+ ipmi_device = tx_msg->device;
- tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
- if (!tx_msg)
- return AE_NO_MEMORY;
+ if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+ ipmi_msg_release(tx_msg);
+ return AE_TYPE;
+ }
- acpi_format_ipmi_msg(tx_msg, address, value);
+ acpi_ipmi_msg_get(tx_msg);
+ mutex_lock(&driver_data.ipmi_lock);
+ /* Do not add a tx_msg that can not be flushed. */
+ if (ipmi_device->dead) {
+ mutex_unlock(&driver_data.ipmi_lock);
+ ipmi_msg_release(tx_msg);
+ return AE_NOT_EXIST;
+ }
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ mutex_unlock(&driver_data.ipmi_lock);
+
err = ipmi_request_settime(ipmi_device->user_interface,
- &tx_msg->addr,
- tx_msg->tx_msgid,
- &tx_msg->tx_message,
- NULL, 0, 0, 0);
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+ &tx_msg->tx_message,
+ NULL, 0, 0, IPMI_TIMEOUT);
if (err) {
status = AE_ERROR;
- goto end_label;
+ goto out_msg;
}
- rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
- IPMI_TIMEOUT);
- acpi_format_ipmi_response(tx_msg, value, rem_time);
+ wait_for_completion(&tx_msg->tx_complete);
+
+ acpi_format_ipmi_response(tx_msg, value);
status = AE_OK;
-end_label:
- spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
- list_del(&tx_msg->head);
- spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
- kfree(tx_msg);
+out_msg:
+ ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+ acpi_ipmi_msg_put(tx_msg);
return status;
}
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
- if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
- return;
-
- acpi_remove_address_space_handler(ipmi->handle,
- ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
- clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+static int __init acpi_ipmi_init(void)
{
+ int result;
acpi_status status;
- if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+ if (acpi_disabled)
return 0;
- status = acpi_install_address_space_handler(ipmi->handle,
+ status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_IPMI,
&acpi_ipmi_space_handler,
- NULL, ipmi);
+ NULL, NULL);
if (ACPI_FAILURE(status)) {
- struct pnp_dev *pnp_dev = ipmi->pnp_dev;
- dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
- "handle\n");
+ pr_warn("Can't register IPMI opregion space handle\n");
return -EINVAL;
}
- set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
- return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
- INIT_LIST_HEAD(&ipmi_device->head);
-
- spin_lock_init(&ipmi_device->tx_msg_lock);
- INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
- ipmi_install_space_handler(ipmi_device);
-
- list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
- /*
- * If the IPMI user interface is created, it should be
- * destroyed.
- */
- if (ipmi_device->user_interface) {
- ipmi_destroy_user(ipmi_device->user_interface);
- ipmi_device->user_interface = NULL;
- }
- /* flush the Tx_msg list */
- if (!list_empty(&ipmi_device->tx_msg_list))
- ipmi_flush_tx_msg(ipmi_device);
-
- list_del(&ipmi_device->head);
- ipmi_remove_space_handler(ipmi_device);
-}
-
-static int __init acpi_ipmi_init(void)
-{
- int result = 0;
-
- if (acpi_disabled)
- return result;
-
- mutex_init(&driver_data.ipmi_lock);
-
result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+ if (result)
+ pr_err("Can't register IPMI system interface watcher\n");
return result;
}
static void __exit acpi_ipmi_exit(void)
{
- struct acpi_ipmi_device *ipmi_device, *temp;
+ struct acpi_ipmi_device *ipmi_device;
if (acpi_disabled)
return;
@@ -516,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
* handler and free it.
*/
mutex_lock(&driver_data.ipmi_lock);
- list_for_each_entry_safe(ipmi_device, temp,
- &driver_data.ipmi_devices, head) {
- acpi_remove_ipmi_device(ipmi_device);
- put_device(ipmi_device->smi_data.dev);
- kfree(ipmi_device);
+ while (!list_empty(&driver_data.ipmi_devices)) {
+ ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+ struct acpi_ipmi_device,
+ head);
+ __ipmi_dev_kill(ipmi_device);
+ mutex_unlock(&driver_data.ipmi_lock);
+
+ ipmi_flush_tx_msg(ipmi_device);
+ acpi_ipmi_dev_put(ipmi_device);
+
+ mutex_lock(&driver_data.ipmi_lock);
}
mutex_unlock(&driver_data.ipmi_lock);
+ acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+ ACPI_ADR_SPACE_IPMI,
+ &acpi_ipmi_space_handler);
}
module_init(acpi_ipmi_init);
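
The acpi_ipmi.c rework above keys both device and message lifetimes to struct kref. Reduced to its core, that reference-counting pattern looks like the hedged sketch below (hypothetical "foo" object, not code from this commit):

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);		/* refcount starts at 1 */
	return obj;
}

static void foo_release(struct kref *kref)
{
	struct foo *obj = container_of(kref, struct foo, kref);

	kfree(obj);				/* last reference dropped */
}

static struct foo *foo_get(struct foo *obj)
{
	kref_get(&obj->kref);			/* take a reference */
	return obj;
}

static void foo_put(struct foo *obj)
{
	kref_put(&obj->kref, foo_release);	/* may free obj */
}

Every path that stores a pointer takes a reference and balances it with a put; acpi_ipmi_dev_get()/acpi_ipmi_dev_put() and acpi_ipmi_msg_get()/acpi_ipmi_msg_put() in the hunks above are instances of this shape.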
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index fb78bb9ad8f6..d3961014aad7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_TX_INT 0x20
@@ -68,11 +69,16 @@ struct lpss_private_data {
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
- unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ unsigned int offset;
u32 reg;
- reg = readl(pdata->mmio_base + tx_int_offset);
- writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+ offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+ offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+ reg = readl(pdata->mmio_base + offset);
+ writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
}
static struct lpss_device_desc lpt_dev_desc = {
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 999adb5499c7..551dad712ffe 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
unsigned long long current_status;
/* Get device present/absent information from the _STA */
- if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
- NULL, &current_status)))
+ if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+ METHOD_NAME__STA, NULL,
+ &current_status)))
return -ENODEV;
/*
* Check for device status. Device should be
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
if (!info->enabled)
continue;
- if (nid < 0)
+ if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(info->start_addr);
acpi_unbind_memory_blocks(info, handle);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 1bde12708f9e..8a4cfc7e71f0 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform");
static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
+ { "ACPI0003" },
+ { "VPC2004" },
+ { "BCM4752" },
+
+ /* Intel Smart Sound Technology */
+ { "INT33C8" },
+ { "80860F28" },
{ }
};
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f29e06efa479..3c1d6b0c09a4 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
return 0;
}
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
{
int result = 0;
struct pci_dev *dev = NULL;
-
- if (!pr)
- return -EINVAL;
-
/*
* PIIX4
*/
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, &pr->id);
+ ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
if (ret)
goto out;
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
+ unsigned long long value;
- if (num_online_cpus() > 1)
- errata.smp = TRUE;
-
- acpi_processor_errata(pr);
+ acpi_processor_errata();
/*
* Check to see if we have bus mastering arbitration control. This
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device)
return -ENODEV;
}
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
pr->acpi_id = object.processor.proc_id;
} else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
*/
- unsigned long long value;
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
device_declaration = 1;
pr->acpi_id = value;
}
- cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+ pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+ pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
* ensure we get the right value in the "physical id" field
* of /proc/cpuinfo
*/
- status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+ status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
if (ACPI_SUCCESS(status))
- arch_fix_phys_package_id(pr->id, object.integer.value);
+ arch_fix_phys_package_id(pr->id, value);
return 0;
}
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 9feba08c29fe..a9fd0b872062 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -114,10 +114,12 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
acpi_db_generate_gpe(char *gpe_arg,
char *block_arg))
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
+
/*
* dbconvert - miscellaneous conversion routines
*/
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
@@ -154,6 +156,8 @@ void acpi_db_set_scope(char *name);
void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+void acpi_db_dump_namespace_paths(void);
+
void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
acpi_status acpi_db_find_name_in_namespace(char *name_arg);
@@ -240,6 +244,8 @@ void acpi_db_display_history(void);
char *acpi_db_get_from_history(char *command_num_arg);
+char *acpi_db_get_history_by_index(u32 commandd_num);
+
/*
* dbinput - user front-end to the AML debugger
*/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ab0e97710381..41abe552c7a3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -71,7 +71,8 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
- ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
+ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
@@ -242,11 +243,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
*/
u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
-u32 acpi_ev_install_sci_handler(void);
+u32 acpi_ev_sci_dispatch(void);
-acpi_status acpi_ev_remove_sci_handler(void);
+u32 acpi_ev_install_sci_handler(void);
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
#endif /* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 90e846f985fa..e9f1fc7f99c7 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
/* Owner ID support */
@@ -405,7 +406,9 @@ extern u32 acpi_gbl_nesting_level;
/* Event counters */
+ACPI_EXTERN u32 acpi_method_count;
ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_sci_count;
ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
/* Support for dynamic control method tracing mechanism */
@@ -445,13 +448,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
ACPI_EXTERN char *acpi_gbl_db_buffer;
ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +455,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level;
ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
/*
* Statistic globals
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0ed00669cd21..53ed1a8ba4f0 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info {
*
****************************************************************************/
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+ struct acpi_sci_handler_info *next;
+ acpi_sci_handler address; /* Address of handler */
+ void *context; /* Context to be passed to handler */
+};
+
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
struct acpi_gpe_handler_info {
@@ -1064,7 +1072,7 @@ struct acpi_db_method_info {
char *name;
u32 flags;
u32 num_loops;
- char pathname[128];
+ char pathname[ACPI_DB_LINE_BUFFER_SIZE];
char **args;
acpi_object_type *types;
@@ -1086,6 +1094,7 @@ struct acpi_integrity_info {
u32 objects;
};
+#define ACPI_DB_DISABLE_OUTPUT 0x00
#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
#define ACPI_DB_CONSOLE_OUTPUT 0x02
#define ACPI_DB_DUPLICATE_OUTPUT 0x03
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 530a2f8c1252..2a86c65d873b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -410,37 +410,6 @@
#endif
/*
- * Memory allocation tracking (DEBUG ONLY)
- */
-#define ACPI_MEM_PARAMETERS _COMPONENT, _acpi_module_name, __LINE__
-
-#ifndef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Memory allocation */
-
-#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_FREE
-#define ACPI_FREE(a) acpi_os_free(a)
-#endif
-#define ACPI_MEM_TRACKING(a)
-
-#else
-
-/* Memory allocation */
-
-#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
-#define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
-#define ACPI_MEM_TRACKING(a) a
-
-#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-
-/*
* Macros used for ACPICA utilities only
*/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 40b04bd5579e..e6138ac4a160 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type,
u8 display_type,
u32 max_depth,
acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle);
#endif /* ACPI_FUTURE_USAGE */
/*
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index d5a62a6182bb..be8180c17d7e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -628,6 +628,17 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position);
void acpi_ut_repair_name(char *name);
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length);
+#endif
+
/*
* utmutex - mutex support
*/
@@ -652,12 +663,6 @@ acpi_status
acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
acpi_size required_length);
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line);
-
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
void *acpi_ut_allocate_and_track(acpi_size size,
u32 component, const char *module, u32 line);
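The acpi_ut_safe_str* prototypes added above accompany the buffer-size changes in acglobal.h and acpi_db_method_info: copies into the fixed-size debugger buffers go through helpers that check the destination size first. As a hedged sketch only (the exact body and the TRUE-on-overflow return convention are assumptions, not shown in this diff; the usual ACPICA types and C library string routines are presumed available), a bounded concatenation of that shape would be:

	/* Sketch of a bounded strcat; assumes TRUE is returned when the copy would overflow */
	u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
	{
		if ((strlen(dest) + strlen(source)) >= dest_size) {
			return (TRUE);	/* Refuse the copy rather than overflow dest */
		}

		strcat(dest, source);
		return (FALSE);
	}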
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index fb09b08d7080..afdc6df17abf 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -158,7 +158,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
walk_state->deferred_node = node;
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index d4bfe7b7f90a..2d4c07322576 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -259,7 +259,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
goto cleanup;
}
- cleanup:
+cleanup:
/* Remove local reference to the object */
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index a9ffd44c18fe..81a78ba84311 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -292,9 +292,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
* reentered one more time (even if it is the same thread)
*/
obj_desc->method.thread_count++;
+ acpi_method_count++;
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, must release the method mutex (if present) */
if (obj_desc->method.mutex) {
@@ -424,7 +425,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/* On error, we must terminate the method properly */
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 63f0d220ca3d..b1746a68dad1 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -240,7 +240,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
- exit:
+exit:
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 1fc1ff114f26..5205edcf2c01 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -257,7 +257,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
(buffer_desc->common.reference_count +
obj_desc->common.reference_count);
- cleanup:
+cleanup:
/* Always delete the operands */
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index c666fc014987..ade44e49deb4 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -299,7 +299,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
goto result_used;
}
- result_used:
+result_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -308,7 +308,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
return_UINT8(TRUE);
- result_not_used:
+result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Result of [%s] not used by Parent [%s] Op=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
@@ -752,7 +752,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- cleanup:
+cleanup:
/*
* We must undo everything done above; meaning that we must
* pop everything off of the operand stack and delete those
@@ -851,7 +851,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
goto exit;
}
- push_result:
+push_result:
walk_state->result_obj = new_obj_desc;
@@ -863,7 +863,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
op->common.flags |= ACPI_PARSEOP_IN_STACK;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 151d924817e1..1bbb22fd6fa0 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -170,7 +170,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
- cleanup:
+cleanup:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
walk_state->control_state->common.value,
@@ -335,7 +335,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
status = acpi_ds_method_error(status, walk_state);
return_ACPI_STATUS(status);
}
@@ -722,7 +722,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->result_obj = NULL;
}
- cleanup:
+cleanup:
if (walk_state->result_obj) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b1f8f4725c23..7f569d573027 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -728,7 +728,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Remove the Node pushed at the very beginning */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fdb0a76e40a3..4c67193a9fa7 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -173,7 +173,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_gbl_global_lock_pending = FALSE;
- cleanup_and_exit:
+cleanup_and_exit:
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
return (ACPI_INTERRUPT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index c8a1f7d5931f..a9cb4a1a4bb8 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -458,7 +458,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_block = gpe_block->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return (int_status);
@@ -522,6 +522,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(local_gpe_event_info);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index c1aa1eda26c3..a9e76bc4ad97 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -111,7 +111,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
gpe_block->xrupt_block = gpe_xrupt_block;
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -178,7 +178,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
ACPI_FREE(gpe_block->event_info);
ACPI_FREE(gpe_block);
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -302,7 +302,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
if (gpe_register_info) {
ACPI_FREE(gpe_register_info);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7842700346a4..a3e2f38aadf6 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -203,7 +203,7 @@ acpi_status acpi_ev_gpe_initialize(void)
goto cleanup;
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index b24dbb80fab8..d3f5e1e2a2b1 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -101,7 +101,7 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
gpe_xrupt_info = gpe_xrupt_info->next;
}
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
*
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 068af96134b8..e3157313eb27 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -129,7 +129,7 @@ acpi_status acpi_ev_install_region_handlers(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -531,6 +531,6 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
acpi_ev_install_handler, NULL,
handler_obj, NULL);
- unlock_and_exit:
+unlock_and_exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 1b111ef74903..a5687540e9a6 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void)
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
- /* Remove SCI handler */
-
- status = acpi_ev_remove_sci_handler();
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
- }
-
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void)
acpi_gbl_events_initialized = FALSE;
}
+ /* Remove SCI handlers */
+
+ status = acpi_ev_remove_all_sci_handlers();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+ }
+
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cea14d6fc76c..144cbb9b73bc 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
- if (region_obj2->extra.region_context) {
-
- /* The handler for this region was already installed */
-
- ACPI_FREE(region_context);
- } else {
- /*
- * Save the returned context for use in all accesses to
- * this particular region
- */
+ /*
+ * Save the returned context for use in all handler accesses to
+ * this particular region
+ */
+ if (!(region_obj2->extra.region_context)) {
region_obj2->extra.region_context =
region_context;
}
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj->address_space.
context, region_context);
+ /*
+ * region_context should have been released by the deactivate
+ * operation. We don't need access to it anymore here.
+ */
+ if (region_context) {
+ *region_context = NULL;
+ }
+
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
@@ -570,10 +573,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
status = acpi_ns_evaluate(info);
acpi_ut_remove_reference(args[1]);
- cleanup2:
+cleanup2:
acpi_ut_remove_reference(args[0]);
- cleanup1:
+cleanup1:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
@@ -758,7 +761,7 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
- exit:
+exit:
/* We ignore all errors from above, don't care */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index b905acf7aacd..9e9e3454d893 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -54,6 +54,50 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
/*******************************************************************************
*
+ * FUNCTION: acpi_ev_sci_dispatch
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+ /* Are there any host-installed SCI handlers? */
+
+ if (!acpi_gbl_sci_handler_list) {
+ return (int_status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Invoke all host-installed SCI handlers */
+
+ sci_handler = acpi_gbl_sci_handler_list;
+ while (sci_handler) {
+
+ /* Invoke the installed handler (at interrupt level) */
+
+ int_status |= sci_handler->address(sci_handler->context);
+
+ sci_handler = sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ev_sci_xrupt_handler
*
* PARAMETERS: context - Calling Context
@@ -89,6 +133,11 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+ /* Invoke all host-installed SCI handlers */
+
+ interrupt_handled |= acpi_ev_sci_dispatch();
+
+ acpi_sci_count++;
return_UINT32(interrupt_handled);
}
@@ -112,14 +161,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
- * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * We are guaranteed by the ACPICA initialization/shutdown code that
* if this interrupt handler is installed, ACPI is enabled.
*/
/* GPEs: Check for and dispatch any GPEs that have occurred */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
return_UINT32(interrupt_handled);
}
@@ -150,15 +198,15 @@ u32 acpi_ev_install_sci_handler(void)
/******************************************************************************
*
- * FUNCTION: acpi_ev_remove_sci_handler
+ * FUNCTION: acpi_ev_remove_all_sci_handlers
*
* PARAMETERS: none
*
- * RETURN: E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN: AE_OK if handler uninstalled, AE_ERROR if handler was not
* installed to begin with
*
* DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- * taken.
+ * taken. Also removes all host-installed SCI handlers.
*
* Note: It doesn't seem important to disable all events or set the event
* enable registers to their original values. The OS should disable
@@ -167,11 +215,13 @@ u32 acpi_ev_install_sci_handler(void)
*
******************************************************************************/
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
{
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
acpi_status status;
- ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+ ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
/* Just let the OS remove the handler and disable the level */
@@ -179,6 +229,21 @@ acpi_status acpi_ev_remove_sci_handler(void)
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler);
+ if (!acpi_gbl_sci_handler_list) {
+ return (status);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Free all host-installed SCI handlers */
+
+ while (acpi_gbl_sci_handler_list) {
+ sci_handler = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = sci_handler->next;
+ ACPI_FREE(sci_handler);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ca5fba99c33b..23a7fadca412 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -374,7 +375,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
acpi_gbl_exception_handler = handler;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -385,6 +386,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
+ * FUNCTION: acpi_install_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ * context - Value passed to the handler on each SCI
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+ struct acpi_sci_handler_info *new_sci_handler;
+ struct acpi_sci_handler_info *sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Allocate and init a handler object */
+
+ new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+ if (!new_sci_handler) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ new_sci_handler->address = address;
+ new_sci_handler->context = context;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Lock list during installation */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ sci_handler = acpi_gbl_sci_handler_list;
+
+ /* Ensure handler does not already exist */
+
+ while (sci_handler) {
+ if (address == sci_handler->address) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ sci_handler = sci_handler->next;
+ }
+
+ /* Install the new handler into the global list (at head) */
+
+ new_sci_handler->next = acpi_gbl_sci_handler_list;
+ acpi_gbl_sci_handler_list = new_sci_handler;
+
+unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+exit:
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(new_sci_handler);
+ }
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_sci_handler
+ *
+ * PARAMETERS: address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+ struct acpi_sci_handler_info *prev_sci_handler;
+ struct acpi_sci_handler_info *next_sci_handler;
+ acpi_cpu_flags flags;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Remove the SCI handler with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ prev_sci_handler = NULL;
+ next_sci_handler = acpi_gbl_sci_handler_list;
+ while (next_sci_handler) {
+ if (next_sci_handler->address == address) {
+
+ /* Unlink and free the SCI handler info block */
+
+ if (prev_sci_handler) {
+ prev_sci_handler->next = next_sci_handler->next;
+ } else {
+ acpi_gbl_sci_handler_list =
+ next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ ACPI_FREE(next_sci_handler);
+ goto unlock_and_exit;
+ }
+
+ prev_sci_handler = next_sci_handler;
+ next_sci_handler = next_sci_handler->next;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ status = AE_NOT_EXIST;
+
+unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_install_global_event_handler
*
* PARAMETERS: handler - Pointer to the global event handler function
@@ -398,6 +537,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
* Can be used to update event counters, etc.
*
******************************************************************************/
+
acpi_status
acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
{
@@ -426,7 +566,7 @@ acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
acpi_gbl_global_event_handler = handler;
acpi_gbl_global_event_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -498,7 +638,7 @@ acpi_install_fixed_event_handler(u32 event,
handler));
}
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
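Together with acpi_ev_sci_dispatch() in evsci.c, the acpi_install_sci_handler()/acpi_remove_sci_handler() interfaces added to evxface.c above let a host driver hook the SCI directly. A usage sketch follows; the handler body and the my_* names are hypothetical, only the two new interfaces, the acpi_sci_handler prototype and the ACPI_INTERRUPT_* return codes come from this patch and the existing ACPICA headers. Handlers are invoked at interrupt level under the GPE lock, so they must not block.

	/* Hypothetical host SCI handler: report whether this SCI was ours */
	static u32 my_sci_handler(void *context)
	{
		struct my_sci_context *ctx = context;	/* hypothetical driver data */

		if (!my_event_pending(ctx)) {		/* hypothetical status check */
			return (ACPI_INTERRUPT_NOT_HANDLED);
		}

		my_clear_event(ctx);			/* hypothetical acknowledge */
		return (ACPI_INTERRUPT_HANDLED);
	}

	/* Typical registration from a driver init path ... */
	status = acpi_install_sci_handler(my_sci_handler, ctx);

	/* ... and removal on the driver exit path */
	status = acpi_remove_sci_handler(my_sci_handler);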
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7039606a0ba8..39d06af5e347 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 7662f1a42ff6..5713da77c665 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -471,7 +472,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
*event_status |= ACPI_EVENT_FLAG_HANDLE;
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -624,7 +625,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc->device.gpe_block = gpe_block;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -679,7 +680,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
obj_desc->device.gpe_block = NULL;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 80cecf838591..02ed75ac56cd 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -147,7 +148,7 @@ acpi_install_address_space_handler(acpi_handle device,
status = acpi_ev_execute_reg_methods(node, space_id);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
@@ -286,7 +287,7 @@ acpi_remove_address_space_handler(acpi_handle device,
status = AE_NOT_EXIST;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 269e81d86ef4..3c2e6dcdad3e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -193,7 +193,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
operands[0], obj_desc, ACPI_TYPE_EVENT);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -248,7 +248,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
ACPI_TYPE_MUTEX);
- cleanup:
+cleanup:
/*
* Remove local reference to the object (on error, will cause deletion
* of both object and semaphore if present.)
@@ -347,7 +347,7 @@ acpi_ex_create_region(u8 * aml_start,
status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
- cleanup:
+cleanup:
/* Remove local reference to the object */
@@ -520,7 +520,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_ut_remove_reference(obj_desc);
- exit:
+exit:
/* Remove a reference to the operand */
acpi_ut_remove_reference(operand[1]);
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index c2a65aaf29af..cfd875243421 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -197,7 +197,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
- exit:
+exit:
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(buffer_desc);
} else {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 7e0afe72487e..49fb742d61b9 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -123,12 +123,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
- /* Exit if Address/Length have been disallowed by the host OS */
-
- if (rgn_desc->common.flags & AOPOBJ_INVALID) {
- return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
- }
-
/*
* Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
@@ -1002,7 +996,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
mask, merged_datum,
field_offset);
- exit:
+exit:
/* Free temporary buffer if we used one */
if (new_buffer) {
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 00bf29877574..65d93607f368 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -388,7 +388,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
*actual_return_desc = return_desc;
- cleanup:
+cleanup:
if (local_operand1 != operand1) {
acpi_ut_remove_reference(local_operand1);
}
@@ -718,7 +718,7 @@ acpi_ex_do_logical_op(u16 opcode,
}
}
- cleanup:
+cleanup:
/* New object was created if implicit conversion performed - delete */
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 2cdd41d8ade6..d74cea416ca0 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -115,7 +115,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
break;
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -234,7 +234,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -551,7 +551,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[1], walk_state);
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -1054,7 +1054,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d5088f7030c7..d6fa0fce1fc9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -215,7 +215,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/*
* Since the remainder is not returned indirectly, remove a reference to
* it. Only the quotient is returned indirectly.
@@ -445,7 +445,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
break;
}
- store_result_to_target:
+store_result_to_target:
if (ACPI_SUCCESS(status)) {
/*
@@ -462,7 +462,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
}
}
- cleanup:
+cleanup:
/* Delete return object on error */
@@ -553,7 +553,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- store_logical_result:
+store_logical_result:
/*
	 * Set return value according to logical_result. logical TRUE (all ones)
* Default is FALSE (zero)
@@ -562,7 +562,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
return_desc->integer.value = ACPI_UINT64_MAX;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 37656f12f204..bc042adf8804 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -124,7 +124,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
return_ACPI_STATUS(status);
}
@@ -252,7 +252,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_store(return_desc, operand[3], walk_state);
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 879b6cd8319c..4459e32c683d 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
goto cleanup;
}
- cleanup:
+cleanup:
/* Delete return object on error */
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 303429bb4d5d..9d28867e60dc 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -400,6 +400,7 @@ acpi_ex_pci_config_space_handler(u32 function,
switch (function) {
case ACPI_READ:
+ *value = 0;
status = acpi_os_read_pci_configuration(pci_id, pci_register,
value, bit_width);
break;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index ac04278ad28f..1606524312e3 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -521,7 +521,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
type = obj_desc->common.type;
- exit:
+exit:
/* Convert internal types to external types */
switch (type) {
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 00e5af7129c1..be3f66973ee8 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -683,7 +683,7 @@ acpi_ex_resolve_operands(u16 opcode,
return_ACPI_STATUS(status);
}
- next_operand:
+next_operand:
/*
* If more operands needed, decrement stack_ptr to point
* to next operand on stack
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 8d2e866be15f..12e6cff54f78 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -560,7 +560,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
}
- exit:
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 2d7d22ebc782..3c498dc1636e 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 5ee7a814cd92..b4b47db2dee2 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -83,11 +84,17 @@ acpi_status acpi_reset(void)
* For I/O space, write directly to the OSL. This bypasses the port
* validation mechanism, which may block a valid write to the reset
* register.
- * Spec section 4.7.3.6 requires register width to be 8.
+ *
+ * NOTE:
+ * The ACPI spec requires the reset register width to be 8, so we
+ * hardcode it here and ignore the FADT value. This maintains
+ * compatibility with other ACPI implementations that have allowed
+ * BIOS code with bad register width values to go unnoticed.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
- acpi_gbl_FADT.reset_value, 8);
+ acpi_gbl_FADT.reset_value,
+ ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
@@ -119,7 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
- u32 value;
+ u32 value_lo;
+ u32 value_hi;
u32 width;
u64 address;
acpi_status status;
@@ -137,13 +145,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
return (status);
}
- /* Initialize entire 64-bit return value to zero */
-
- *return_value = 0;
- value = 0;
-
/*
- * Two address spaces supported: Memory or IO. PCI_Config is
+ * Two address spaces supported: Memory or I/O. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +158,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
}
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+ value_lo = 0;
+ value_hi = 0;
+
width = reg->bit_width;
if (width == 64) {
width = 32; /* Break into two 32-bit transfers */
}
status = acpi_hw_read_port((acpi_io_address)
- address, &value, width);
+ address, &value_lo, width);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value = value;
if (reg->bit_width == 64) {
/* Read the top 32 bits */
status = acpi_hw_read_port((acpi_io_address)
- (address + 4), &value, 32);
+ (address + 4), &value_hi,
+ 32);
if (ACPI_FAILURE(status)) {
return (status);
}
- *return_value |= ((u64)value << 32);
}
+
+ /* Set the return value only if status is AE_OK */
+
+ *return_value = (value_lo | ((u64)value_hi << 32));
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +195,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
- return (status);
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_read)
@@ -561,10 +570,10 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
break;
}
- cleanup1:
+cleanup1:
acpi_ut_remove_reference(info->return_object);
- cleanup:
+cleanup:
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating Sleep State [%s]",
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f2e669db8b65..15dddc10fc9b 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -166,7 +167,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+acpi_status acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
@@ -360,7 +361,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index c5316e5bd4ab..14f65f6345b9 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -240,7 +240,7 @@ acpi_status acpi_ns_root_initialize(void)
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Save a handle to "_GPE", it is always present */
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
- "ACPI path has too many parent prefixes (^) "
- "- reached beyond root node"));
+ "%s: Path has too many parent prefixes (^) "
+ "- reached beyond root node",
+ pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 7418c77fde8c..48b9c6f12643 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
#endif
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_print_pathname
@@ -609,7 +620,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
- cleanup:
+cleanup:
acpi_os_printf("\n");
return (AE_OK);
}
@@ -671,6 +682,136 @@ acpi_ns_dump_objects(acpi_object_type type,
}
#endif /* ACPI_FUTURE_USAGE */
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS: obj_handle - Node to be dumped
+ * level - Nesting level of the handle
+ * context - Passed into walk_namespace
+ * return_value - Not used
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acpi_ns_get_max_depth
+ * computes the maximum nesting depth in the namespace tree, in
+ * order to simplify formatting in acpi_ns_dump_one_object_path.
+ * These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 max_level = *((u32 *)context);
+ char *pathname;
+ struct acpi_namespace_node *node;
+ int path_indent;
+
+ if (!obj_handle) {
+ return (AE_OK);
+ }
+
+ node = acpi_ns_validate_handle(obj_handle);
+ if (!node) {
+
+ /* Ignore bad node during namespace walk */
+
+ return (AE_OK);
+ }
+
+ pathname = acpi_ns_get_external_pathname(node);
+
+ path_indent = 1;
+ if (level <= max_level) {
+ path_indent = max_level - level + 1;
+ }
+
+ acpi_os_printf("%2d%*s%-12s%*s",
+ level, level, " ", acpi_ut_get_type_name(node->type),
+ path_indent, " ");
+
+ acpi_os_printf("%s\n", &pathname[1]);
+ ACPI_FREE(pathname);
+ return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ u32 *max_level = (u32 *)context;
+
+ if (level > *max_level) {
+ *max_level = level;
+ }
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_dump_object_paths
+ *
+ * PARAMETERS: type - Object type to be dumped
+ * display_type - 0 or ACPI_DISPLAY_SUMMARY
+ * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
+ * for an effectively unlimited depth.
+ * owner_id - Dump only objects owned by this ID. Use
+ * ACPI_UINT32_MAX to match all owners.
+ * start_handle - Where in namespace to start/end search
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ * acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+ u8 display_type,
+ u32 max_depth,
+ acpi_owner_id owner_id, acpi_handle start_handle)
+{
+ acpi_status status;
+ u32 max_level = 0;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Just lock the entire namespace for the duration of the dump.
+ * We don't want any changes to the namespace during this time,
+ * especially the temporary nodes since we are going to display
+ * them also.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not acquire namespace mutex\n");
+ return;
+ }
+
+ /* Get the max depth of the namespace tree, for formatting later */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_get_max_depth, NULL,
+ (void *)&max_level, NULL);
+
+ /* Now dump the entire namespace */
+
+ (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+ ACPI_NS_WALK_NO_UNLOCK |
+ ACPI_NS_WALK_TEMP_NODES,
+ acpi_ns_dump_one_object_path, NULL,
+ (void *)&max_level, NULL);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif /* ACPI_FUTURE_USAGE */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_entry
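The new acpi_ns_dump_object_paths() above is a debugger-side helper: one namespace walk finds the deepest nesting level for column alignment, a second walk prints one full pathname per node. A plausible, hypothetical call site, dumping every object under the root with the "match all" values described in the parameter comments (the call itself is not part of this patch):

	/* Dump full pathnames for all types and all owners, unlimited depth */
	acpi_ns_dump_object_paths(ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY,
				  ACPI_UINT32_MAX, ACPI_UINT32_MAX,
				  acpi_gbl_root_node);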
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 409ae80824d1..283762511b73 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -69,6 +69,7 @@ static acpi_status
acpi_ns_dump_one_device(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_buffer buffer;
struct acpi_device_info *info;
acpi_status status;
u32 i;
@@ -78,15 +79,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
- status = acpi_get_object_info(obj_handle, &info);
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(obj_handle, &buffer);
if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
for (i = 0; i < level; i++) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
" HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
- info->hardware_id.string,
+ info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 18108bc2e51c..963ceef063f8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -314,7 +314,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
"*** Completed evaluation of object %s ***\n",
info->relative_pathname));
- cleanup:
+cleanup:
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just free the pathname and return
@@ -486,7 +486,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
parent_node->type = (u8)type;
}
- exit:
+exit:
if (parent_obj) {
acpi_ut_remove_reference(parent_obj);
}
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index dd2ceae3f717..3a0423af968c 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -213,7 +213,7 @@ acpi_status acpi_ns_initialize_devices(void)
return_ACPI_STATUS(status);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 0a7badc3179f..89ec645e7730 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -114,7 +114,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
(void)acpi_tb_release_owner_id(table_index);
}
- unlock:
+unlock:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 35dde8151c0d..177857340271 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -140,7 +140,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
pass_number));
status = acpi_ps_parse_aml(walk_state);
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(parse_root);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 098e7666cbc9..d2855d9857c4 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -271,7 +271,7 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
return (AE_OK); /* Successful repair */
}
- type_error_exit:
+type_error_exit:
/* Create a string with all expected types for this predefined object */
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 6d55cef7916c..3d5391f9bcb5 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -330,7 +330,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
return (status);
- package_too_small:
+package_too_small:
/* Error exit for the case with an incorrect package count */
@@ -555,7 +555,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
return (AE_OK);
- package_too_small:
+package_too_small:
/* The sub-package count was smaller than required */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index f8e71ea60319..a05afff50eb9 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -263,7 +263,7 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
return (AE_AML_OPERAND_TYPE);
- object_repaired:
+object_repaired:
/* Object was successfully repaired */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index c84603ee83ae..6a25d320b169 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -478,7 +478,7 @@ acpi_ns_repair_CST(struct acpi_evaluate_info *info,
removing = TRUE;
}
- remove_element:
+remove_element:
if (removing) {
acpi_ns_remove_element(return_object, i + 1);
outer_element_count--;
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5d43efc53a61..47420faef073 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -381,7 +381,8 @@ acpi_ns_search_and_enter(u32 target_name,
/* Node is an object defined by an External() statement */
- if (flags & ACPI_NS_EXTERNAL) {
+ if (flags & ACPI_NS_EXTERNAL ||
+ (walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 08c0b5beec88..cc2fea94c5f0 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -722,7 +722,7 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- cleanup:
+cleanup:
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index b38b4b07f86e..e973e311f856 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -138,7 +139,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
- ACPI_FREE(return_buffer->pointer);
+ ACPI_FREE_BUFFER(*return_buffer);
return_buffer->pointer = NULL;
}
@@ -441,7 +442,7 @@ acpi_evaluate_object(acpi_handle handle,
acpi_ex_exit_interpreter();
}
- cleanup:
+cleanup:
/* Free the input parameter list (if we created one) */
@@ -605,14 +606,22 @@ acpi_walk_namespace(acpi_object_type type,
goto unlock_and_exit;
}
+ /* Now we can validate the starting node */
+
+ if (!acpi_ns_validate_handle(start_object)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit2;
+ }
+
status = acpi_ns_walk_namespace(type, start_object, max_depth,
ACPI_NS_WALK_UNLOCK,
descending_callback, ascending_callback,
context, return_value);
+unlock_and_exit2:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
return_ACPI_STATUS(status);
}
@@ -856,7 +865,7 @@ acpi_attach_data(acpi_handle obj_handle,
status = acpi_ns_attach_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -902,7 +911,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
status = acpi_ns_detach_data(node, handler);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
@@ -949,7 +958,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
status = acpi_ns_get_attached_data(node, handler, data);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83c164434580..3a4bd3ff49a3 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -208,7 +209,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -496,7 +497,7 @@ acpi_get_object_info(acpi_handle handle,
*return_buffer = info;
status = AE_OK;
- cleanup:
+cleanup:
if (hid) {
ACPI_FREE(hid);
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c0853ef294e4..0e6d79e462d4 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,7 +42,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -200,7 +201,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
status = AE_NULL_ENTRY;
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
@@ -280,7 +281,7 @@ acpi_get_next_object(acpi_object_type type,
*ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 86198a9139b5..79d9a28dedef 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -297,7 +297,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
}
}
- cleanup:
+cleanup:
/* Now we can actually delete the subtree rooted at Op */
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 11b99ab20bb3..fcb7a840e996 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -142,7 +142,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
}
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -185,7 +185,7 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
acpi_dbg_level = acpi_gbl_original_dbg_level;
acpi_dbg_layer = acpi_gbl_original_dbg_layer;
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
@@ -323,7 +323,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* walk_state was deleted by parse_aml */
- cleanup:
+cleanup:
acpi_ps_delete_parse_tree(op);
/* End optional tracing */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 80d12994e0d0..c99cec9cefde 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -440,7 +440,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
if (!flags_mode) {
/* Round the resource struct length up to the next boundary (32 or 64) */
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info++;
}
- exit:
+exit:
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 480b6b40c5ea..aef303d56d86 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -784,7 +784,7 @@ acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
acpi_ut_remove_reference(args[0]);
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 94e3517554f9..01e476988aae 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 42a13c0d7015..634357d51fe9 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
}
}
- /* FACS is the odd table, has no standard ACPI header and no checksum */
+ /* Always calculate checksum, ignore bad checksum if requested */
- if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
- /* Always calculate checksum, ignore bad checksum if requested */
-
- status =
- acpi_tb_verify_checksum(table_desc->pointer,
- table_desc->length);
- }
+ status =
+ acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
return_ACPI_STATUS(status);
}
@@ -237,10 +231,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
goto release;
}
- print_header:
+print_header:
acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
- release:
+release:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -312,7 +306,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
return (NULL); /* There was no override */
- finish_override:
+finish_override:
ACPI_INFO((AE_INFO,
"%4.4s %p %s table override, new table: %p",
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index dc963f823d2c..6866e767ba90 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -135,10 +135,10 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %05X",
+ ACPI_INFO((AE_INFO, "%4.4s %p %06X",
header->signature, ACPI_CAST_PTR(void, address),
header->length));
- } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+ } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
/* RSDP has no common fields */
@@ -147,7 +147,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
+ ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
ACPI_CAST_PTR(void, address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
@@ -162,7 +162,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
+ "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
local_header.signature, ACPI_CAST_PTR(void, address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
{
u8 checksum;
+ /*
+ * FACS/S3PT:
+ * They are the odd tables, have no standard ACPI header and no checksum
+ */
+
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ return (AE_OK);
+ }
+
/* Compute the checksum on the table */
checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
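/*
 * Illustrative sketch (not part of this patch): acpi_tb_verify_checksum()
 * relies on the standard ACPI table checksum rule: the checksum byte is
 * chosen so that all bytes of the table, including the checksum field
 * itself, sum to zero modulo 256. A standalone equivalent of the byte-sum
 * helper used above (names here are illustrative, not the ACPICA code):
 */
static u8 example_table_byte_sum(const u8 *buffer, u32 length)
{
	u8 sum = 0;

	while (length--)
		sum += *buffer++;

	return sum;	/* 0 means the table checksum is valid */
}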
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index bffdfc7b8322..3d6bb83aa7e7 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -350,7 +350,7 @@ acpi_tb_install_table(acpi_physical_address address,
acpi_tb_delete_table(table_desc);
}
- unmap_and_exit:
+unmap_and_exit:
/* Always unmap the table header that we mapped above */
@@ -430,8 +430,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
*
******************************************************************************/
-acpi_status __init
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
+acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
{
struct acpi_table_rsdp *rsdp;
u32 table_entry_size;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index ad11162482ff..db826eaadd1c 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
@@ -147,6 +148,8 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
+
/*******************************************************************************
*
* FUNCTION: acpi_reallocate_root_table
@@ -161,7 +164,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
* kernel.
*
******************************************************************************/
-acpi_status acpi_reallocate_root_table(void)
+acpi_status __init acpi_reallocate_root_table(void)
{
acpi_status status;
@@ -181,6 +184,8 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL_INIT(acpi_reallocate_root_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
@@ -356,6 +361,7 @@ acpi_get_table_with_size(char *signature,
return (AE_NOT_FOUND);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
acpi_status
@@ -367,6 +373,7 @@ acpi_get_table(char *signature,
return acpi_get_table_with_size(signature,
instance, out_table, &tbl_size);
}
+
ACPI_EXPORT_SYMBOL(acpi_get_table)
/*******************************************************************************
@@ -424,7 +431,6 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
-
/*******************************************************************************
*
* FUNCTION: acpi_install_table_handler
@@ -465,7 +471,7 @@ acpi_install_table_handler(acpi_table_handler handler, void *context)
acpi_gbl_table_handler = handler;
acpi_gbl_table_handler_context = context;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
@@ -506,7 +512,7 @@ acpi_status acpi_remove_table_handler(acpi_table_handler handler)
acpi_gbl_table_handler = NULL;
- cleanup:
+cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0ba9e328d5d7..60b5a871833c 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -65,7 +66,7 @@ static acpi_status acpi_tb_load_namespace(void);
*
******************************************************************************/
-acpi_status acpi_load_tables(void)
+acpi_status __init acpi_load_tables(void)
{
acpi_status status;
@@ -82,7 +83,7 @@ acpi_status acpi_load_tables(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
+ACPI_EXPORT_SYMBOL_INIT(acpi_load_tables)
/*******************************************************************************
*
@@ -200,7 +201,7 @@ static acpi_status acpi_tb_load_namespace(void)
ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -268,7 +269,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
acpi_gbl_table_handler_context);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 948c95e80d44..e4e1468877c3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* Note: Sometimes there exists more than one RSDP in memory; the valid
* RSDP has a valid checksum, all others have an invalid checksum.
*/
- if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
- sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+ if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
/* Nope, BAD Signature */
@@ -112,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status __init acpi_find_root_pointer(acpi_size *table_address)
{
u8 *table_ptr;
u8 *mem_rover;
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index e0ffb580f4b0..814267f52715 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -48,6 +48,39 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
+#if !defined (USE_NATIVE_ALLOCATE_ZEROED)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_allocate_zeroed
+ *
+ * PARAMETERS: size - Size of the allocation
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ * This is the default implementation. Can be overridden via the
+ * USE_NATIVE_ALLOCATE_ZEROED flag.
+ *
+ ******************************************************************************/
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+ void *allocation;
+
+ ACPI_FUNCTION_ENTRY();
+
+ allocation = acpi_os_allocate(size);
+ if (allocation) {
+
+ /* Clear the memory block */
+
+ ACPI_MEMSET(allocation, 0, size);
+ }
+
+ return (allocation);
+}
+
+#endif /* !USE_NATIVE_ALLOCATE_ZEROED */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_caches
@@ -59,6 +92,7 @@ ACPI_MODULE_NAME("utalloc")
* DESCRIPTION: Create all local caches
*
******************************************************************************/
+
acpi_status acpi_ut_create_caches(void)
{
acpi_status status;
@@ -175,10 +209,10 @@ acpi_status acpi_ut_delete_caches(void)
/* Free memory lists */
- ACPI_FREE(acpi_gbl_global_list);
+ acpi_os_free(acpi_gbl_global_list);
acpi_gbl_global_list = NULL;
- ACPI_FREE(acpi_gbl_ns_node_list);
+ acpi_os_free(acpi_gbl_ns_node_list);
acpi_gbl_ns_node_list = NULL;
#endif
@@ -302,82 +336,3 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
ACPI_MEMSET(buffer->pointer, 0, required_length);
return (AE_OK);
}
-
-#ifdef NOT_USED_BY_LINUX
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
-
- /* Check for an inadvertent size of zero bytes */
-
- if (!size) {
- ACPI_WARNING((module, line,
- "Attempt to allocate zero bytes, allocating 1 byte"));
- size = 1;
- }
-
- allocation = acpi_os_allocate(size);
- if (!allocation) {
-
- /* Report allocation error */
-
- ACPI_WARNING((module, line,
- "Could not allocate size %u", (u32) size));
-
- return_PTR(NULL);
- }
-
- return_PTR(allocation);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate_zeroed
- *
- * PARAMETERS: size - Size of the allocation
- * component - Component type of caller
- * module - Source file name of caller
- * line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
- u32 component, const char *module, u32 line)
-{
- void *allocation;
-
- ACPI_FUNCTION_ENTRY();
-
- allocation = acpi_ut_allocate(size, component, module, line);
- if (allocation) {
-
- /* Clear the memory block */
-
- ACPI_MEMSET(allocation, 0, size);
- }
-
- return (allocation);
-}
-#endif
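/*
 * Illustrative sketch (not part of this patch): a host OS layer that has a
 * native zeroing allocator can define USE_NATIVE_ALLOCATE_ZEROED and supply
 * its own acpi_os_allocate_zeroed(), so the allocate-then-memset default
 * above is compiled out. A simplified Linux-style override might look like
 * this (assumption: the real hook lives in the OS-specific ACPICA headers
 * and picks its GFP flags based on context):
 */
#ifdef USE_NATIVE_ALLOCATE_ZEROED
static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
	return kzalloc(size, GFP_KERNEL);	/* memory is zeroed by the allocator */
}
#endif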
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index a877a9647fd9..366bfec4b770 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -65,7 +65,7 @@ ACPI_MODULE_NAME("utcache")
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
- u16 max_depth, struct acpi_memory_list ** return_cache)
+ u16 max_depth, struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 1731c27c36a6..edff4e653d9a 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -552,7 +552,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
*ret_internal_object = internal_object;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
acpi_ut_remove_reference(internal_object);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -899,7 +899,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
return (status);
- error_exit:
+error_exit:
acpi_ut_remove_reference(target_object);
return (status);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5796e11a0671..1a67b3944b3b 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
@@ -190,7 +191,7 @@ acpi_debug_print(u32 requested_debug_level,
* Display the module name, current line number, thread ID (if requested),
* current procedure nesting level, and the current procedure name
*/
- acpi_os_printf("%8s-%04ld ", module_name, line_number);
+ acpi_os_printf("%9s-%04ld ", module_name, line_number);
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf("[%u] ", (u32)thread_id);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 11e2e02e1618..b3f31dd89a45 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,7 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index d6b33f29d327..c07d2227ea42 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -649,7 +649,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
return (AE_OK);
- error_exit:
+error_exit:
ACPI_EXCEPTION((AE_INFO, status,
"Could not update object reference count"));
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 4fd68971019b..16fb90506db7 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -181,7 +181,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
*return_desc = info->return_object;
- cleanup:
+cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index ff6d9e8aa842..3cf7b597edb9 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -41,8 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
+
#define ACPI_DEFINE_EXCEPTION_TABLE
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d6f26bf8a062..81f9a9584451 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -41,9 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#define EXPORT_ACPI_INTERFACES
#define DEFINE_ACPI_GLOBALS
-#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
@@ -289,9 +289,19 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+ /* Event counters */
+
+ acpi_method_count = 0;
+ acpi_sci_count = 0;
+ acpi_gpe_count = 0;
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_fixed_event_count[i] = 0;
+ }
+
#if (!ACPI_REDUCED_HARDWARE)
- /* GPE support */
+ /* GPE/SCI support */
acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +310,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_current_gpe_count = 0;
acpi_gbl_global_event_handler = NULL;
+ acpi_gbl_sci_handler_list = NULL;
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -377,6 +388,11 @@ acpi_status acpi_ut_init_globals(void)
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+
+ACPI_EXPORT_SYMBOL(acpi_gpe_count)
+
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index fa69071db418..bfca7b4b6731 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -184,7 +184,7 @@ acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
sub->length = length;
*return_id = sub;
- cleanup:
+cleanup:
/* On exit, we must delete the return object */
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index aa61f66ee861..13e045025c33 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -180,7 +180,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
1) * sizeof(void *));
if (!package_elements) {
- acpi_ut_remove_reference(package_desc);
+ ACPI_FREE(package_desc);
return_PTR(NULL);
}
@@ -396,7 +396,6 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
/* Mark the descriptor type */
- memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
@@ -461,25 +460,28 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
+ /* Start with the length of the (external) Acpi object */
+
+ length = sizeof(union acpi_object);
+
+ /* A NULL object is allowed, can be a legal uninitialized package element */
+
+ if (!internal_object) {
/*
- * Handle a null object (Could be a uninitialized package
- * element -- which is legal)
+ * Object is NULL, just return the length of union acpi_object
+ * (A NULL union acpi_object is an object of all zeroes.)
*/
- if (!internal_object) {
- *obj_length = sizeof(union acpi_object);
+ *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
return_ACPI_STATUS(AE_OK);
}
- /* Start with the length of the Acpi object */
-
- length = sizeof(union acpi_object);
+ /* A Namespace Node should never appear here */
if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
- /* Object is a named object (reference), just return the length */
+ /* A namespace node should never get here */
- *obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 835340b26d37..eb3aca761369 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -148,7 +148,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
ACPI_ERROR((AE_INFO,
"Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
- exit:
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cb7fa491decf..2c2accb9e534 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -643,7 +643,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
return (AE_OK);
- invalid_resource:
+invalid_resource:
if (walk_state) {
ACPI_ERROR((AE_INFO,
@@ -652,7 +652,7 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
}
return (AE_AML_INVALID_RESOURCE_TYPE);
- bad_resource_length:
+bad_resource_length:
if (walk_state) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a6b729d4c1dc..03c4c2febd84 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -161,7 +161,6 @@ union acpi_generic_state *acpi_ut_create_generic_state(void)
if (state) {
/* Initialize */
- memset(state, 0, sizeof(union acpi_generic_state));
state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index cb1e9cc32d5f..45c0eb26b33d 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -310,7 +310,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* All done, normal exit */
- all_done:
+all_done:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(return_value)));
@@ -318,7 +318,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
*ret_integer = return_value;
return_ACPI_STATUS(AE_OK);
- error_exit:
+error_exit:
/* Base was set/validated above */
if (base == 10) {
@@ -584,3 +584,65 @@ void ut_convert_backslashes(char *pathname)
}
}
#endif
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (ACPI_STRLEN(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCPY(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((ACPI_STRLEN(dest) + ACPI_STRLEN(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRCAT(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length =
+ ACPI_MIN(max_transfer_length, ACPI_STRLEN(source));
+
+ if ((ACPI_STRLEN(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ ACPI_STRNCAT(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
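/*
 * Illustrative sketch (not part of this patch): the new safe string helpers
 * return TRUE when the requested operation would overflow the destination,
 * so debugger/application callers can fail cleanly instead of truncating.
 * The buffer and function names below are made up for the example.
 */
static acpi_status example_build_message(char *msg, acpi_size msg_size,
					 char *prefix, char *detail)
{
	if (acpi_ut_safe_strcpy(msg, msg_size, prefix)) {
		return (AE_BUFFER_OVERFLOW);	/* prefix alone does not fit */
	}

	if (acpi_ut_safe_strcat(msg, msg_size, detail)) {
		return (AE_BUFFER_OVERFLOW);	/* prefix + detail would overflow */
	}

	return (AE_OK);
}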
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 160f13f4aab5..c0027773cccb 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -130,10 +130,23 @@ void *acpi_ut_allocate_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header));
if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_WARNING((module, line,
+ "Could not allocate size %u", (u32)size));
+
return (NULL);
}
@@ -179,9 +192,17 @@ void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
struct acpi_debug_mem_block *allocation;
acpi_status status;
+ /* Check for an inadvertent size of zero bytes */
+
+ if (!size) {
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
+ size = 1;
+ }
+
allocation =
- acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
+ acpi_os_allocate_zeroed(size +
+ sizeof(struct acpi_debug_mem_header));
if (!allocation) {
/* Report allocation error */
@@ -409,7 +430,7 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
element->next = allocation;
}
- unlock_and_exit:
+unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 03a211e6e26a..be322c83643a 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdebug.h"
@@ -60,7 +61,7 @@ ACPI_MODULE_NAME("utxface")
* DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
-acpi_status acpi_terminate(void)
+acpi_status __init acpi_terminate(void)
{
acpi_status status;
@@ -104,7 +105,7 @@ acpi_status acpi_terminate(void)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_terminate)
+ACPI_EXPORT_SYMBOL_INIT(acpi_terminate)
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
@@ -207,6 +208,44 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
ACPI_EXPORT_SYMBOL(acpi_get_system_info)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_statistics
+ *
+ * PARAMETERS: stats - Where the statistics are returned
+ *
+ * RETURN: status - the status of the call
+ *
+ * DESCRIPTION: Get the contents of the various system counters
+ *
+ ******************************************************************************/
+acpi_status acpi_get_statistics(struct acpi_statistics *stats)
+{
+ ACPI_FUNCTION_TRACE(acpi_get_statistics);
+
+ /* Parameter validation */
+
+ if (!stats) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Various interrupt-based event counters */
+
+ stats->sci_count = acpi_sci_count;
+ stats->gpe_count = acpi_gpe_count;
+
+ ACPI_MEMCPY(stats->fixed_event_count, acpi_fixed_event_count,
+ sizeof(acpi_fixed_event_count));
+
+ /* Other counters */
+
+ stats->method_count = acpi_method_count;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_statistics)
+
/*****************************************************************************
*
* FUNCTION: acpi_install_initialization_handler
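/*
 * Illustrative sketch (not part of this patch): a caller of the new
 * acpi_get_statistics() interface passes a struct acpi_statistics and reads
 * back the counters filled in above (sci_count, gpe_count,
 * fixed_event_count[] and method_count). The function name is made up.
 */
static void example_dump_acpi_statistics(void)
{
	struct acpi_statistics stats;

	if (ACPI_FAILURE(acpi_get_statistics(&stats))) {
		return;
	}

	acpi_os_printf("SCIs: %u  GPEs: %u  Methods: %u\n",
		       stats.sci_count, stats.gpe_count, stats.method_count);
}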
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index e966a2e47b76..f7edb88f6054 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 41ebaaf8bb1a..75efea0539c1 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -41,7 +41,8 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/export.h>
+#define EXPORT_ACPI_INTERFACES
+
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -64,7 +65,7 @@ ACPI_MODULE_NAME("utxfinit")
* called, so any early initialization belongs here.
*
******************************************************************************/
-acpi_status acpi_initialize_subsystem(void)
+acpi_status __init acpi_initialize_subsystem(void)
{
acpi_status status;
@@ -124,7 +125,8 @@ acpi_status acpi_initialize_subsystem(void)
ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)
/*******************************************************************************
*
@@ -138,7 +140,7 @@ ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
+acpi_status __init acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
@@ -228,7 +230,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem)
/*******************************************************************************
*
@@ -242,7 +245,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
+acpi_status __init acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
@@ -314,4 +317,5 @@ acpi_status acpi_initialize_objects(u32 flags)
acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
return_ACPI_STATUS(status);
}
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c9958cd7a43..fbf1aceda8b8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,12 +36,6 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/power_supply.h>
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
- info_tag = 0,
- state_tag,
- alarm_tag,
- ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
- "mA" : "mW";
-}
-#endif
-
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -741,279 +714,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* --------------------------------------------------------------------------
- FS Interface (/proc)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
- if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design capacity: unknown\n");
- else
- seq_printf(seq, "design capacity: %d %sh\n",
- battery->design_capacity,
- acpi_battery_units(battery));
-
- if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "last full capacity: unknown\n");
- else
- seq_printf(seq, "last full capacity: %d %sh\n",
- battery->full_charge_capacity,
- acpi_battery_units(battery));
-
- seq_printf(seq, "battery technology: %srechargeable\n",
- (!battery->technology)?"non-":"");
-
- if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "design voltage: unknown\n");
- else
- seq_printf(seq, "design voltage: %d mV\n",
- battery->design_voltage);
- seq_printf(seq, "design capacity warning: %d %sh\n",
- battery->design_capacity_warning,
- acpi_battery_units(battery));
- seq_printf(seq, "design capacity low: %d %sh\n",
- battery->design_capacity_low,
- acpi_battery_units(battery));
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: %d %sh\n",
- battery->capacity_granularity_1,
- acpi_battery_units(battery));
- seq_printf(seq, "capacity granularity 2: %d %sh\n",
- battery->capacity_granularity_2,
- acpi_battery_units(battery));
- seq_printf(seq, "model number: %s\n", battery->model_number);
- seq_printf(seq, "serial number: %s\n", battery->serial_number);
- seq_printf(seq, "battery type: %s\n", battery->type);
- seq_printf(seq, "OEM info: %s\n", battery->oem_info);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery info\n");
- return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- seq_printf(seq, "present: %s\n",
- acpi_battery_present(battery) ? "yes" : "no");
- if (!acpi_battery_present(battery))
- goto end;
-
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x04) ? "critical" : "ok");
- if ((battery->state & 0x01) && (battery->state & 0x02))
- seq_printf(seq,
- "charging state: charging/discharging\n");
- else if (battery->state & 0x01)
- seq_printf(seq, "charging state: discharging\n");
- else if (battery->state & 0x02)
- seq_printf(seq, "charging state: charging\n");
- else
- seq_printf(seq, "charging state: charged\n");
-
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present rate: unknown\n");
- else
- seq_printf(seq, "present rate: %d %s\n",
- battery->rate_now, acpi_battery_units(battery));
-
- if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "remaining capacity: unknown\n");
- else
- seq_printf(seq, "remaining capacity: %d %sh\n",
- battery->capacity_now, acpi_battery_units(battery));
- if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
- seq_printf(seq, "present voltage: unknown\n");
- else
- seq_printf(seq, "present voltage: %d mV\n",
- battery->voltage_now);
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery state\n");
-
- return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
- struct acpi_battery *battery = seq->private;
-
- if (result)
- goto end;
-
- if (!acpi_battery_present(battery)) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
- seq_printf(seq, "alarm: ");
- if (!battery->alarm)
- seq_printf(seq, "unsupported\n");
- else
- seq_printf(seq, "%u %sh\n", battery->alarm,
- acpi_battery_units(battery));
- end:
- if (result)
- seq_printf(seq, "ERROR: Unable to read battery alarm\n");
- return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
- const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[12] = { '\0' };
- struct seq_file *m = file->private_data;
- struct acpi_battery *battery = m->private;
-
- if (!battery || (count > sizeof(alarm_string) - 1))
- return -EINVAL;
- if (!acpi_battery_present(battery)) {
- result = -ENODEV;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = '\0';
- battery->alarm = simple_strtol(alarm_string, NULL, 0);
- result = acpi_battery_set_alarm(battery);
- end:
- if (!result)
- return count;
- return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
- acpi_battery_print_info,
- acpi_battery_print_state,
- acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
- struct acpi_battery *battery = seq->private;
- int result = acpi_battery_update(battery);
- return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
- return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IRUGO, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-#define FILE_DESCRIPTION_RW(_name) \
- { \
- .name = __stringify(_name), \
- .mode = S_IFREG | S_IRUGO | S_IWUSR, \
- .ops = { \
- .open = acpi_battery_##_name##_open_fs, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .write = acpi_battery_write_##_name, \
- .release = single_release, \
- .owner = THIS_MODULE, \
- }, \
- }
-
-static const struct battery_file {
- struct file_operations ops;
- umode_t mode;
- const char *name;
-} acpi_battery_file[] = {
- FILE_DESCRIPTION_RO(info),
- FILE_DESCRIPTION_RO(state),
- FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
- struct proc_dir_entry *entry = NULL;
- int i;
-
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!acpi_device_dir(device)) {
- acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
- acpi_battery_dir);
- if (!acpi_device_dir(device))
- return -ENODEV;
- }
-
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
- entry = proc_create_data(acpi_battery_file[i].name,
- acpi_battery_file[i].mode,
- acpi_device_dir(device),
- &acpi_battery_file[i].ops,
- acpi_driver_data(device));
- if (!entry)
- return -ENODEV;
- }
- return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
- int i;
- if (!acpi_device_dir(device))
- return;
- for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
- remove_proc_entry(acpi_battery_file[i].name,
- acpi_device_dir(device));
-
- remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
- acpi_device_dir(device) = NULL;
-}
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device)
result = acpi_battery_update(battery);
if (result)
goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_battery_add_fs(device);
-#endif
- if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
- goto fail;
- }
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device)
return -EINVAL;
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_remove_fs(device);
-#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir)
- return;
-#endif
- if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
- return;
- }
- return;
+ acpi_bus_register_driver(&acpi_battery_driver);
}
static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
}
module_init(acpi_battery_init);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9515f18898b2..fb848378d582 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -274,6 +274,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba NB100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+ },
+ },
+
+ /*
+ * The following machines have broken backlight support when reporting
+ * the Windows 2012 OSI, so disable it until their support is fixed.
+ */
+ {
.callback = dmi_disable_osi_win8,
.ident = "ASUS Zenbook Prime UX31A",
.matches = {
@@ -291,12 +304,60 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Lenovo ThinkPad Edge E530",
+ .ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ThinkPad T430",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a55773801c5f..c971929d75c2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device)
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_POWER, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_POWER);
break;
case ACPI_BUTTON_TYPE_SLEEP:
- input->evbit[0] = BIT_MASK(EV_KEY);
- set_bit(KEY_SLEEP, input->keybit);
+ input_set_capability(input, EV_KEY, KEY_SLEEP);
break;
case ACPI_BUTTON_TYPE_LID:
- input->evbit[0] = BIT_MASK(EV_SW);
- set_bit(SW_LID, input->swbit);
+ input_set_capability(input, EV_SW, SW_LID);
break;
}
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644
index 6c9ee68e46fb..000000000000
--- a/drivers/acpi/cm_sbs.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_BATTERY_CLASS "battery"
-#define _COMPONENT ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_ac_dir)
- acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
- if (acpi_ac_dir) {
- lock_ac_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_AC_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_ac_dir_param)
- lock_ac_dir_cnt--;
- if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
- remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
- acpi_ac_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
- mutex_lock(&cm_sbs_mutex);
- if (!acpi_battery_dir) {
- acpi_battery_dir =
- proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
- }
- if (acpi_battery_dir) {
- lock_battery_dir_cnt++;
- } else {
- printk(KERN_ERR PREFIX
- "Cannot create %s\n", ACPI_BATTERY_CLASS);
- }
- mutex_unlock(&cm_sbs_mutex);
- return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
- mutex_lock(&cm_sbs_mutex);
- if (acpi_battery_dir_param)
- lock_battery_dir_cnt--;
- if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
- && acpi_battery_dir) {
- remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
- acpi_battery_dir = NULL;
- }
- mutex_unlock(&cm_sbs_mutex);
- return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a94383d1f350..d42b2fb5a7e9 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -118,9 +118,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
- * to be in D0 too.
+ * to be in D0 too, except if ignore_parent is set.
*/
- if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+ if (!device->power.flags.ignore_parent && device->parent
+ && device->parent->power.state == ACPI_STATE_UNKNOWN
&& result == ACPI_STATE_D0)
device->parent->power.state = ACPI_STATE_D0;
@@ -177,7 +178,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
acpi_power_state_string(state));
return -ENODEV;
}
- if (device->parent && (state < device->parent->power.state)) {
+ if (!device->power.flags.ignore_parent &&
+ device->parent && (state < device->parent->power.state)) {
dev_warn(&device->dev,
"Cannot transition to power state %s for parent in %s\n",
acpi_power_state_string(state),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 05ea4be01a83..dcd73ccb514c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock)
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned long long value;
acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
- status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+ status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
status);
-
- kfree(buffer.pointer);
}
static inline void dock(struct dock_station *ds)
@@ -671,39 +669,20 @@ static void dock_notify(struct dock_station *ds, u32 event)
}
}
-struct dock_data {
- struct dock_station *ds;
- u32 event;
-};
-
-static void acpi_dock_deferred_cb(void *context)
+static void acpi_dock_deferred_cb(void *data, u32 event)
{
- struct dock_data *data = context;
-
acpi_scan_lock_acquire();
- dock_notify(data->ds, data->event);
+ dock_notify(data, event);
acpi_scan_lock_release();
- kfree(data);
}
static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct dock_data *dd;
-
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return;
- dd = kmalloc(sizeof(*dd), GFP_KERNEL);
- if (dd) {
- acpi_status status;
-
- dd->ds = data;
- dd->event = event;
- status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
- if (ACPI_FAILURE(status))
- kfree(dd);
- }
+ acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
}
/**
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a06d98374705..d5309fd49458 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -28,6 +28,7 @@
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
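/*
 * Illustrative note (not part of this patch): defining pr_fmt() before the
 * first include makes every pr_*() call in this file pick up the prefix
 * automatically, which is why the explicit PREFIX strings are dropped from
 * the pr_debug()/pr_err() calls below. Conceptually:
 *
 *	pr_debug("---> status = 0x%2.2x\n", x);
 *
 * behaves roughly like:
 *
 *	printk(KERN_DEBUG "ACPI : EC: ---> status = 0x%2.2x\n", x);
 */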
@@ -49,9 +50,6 @@
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
-#undef PREFIX
-#define PREFIX "ACPI: EC: "
-
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
u8 x = inb(ec->command_addr);
- pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+ pr_debug("---> status = 0x%2.2x\n", x);
return x;
}
static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
- pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+ pr_debug("---> data = 0x%2.2x\n", x);
return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
- pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+ pr_debug("<--- command = 0x%2.2x\n", command);
outb(command, ec->command_addr);
}
static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
- pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+ pr_debug("<--- data = 0x%2.2x\n", data);
outb(data, ec->data_addr);
}
@@ -241,7 +239,7 @@ static int ec_poll(struct acpi_ec *ec)
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
- pr_debug(PREFIX "controller reset, restart transaction\n");
+ pr_debug("controller reset, restart transaction\n");
spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +307,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
}
}
if (ec_wait_ibf0(ec)) {
- pr_err(PREFIX "input buffer is not empty, "
+ pr_err("input buffer is not empty, "
"aborting transaction\n");
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +329,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ec_storm_threshold) {
- pr_info(PREFIX "GPE storm detected(%d GPEs), "
+ pr_info("GPE storm detected(%d GPEs), "
"transactions will use polling mode\n",
t->irq_count);
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
- pr_debug(PREFIX "transaction end\n");
+ pr_debug("transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -570,12 +568,12 @@ static void acpi_ec_run(void *cxt)
struct acpi_ec_query_handler *handler = cxt;
if (!handler)
return;
- pr_debug(PREFIX "start query execution\n");
+ pr_debug("start query execution\n");
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
- pr_debug(PREFIX "stop query execution\n");
+ pr_debug("stop query execution\n");
kfree(handler);
}
@@ -593,7 +591,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec)
if (!copy)
return -ENOMEM;
memcpy(copy, handler, sizeof(*copy));
- pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+ pr_debug("push query execution (0x%2x) on queue\n",
+ value);
return acpi_os_execute((copy->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
acpi_ec_run, copy);
@@ -616,7 +615,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
- pr_debug(PREFIX "push gpe query to the queue\n");
+ pr_debug("push gpe query to the queue\n");
return acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_ec_gpe_query, ec);
}
@@ -630,7 +629,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
struct acpi_ec *ec = data;
u8 status = acpi_ec_read_status(ec);
- pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+ pr_debug("~~~> interrupt, status:0x%02x\n", status);
advance_transaction(ec, status);
if (ec_transaction_done(ec) &&
@@ -776,7 +775,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
* The AE_NOT_FOUND error will be ignored and OS
* continue to initialize EC.
*/
- printk(KERN_ERR "Fail in evaluating the _REG object"
+ pr_err("Fail in evaluating the _REG object"
" of EC device. Broken bios is suspected.\n");
} else {
acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +794,10 @@ static void ec_remove_handlers(struct acpi_ec *ec)
acpi_disable_gpe(NULL, ec->gpe);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
- pr_err(PREFIX "failed to remove space handler\n");
+ pr_err("failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
- pr_err(PREFIX "failed to remove gpe handler\n");
+ pr_err("failed to remove gpe handler\n");
clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}
@@ -840,7 +839,7 @@ static int acpi_ec_add(struct acpi_device *device)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+ pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
ret = ec_install_handlers(ec);
@@ -931,7 +930,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
- printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+ pr_debug("Detected MSI hardware, enabling workarounds.\n");
EC_FLAGS_MSI = 1;
EC_FLAGS_VALIDATE_ECDT = 1;
return 0;
@@ -1010,7 +1009,7 @@ int __init acpi_ec_ecdt_probe(void)
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_SUCCESS(status)) {
- pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+ pr_info("EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1029,7 @@ int __init acpi_ec_ecdt_probe(void)
/* This workaround is needed only on some broken machines,
* which require early EC, but fail to provide ECDT */
- printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+ pr_debug("Look up EC in DSDT\n");
status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
boot_ec, NULL);
/* Check that acpi_get_devices actually find something */
@@ -1042,7 +1041,7 @@ int __init acpi_ec_ecdt_probe(void)
saved_ec->data_addr != boot_ec->data_addr ||
saved_ec->gpe != boot_ec->gpe ||
saved_ec->handle != boot_ec->handle)
- pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+ pr_info("ASUSTek keeps feeding us with broken "
"ECDT tables, which are very hard to workaround. "
"Trying to use DSDT EC info instead. Please send "
"output of acpidump to linux-acpi@vger.kernel.org\n");
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 8247fcdde079..fdef416c0ff6 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -127,11 +127,6 @@ int acpi_bus_generate_netlink_event(const char *device_class,
}
event = nla_data(attr);
- if (!event) {
- nlmsg_free(skb);
- return -EINVAL;
- }
-
memset(event, 0, sizeof(struct acpi_genl_event));
strcpy(event->device_class, device_class);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 41ade6570bc0..ba3da88cee45 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- end:
+end:
return result;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 20f423337e1f..a29739c0ba79 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -26,11 +26,6 @@
acpi_status acpi_os_initialize1(void);
int init_acpi_device_notify(void);
int acpi_scan_init(void);
-#ifdef CONFIG_ACPI_PCI_SLOT
-void acpi_pci_slot_init(void);
-#else
-static inline void acpi_pci_slot_init(void) { }
-#endif
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
void acpi_pci_root_hp_init(void);
@@ -92,6 +87,7 @@ void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
int acpi_bind_one(struct device *dev, acpi_handle handle);
int acpi_unbind_one(struct device *dev);
+void acpi_bus_device_eject(void *data, u32 ost_src);
/* --------------------------------------------------------------------------
Power Resource
@@ -169,9 +165,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
Video
-------------------------------------------------------------------------- */
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2e82e5d76930..a2343a1d9e0b 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
- if (node < 0) {
+ if (node == NUMA_NO_NODE) {
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h)
int acpi_get_node(acpi_handle *handle)
{
- int pxm, node = -1;
+ int pxm, node = NUMA_NO_NODE;
pxm = acpi_get_pxm(handle);
if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c7f66e..54a20ff4b864 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -61,7 +61,6 @@ struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
- int wait;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -569,8 +568,10 @@ static const char * const table_sigs[] = {
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
@@ -579,8 +580,6 @@ void __init acpi_initrd_override(void *data, size_t size)
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
- struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
- char *p;
if (data == NULL || size == 0)
return;
@@ -625,8 +624,8 @@ void __init acpi_initrd_override(void *data, size_t size)
table->signature, cpio_path, file.name, table->length);
all_tables_size += table->length;
- early_initrd_files[table_nr].data = file.data;
- early_initrd_files[table_nr].size = file.size;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
table_nr++;
}
if (table_nr == 0)
@@ -652,14 +651,34 @@ void __init acpi_initrd_override(void *data, size_t size)
memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
- p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+ /*
+ * early_ioremap() can only remap 256 KB at a time. If we mapped all
+ * tables at once we would hit that limit, so map and copy them in
+ * chunks, the same way relocate_initrd() does.
+ */
for (no = 0; no < table_nr; no++) {
- memcpy(p + total_offset, early_initrd_files[no].data,
- early_initrd_files[no].size);
- total_offset += early_initrd_files[no].size;
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
}
- early_iounmap(p, all_tables_size);
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
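The chunked copy above exists because early_ioremap() can only map a small fixmap window at a time; with the usual x86 values (NR_FIX_BTMAPS = 64 and 4 KiB pages, both assumptions for illustration, not taken from this patch) MAP_CHUNK_SIZE works out to 256 KiB. A minimal user-space sketch of the same slop/chunk arithmetic:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define NR_FIX_BTMAPS	64			/* assumed x86 value */
#define MAP_CHUNK_SIZE	((unsigned long)NR_FIX_BTMAPS << PAGE_SHIFT)

int main(void)
{
	unsigned long dest_addr = 0x1ff800;	/* hypothetical destination */
	unsigned long size = 300 * 1024;	/* hypothetical total table size */

	while (size) {
		unsigned long slop = dest_addr & ~PAGE_MASK;
		unsigned long clen = size;

		if (clen > MAP_CHUNK_SIZE - slop)
			clen = MAP_CHUNK_SIZE - slop;
		/* one early_ioremap() window: page aligned, clen + slop bytes */
		printf("map %#lx, len %lu, copy %lu bytes at offset %lu\n",
		       dest_addr & PAGE_MASK, clen + slop, clen, slop);
		dest_addr += clen;
		size -= clen;
	}
	return 0;
}

Each iteration maps a page-aligned window of at most MAP_CHUNK_SIZE bytes, so the fixmap budget is never exceeded even when a table straddles a window boundary.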
@@ -820,7 +839,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ msleep(ms);
}
void acpi_os_stall(u32 us)
@@ -1067,9 +1086,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
- if (dpc->wait)
- acpi_os_wait_events_complete();
-
dpc->function(dpc->context);
kfree(dpc);
}
@@ -1089,8 +1105,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
*
******************************************************************************/
-static acpi_status __acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
@@ -1117,20 +1133,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
dpc->context = context;
/*
- * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
- * because the hotplug code may call driver .remove() functions,
- * which invoke flush_scheduled_work/acpi_os_wait_events_complete
- * to flush these workqueues.
- *
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
- if (hp) {
- queue = kacpi_hotplug_wq;
- dpc->wait = 1;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else if (type == OSL_NOTIFY_HANDLER) {
+ if (type == OSL_NOTIFY_HANDLER) {
queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
} else {
@@ -1155,28 +1162,59 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
}
return status;
}
+EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
{
- return __acpi_os_execute(type, function, context, 0);
+ flush_workqueue(kacpid_wq);
+ flush_workqueue(kacpi_notify_wq);
}
-EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
- void *context)
+struct acpi_hp_work {
+ struct work_struct work;
+ acpi_hp_callback func;
+ void *data;
+ u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
{
- return __acpi_os_execute(0, function, context, 1);
+ struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+ acpi_os_wait_events_complete();
+ hpw->func(hpw->data, hpw->src);
+ kfree(hpw);
}
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
{
- flush_workqueue(kacpid_wq);
- flush_workqueue(kacpi_notify_wq);
+ struct acpi_hp_work *hpw;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Scheduling function [%p(%p, %u)] for deferred execution.\n",
+ func, data, src));
+
+ hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+ if (!hpw)
+ return AE_NO_MEMORY;
+
+ INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+ hpw->func = func;
+ hpw->data = data;
+ hpw->src = src;
+ /*
+ * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+ * the hotplug code may call driver .remove() functions, which may
+ * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+ * these workqueues.
+ */
+ if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+ kfree(hpw);
+ return AE_ERROR;
+ }
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
@@ -1335,7 +1373,7 @@ static int __init acpi_os_name_setup(char *str)
if (!str || !*str)
return 0;
- for (; count-- && str && *str; str++) {
+ for (; count-- && *str; str++) {
if (isalnum(*str) || *str == ' ' || *str == ':')
*p++ = *str;
else if (*str == '\'' || *str == '"')
@@ -1825,25 +1863,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
{
__acpi_os_prepare_extended_sleep = func;
}
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
- void (*func)(struct work_struct *work))
-{
- struct acpi_hp_work *hp_work;
- int ret;
-
- hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
- if (!hp_work)
- return;
-
- hp_work->handle = handle;
- hp_work->type = type;
- hp_work->context = context;
-
- INIT_WORK(&hp_work->work, func);
- ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
- if (!ret)
- kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);
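A minimal sketch of how a notify handler might defer into the new acpi_hotplug_execute() interface. The handler names below are placeholders, and the acpi_hp_callback prototype (a void callback taking the data pointer and the notification value) is assumed from this series, so treat this as an illustration rather than a drop-in caller:

/* Sketch only; assumes the acpi_hp_callback prototype added by this series. */
static void example_hotplug_work(void *data, u32 src)
{
	acpi_handle handle = data;

	/* Runs on kacpi_hotplug_wq after pending notify work has drained. */
	acpi_handle_info(handle, "handling hotplug event %u\n", src);
}

static void example_notify(acpi_handle handle, u32 event, void *context)
{
	acpi_status status;

	status = acpi_hotplug_execute(example_hotplug_work, handle, event);
	if (ACPI_FAILURE(status))
		acpi_handle_warn(handle, "cannot defer event %u\n", event);
}

Deferring through kacpi_hotplug_wq keeps hotplug work off kacpid_wq/kacpi_notify_wq, which the callback itself may need to flush.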
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d3874f425653..417876bce854 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -39,6 +39,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/apei.h>
+#include "internal.h"
+
#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PCI_COMPONENT
@@ -590,39 +592,10 @@ static void handle_root_bridge_insertion(acpi_handle handle)
acpi_handle_err(handle, "cannot add bridge to acpi list\n");
}
-static void handle_root_bridge_removal(struct acpi_device *device)
-{
- acpi_status status;
- struct acpi_eject_event *ej_event;
-
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- /* Inform firmware the hot-remove operation has error */
- (void) acpi_evaluate_hotplug_ost(device->handle,
- ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
- return;
- }
-
- ej_event->device = device;
- ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
-
- status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
- if (ACPI_FAILURE(status))
- kfree(ej_event);
-}
-
-static void _handle_hotplug_event_root(struct work_struct *work)
+static void hotplug_event_root(void *data, u32 type)
{
+ acpi_handle handle = data;
struct acpi_pci_root *root;
- struct acpi_hp_work *hp_work;
- acpi_handle handle;
- u32 type;
-
- hp_work = container_of(work, struct acpi_hp_work, work);
- handle = hp_work->handle;
- type = hp_work->type;
acpi_scan_lock_acquire();
@@ -652,9 +625,15 @@ static void _handle_hotplug_event_root(struct work_struct *work)
/* request device eject */
acpi_handle_printk(KERN_DEBUG, handle,
"Device eject notify on %s\n", __func__);
- if (root)
- handle_root_bridge_removal(root->device);
- break;
+ if (!root)
+ break;
+
+ get_device(&root->device->dev);
+
+ acpi_scan_lock_release();
+
+ acpi_bus_device_eject(root->device, ACPI_NOTIFY_EJECT_REQUEST);
+ return;
default:
acpi_handle_warn(handle,
"notify_handler: unknown event type 0x%x\n",
@@ -663,14 +642,12 @@ static void _handle_hotplug_event_root(struct work_struct *work)
}
acpi_scan_lock_release();
- kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
}
static void handle_hotplug_event_root(acpi_handle handle, u32 type,
void *context)
{
- alloc_acpi_hp_work(handle, type, context,
- _handle_hotplug_event_root);
+ acpi_hotplug_execute(hotplug_event_root, handle, type);
}
static acpi_status __init
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 04a13784dd20..6a5b152ad4d0 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -8,289 +8,17 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
#include "sleep.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
/*
* this file provides support for:
- * /proc/acpi/alarm
* /proc/acpi/wakeup
*/
ACPI_MODULE_NAME("sleep")
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
- u32 sec, min, hr;
- u32 day, mo, yr, cent = 0;
- u32 today = 0;
- unsigned char rtc_control = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
- sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
- min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
- hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
- /* If we ever get an FACP with proper values... */
- if (acpi_gbl_FADT.day_alarm) {
- /* ACPI spec: only low 6 its should be cared */
- day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- day = bcd2bin(day);
- } else
- day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
- else {
- mo = cmos_bcd_read(RTC_MONTH, rtc_control);
- today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- }
- if (acpi_gbl_FADT.century)
- cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
- yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- /* we're trusting the FADT (see above) */
- if (!acpi_gbl_FADT.century)
- /* If we're not trusting the FADT, we should at least make it
- * right for _this_ century... ehm, what is _this_ century?
- *
- * TBD:
- * ASAP: find piece of code in the kernel, e.g. star tracker driver,
- * which we can trust to determine the century correctly. Atom
- * watch driver would be nice, too...
- *
- * if that has not happened, change for first release in 2050:
- * if (yr<50)
- * yr += 2100;
- * else
- * yr += 2000; // current line of code
- *
- * if that has not happened either, please do on 2099/12/31:23:59:59
- * s/2000/2100
- *
- */
- yr += 2000;
- else
- yr += cent * 100;
-
- /*
- * Show correct dates for alarms up to a month into the future.
- * This solves issues for nearly all situations with the common
- * 30-day alarm clocks in PC hardware.
- */
- if (day < today) {
- if (mo < 12) {
- mo += 1;
- } else {
- mo = 1;
- yr += 1;
- }
- }
-
- seq_printf(seq, "%4.4u-", yr);
- (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
- (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
- (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
- (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
- (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
- return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
- char *next = NULL;
- char *string_end = NULL;
- int result = -EINVAL;
-
- /*
- * Try to find delimeter, only to insert null. The end of the
- * string won't have one, but is still valid.
- */
- if (*p == NULL)
- return result;
-
- next = strpbrk(*p, "- :");
- if (next)
- *next++ = '\0';
-
- *value = simple_strtoul(*p, &string_end, 10);
-
- /* Signal success if we got a good digit */
- if (string_end != *p)
- result = 0;
-
- if (next)
- *p = next;
- else
- *p = NULL;
-
- return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
- u32 val = CMOS_READ(offset);
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bcd2bin(val);
- return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- val = bin2bcd(val);
- CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
- const char __user * buffer, size_t count, loff_t * ppos)
-{
- int result = 0;
- char alarm_string[30] = { '\0' };
- char *p = alarm_string;
- u32 sec, min, hr, day, mo, yr;
- int adjust = 0;
- unsigned char rtc_control = 0;
-
- if (count > sizeof(alarm_string) - 1)
- return -EINVAL;
-
- if (copy_from_user(alarm_string, buffer, count))
- return -EFAULT;
-
- alarm_string[count] = '\0';
-
- /* check for time adjustment */
- if (alarm_string[0] == '+') {
- p++;
- adjust = 1;
- }
-
- if ((result = get_date_field(&p, &yr)))
- goto end;
- if ((result = get_date_field(&p, &mo)))
- goto end;
- if ((result = get_date_field(&p, &day)))
- goto end;
- if ((result = get_date_field(&p, &hr)))
- goto end;
- if ((result = get_date_field(&p, &min)))
- goto end;
- if ((result = get_date_field(&p, &sec)))
- goto end;
-
- spin_lock_irq(&rtc_lock);
-
- rtc_control = CMOS_READ(RTC_CONTROL);
-
- if (adjust) {
- yr += cmos_bcd_read(RTC_YEAR, rtc_control);
- mo += cmos_bcd_read(RTC_MONTH, rtc_control);
- day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
- hr += cmos_bcd_read(RTC_HOURS, rtc_control);
- min += cmos_bcd_read(RTC_MINUTES, rtc_control);
- sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
- }
-
- spin_unlock_irq(&rtc_lock);
-
- if (sec > 59) {
- min += sec/60;
- sec = sec%60;
- }
- if (min > 59) {
- hr += min/60;
- min = min%60;
- }
- if (hr > 23) {
- day += hr/24;
- hr = hr%24;
- }
- if (day > 31) {
- mo += day/32;
- day = day%32;
- }
- if (mo > 12) {
- yr += mo/13;
- mo = mo%13;
- }
-
- spin_lock_irq(&rtc_lock);
- /*
- * Disable alarm interrupt before setting alarm timer or else
- * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
- */
- rtc_control &= ~RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- /* write the fields the rtc knows about */
- cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
- cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
- cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
- /*
- * If the system supports an enhanced alarm it will have non-zero
- * offsets into the CMOS RAM here -- which for some reason are pointing
- * to the RTC area of memory.
- */
- if (acpi_gbl_FADT.day_alarm)
- cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
- if (acpi_gbl_FADT.month_alarm)
- cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
- if (acpi_gbl_FADT.century) {
- if (adjust)
- yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
- cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
- }
- /* enable the rtc alarm interrupt */
- rtc_control |= RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- CMOS_READ(RTC_INTR_FLAGS);
-
- spin_unlock_irq(&rtc_lock);
-
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_enable_event(ACPI_EVENT_RTC, 0);
-
- *ppos += count;
-
- result = 0;
- end:
- return result ? result : count;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
-#ifdef HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
- .owner = THIS_MODULE,
- .open = acpi_system_alarm_open_fs,
- .read = seq_read,
- .write = acpi_system_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-
- return ACPI_INTERRUPT_HANDLED;
-}
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
int __init acpi_sleep_proc_init(void)
{
-#ifdef HAVE_ACPI_LEGACY_ALARM
- /* 'alarm' [R/W] */
- proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_alarm_fops);
-
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
- /*
- * Disable the RTC event after installing RTC handler.
- * Only when RTC alarm is set will it be enabled.
- */
- acpi_clear_event(ACPI_EVENT_RTC);
- acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif /* HAVE_ACPI_LEGACY_ALARM */
-
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir, &acpi_system_wakeup_device_fops);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cf34d903f4fb..b3171f30b319 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -162,16 +162,23 @@ exit:
return apic_id;
}
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
{
-#ifdef CONFIG_SMP
- int i;
-#endif
- int apic_id = -1;
+ int apic_id;
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
+
+ return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
if (apic_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
#endif
return -1;
}
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+ int apic_id;
+
+ apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+ return acpi_map_cpuid(apic_id, acpi_id);
+}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
static bool __init processor_physically_present(acpi_handle handle)
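With acpi_get_cpuid() split as above, the APIC-ID lookup and the APIC-ID-to-logical-CPU mapping can also be used independently. A hedged sketch of the two-step call, with placeholder handle/type/acpi_id values supplied by the caller:

/* Sketch only; handle, type and acpi_id come from the caller. */
static int example_resolve_cpu(acpi_handle handle, int type, u32 acpi_id)
{
	int apic_id, cpu;

	apic_id = acpi_get_apicid(handle, type, acpi_id);	/* _MAT, then MADT */
	cpu = acpi_map_cpuid(apic_id, acpi_id);			/* logical CPU or -1 */
	if (cpu < 0)
		pr_warn("no logical CPU for ACPI id %u\n", acpi_id);
	return cpu;
}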
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e534ba66d5b8..146ab7e2b81d 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
.notifier_call = acpi_cpu_soft_notify,
};
@@ -172,7 +171,6 @@ static int __acpi_processor_start(struct acpi_device *device)
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_load_module(pr);
#endif
acpi_processor_get_throttling_info(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c7414a545a4f..644516d9bde6 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -265,9 +265,6 @@ static void tsc_check_state(int state) { return; }
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
- if (!pr)
- return -EINVAL;
-
if (!pr->pblk)
return -ENODEV;
@@ -1050,12 +1047,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
- if (nocst) {
+ if (nocst)
return -ENODEV;
- }
if (!pr->flags.power_setup_done)
return -ENODEV;
@@ -1082,9 +1075,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (!pr)
- return -EINVAL;
-
if (nocst)
return -ENODEV;
@@ -1157,9 +1147,6 @@ int acpi_processor_power_init(struct acpi_processor *pr)
first_run++;
}
- if (!pr)
- return -EINVAL;
-
if (acpi_gbl_FADT.cst_control && !nocst) {
status =
acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 51d7948611da..60a7c28fc167 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -235,28 +235,6 @@ void acpi_processor_ppc_exit(void)
acpi_processor_ppc_status &= ~PPC_REGISTERED;
}
-/*
- * Do a quick check if the systems looks like it should use ACPI
- * cpufreq. We look at a _PCT method being available, but don't
- * do a whole lot of sanity checks.
- */
-void acpi_processor_load_module(struct acpi_processor *pr)
-{
- static int requested;
- acpi_status status = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!arch_has_acpi_pdc() || requested)
- return;
- status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
- if (!ACPI_FAILURE(status)) {
- printk(KERN_INFO PREFIX "Requesting acpi_cpufreq\n");
- request_module_nowait("acpi_cpufreq");
- requested = 1;
- }
- kfree(buffer.pointer);
-}
-
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index aef7e1cd1e5d..d465ae6cdd00 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -30,12 +30,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
struct acpi_battery {
struct power_supply bat;
struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *proc_entry;
-#endif
unsigned long update_time;
char name[8];
char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@ struct acpi_sbs {
struct acpi_device *device;
struct acpi_smb_hc *hc;
struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- struct proc_dir_entry *charger_entry;
-#endif
struct acpi_battery battery[MAX_SBS_BAT];
u8 batteries_supported:4;
u8 manager_present:1;
@@ -482,261 +465,6 @@ static struct device_attribute alarm_attr = {
};
/* --------------------------------------------------------------------------
- FS Interface (/proc/acpi)
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
- struct proc_dir_entry *parent_dir,
- char *dir_name,
- const struct file_operations *info_fops,
- const struct file_operations *state_fops,
- const struct file_operations *alarm_fops, void *data)
-{
- printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
- " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
- if (!*dir) {
- *dir = proc_mkdir(dir_name, parent_dir);
- if (!*dir) {
- return -ENODEV;
- }
- }
-
- /* 'info' [R] */
- if (info_fops)
- proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
- info_fops, data);
-
- /* 'state' [R] */
- if (state_fops)
- proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
- state_fops, data);
-
- /* 'alarm' [R/W] */
- if (alarm_fops)
- proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
- alarm_fops, data);
- return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
- return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- seq_printf(seq, "design capacity: %i%sh\n",
- battery->design_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "last full capacity: %i%sh\n",
- battery->full_charge_capacity * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "battery technology: rechargeable\n");
- seq_printf(seq, "design voltage: %i mV\n",
- battery->design_voltage * acpi_battery_vscale(battery));
- seq_printf(seq, "design capacity warning: unknown\n");
- seq_printf(seq, "design capacity low: unknown\n");
- seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
- seq_printf(seq, "capacity granularity 1: unknown\n");
- seq_printf(seq, "capacity granularity 2: unknown\n");
- seq_printf(seq, "model number: %s\n", battery->device_name);
- seq_printf(seq, "serial number: %i\n",
- battery->serial_number);
- seq_printf(seq, "battery type: %s\n",
- battery->device_chemistry);
- seq_printf(seq, "OEM info: %s\n",
- battery->manufacturer_name);
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int rate;
-
- mutex_lock(&sbs->lock);
- seq_printf(seq, "present: %s\n",
- (battery->present) ? "yes" : "no");
- if (!battery->present)
- goto end;
-
- acpi_battery_get_state(battery);
- seq_printf(seq, "capacity state: %s\n",
- (battery->state & 0x0010) ? "critical" : "ok");
- seq_printf(seq, "charging state: %s\n",
- (battery->rate_now < 0) ? "discharging" :
- ((battery->rate_now > 0) ? "charging" : "charged"));
- rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
- rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
- acpi_battery_vscale(battery)/1000):1;
- seq_printf(seq, "present rate: %d%s\n", rate,
- acpi_battery_units(battery));
- seq_printf(seq, "remaining capacity: %i%sh\n",
- battery->capacity_now * acpi_battery_scale(battery),
- acpi_battery_units(battery));
- seq_printf(seq, "present voltage: %i mV\n",
- battery->voltage_now * acpi_battery_vscale(battery));
-
- end:
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- int result = 0;
-
- mutex_lock(&sbs->lock);
-
- if (!battery->present) {
- seq_printf(seq, "present: no\n");
- goto end;
- }
-
- acpi_battery_get_alarm(battery);
- seq_printf(seq, "alarm: ");
- if (battery->alarm_capacity)
- seq_printf(seq, "%i%sh\n",
- battery->alarm_capacity *
- acpi_battery_scale(battery),
- acpi_battery_units(battery));
- else
- seq_printf(seq, "disabled\n");
- end:
- mutex_unlock(&sbs->lock);
- return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct seq_file *seq = file->private_data;
- struct acpi_battery *battery = seq->private;
- struct acpi_sbs *sbs = battery->sbs;
- char alarm_string[12] = { '\0' };
- int result = 0;
- mutex_lock(&sbs->lock);
- if (!battery->present) {
- result = -ENODEV;
- goto end;
- }
- if (count > sizeof(alarm_string) - 1) {
- result = -EINVAL;
- goto end;
- }
- if (copy_from_user(alarm_string, buffer, count)) {
- result = -EFAULT;
- goto end;
- }
- alarm_string[count] = 0;
- battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
- acpi_battery_scale(battery);
- acpi_battery_set_alarm(battery);
- end:
- mutex_unlock(&sbs->lock);
- if (result)
- return result;
- return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
- .open = acpi_battery_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
- .open = acpi_battery_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
- .open = acpi_battery_alarm_open_fs,
- .read = seq_read,
- .write = acpi_battery_write_alarm,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
- struct acpi_sbs *sbs = seq->private;
-
- mutex_lock(&sbs->lock);
-
- seq_printf(seq, "state: %s\n",
- sbs->charger_present ? "on-line" : "off-line");
-
- mutex_unlock(&sbs->lock);
- return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
- .open = acpi_ac_state_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-#endif
-
-/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_battery_read(struct acpi_battery *battery)
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
return result;
sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
- battery->name, &acpi_battery_info_fops,
- &acpi_battery_state_fops, &acpi_battery_alarm_fops,
- battery);
-#endif
battery->bat.name = battery->name;
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
}
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(battery->proc_entry);
- battery->proc_entry = NULL;
-#endif
}
static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
result = acpi_ac_get_present(sbs);
if (result)
goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
- ACPI_AC_DIR_NAME, NULL,
- &acpi_ac_state_fops, NULL, sbs);
- if (result)
- goto end;
-#endif
+
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
{
if (sbs->charger.dev)
power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
- proc_remove(sbs->charger_entry);
- sbs->charger_entry = NULL;
-#endif
}
static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device)
return 0;
}
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
- if (acpi_ac_dir) {
- acpi_unlock_ac_dir(acpi_ac_dir);
- acpi_ac_dir = NULL;
- }
- if (acpi_battery_dir) {
- acpi_unlock_battery_dir(acpi_battery_dir);
- acpi_battery_dir = NULL;
- }
-#endif
-}
-
#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void)
if (acpi_disabled)
return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
- acpi_ac_dir = acpi_lock_ac_dir();
- if (!acpi_ac_dir)
- return -ENODEV;
- acpi_battery_dir = acpi_lock_battery_dir();
- if (!acpi_battery_dir) {
- acpi_sbs_rmdirs();
- return -ENODEV;
- }
-#endif
+
result = acpi_bus_register_driver(&acpi_sbs_driver);
- if (result < 0) {
- acpi_sbs_rmdirs();
+ if (result < 0)
return -ENODEV;
- }
+
return 0;
}
static void __exit acpi_sbs_exit(void)
{
acpi_bus_unregister_driver(&acpi_sbs_driver);
- acpi_sbs_rmdirs();
return;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fee8a297c7d9..55f9dedbbf9f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -125,8 +125,8 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
- void *data, void **ret_p)
+static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
+ void **ret_p)
{
struct acpi_device *device = NULL;
struct acpi_device_physical_node *pn;
@@ -136,6 +136,11 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
if (acpi_bus_get_device(handle, &device))
return AE_OK;
+ if (device->handler && !device->handler->hotplug.enabled) {
+ *ret_p = &device->dev;
+ return AE_SUPPORT;
+ }
+
mutex_lock(&device->physical_node_lock);
list_for_each_entry(pn, &device->physical_node_list, node) {
@@ -168,8 +173,8 @@ static acpi_status acpi_bus_offline_companions(acpi_handle handle, u32 lvl,
return status;
}
-static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
- void *data, void **ret_p)
+static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
+ void **ret_p)
{
struct acpi_device *device = NULL;
struct acpi_device_physical_node *pn;
@@ -214,26 +219,32 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
* If the first pass is successful, the second one isn't needed, though.
*/
errdev = NULL;
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- NULL, acpi_bus_offline_companions,
- (void *)false, (void **)&errdev);
- acpi_bus_offline_companions(handle, 0, (void *)false, (void **)&errdev);
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ NULL, acpi_bus_offline, (void *)false,
+ (void **)&errdev);
+ if (status == AE_SUPPORT) {
+ dev_warn(errdev, "Offline disabled.\n");
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_online, NULL, NULL, NULL);
+ put_device(&device->dev);
+ return -EPERM;
+ }
+ acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
if (errdev) {
errdev = NULL;
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- NULL, acpi_bus_offline_companions,
- (void *)true , (void **)&errdev);
+ NULL, acpi_bus_offline, (void *)true,
+ (void **)&errdev);
if (!errdev || acpi_force_hot_remove)
- acpi_bus_offline_companions(handle, 0, (void *)true,
- (void **)&errdev);
+ acpi_bus_offline(handle, 0, (void *)true,
+ (void **)&errdev);
if (errdev && !acpi_force_hot_remove) {
dev_warn(errdev, "Offline failed.\n");
- acpi_bus_online_companions(handle, 0, NULL, NULL);
+ acpi_bus_online(handle, 0, NULL, NULL);
acpi_walk_namespace(ACPI_TYPE_ANY, handle,
- ACPI_UINT32_MAX,
- acpi_bus_online_companions, NULL,
- NULL, NULL);
+ ACPI_UINT32_MAX, acpi_bus_online,
+ NULL, NULL, NULL);
put_device(&device->dev);
return -EBUSY;
}
@@ -274,10 +285,10 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-static void acpi_bus_device_eject(void *context)
+void acpi_bus_device_eject(void *data, u32 ost_src)
{
- acpi_handle handle = context;
- struct acpi_device *device = NULL;
+ struct acpi_device *device = data;
+ acpi_handle handle = device->handle;
struct acpi_scan_handler *handler;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
@@ -285,38 +296,41 @@ static void acpi_bus_device_eject(void *context)
lock_device_hotplug();
mutex_lock(&acpi_scan_lock);
- acpi_bus_get_device(handle, &device);
- if (!device)
- goto err_out;
-
handler = device->handler;
if (!handler || !handler->hotplug.enabled) {
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- goto err_out;
+ put_device(&device->dev);
+ goto err_support;
}
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
- ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+
+ if (ost_src == ACPI_NOTIFY_EJECT_REQUEST)
+ acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+ ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+
if (handler->hotplug.mode == AHM_CONTAINER)
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- get_device(&device->dev);
error = acpi_scan_hot_remove(device);
- if (error)
+ if (error == -EPERM) {
+ goto err_support;
+ } else if (error) {
goto err_out;
+ }
out:
mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug();
return;
+ err_support:
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
err_out:
- acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
- NULL);
+ acpi_evaluate_hotplug_ost(handle, ost_src, ost_code, NULL);
goto out;
}
-static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+static void acpi_scan_bus_device_check(void *data, u32 ost_source)
{
+ acpi_handle handle = data;
struct acpi_device *device = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
int error;
@@ -331,8 +345,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
goto out;
}
}
- acpi_evaluate_hotplug_ost(handle, ost_source,
- ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
error = acpi_bus_scan(handle);
if (error) {
acpi_handle_warn(handle, "Namespace scan failure\n");
@@ -353,18 +365,6 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
unlock_device_hotplug();
}
-static void acpi_scan_bus_check(void *context)
-{
- acpi_scan_bus_device_check((acpi_handle)context,
- ACPI_NOTIFY_BUS_CHECK);
-}
-
-static void acpi_scan_device_check(void *context)
-{
- acpi_scan_bus_device_check((acpi_handle)context,
- ACPI_NOTIFY_DEVICE_CHECK);
-}
-
static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
{
u32 ost_status;
@@ -395,8 +395,8 @@ static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
- acpi_osd_exec_callback callback;
struct acpi_scan_handler *handler = data;
+ struct acpi_device *adev;
acpi_status status;
if (!handler->hotplug.enabled)
@@ -405,56 +405,35 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
- callback = acpi_scan_bus_check;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
- callback = acpi_scan_device_check;
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- callback = acpi_bus_device_eject;
- break;
+ status = acpi_bus_get_device(handle, &adev);
+ if (ACPI_FAILURE(status))
+ goto err_out;
+
+ get_device(&adev->dev);
+ status = acpi_hotplug_execute(acpi_bus_device_eject, adev, type);
+ if (ACPI_SUCCESS(status))
+ return;
+
+ put_device(&adev->dev);
+ goto err_out;
default:
/* non-hotplug event; possibly handled by other handler */
return;
}
- status = acpi_os_hotplug_execute(callback, handle);
- if (ACPI_FAILURE(status))
- acpi_evaluate_hotplug_ost(handle, type,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
-}
-
-/**
- * acpi_bus_hot_remove_device: hot-remove a device and its children
- * @context: struct acpi_eject_event pointer (freed in this func)
- *
- * Hot-remove a device and its children. This function frees up the
- * memory space passed by arg context, so that the caller may call
- * this function asynchronously through acpi_os_hotplug_execute().
- */
-void acpi_bus_hot_remove_device(void *context)
-{
- struct acpi_eject_event *ej_event = context;
- struct acpi_device *device = ej_event->device;
- acpi_handle handle = device->handle;
- int error;
-
- lock_device_hotplug();
- mutex_lock(&acpi_scan_lock);
-
- error = acpi_scan_hot_remove(device);
- if (error && handle)
- acpi_evaluate_hotplug_ost(handle, ej_event->event,
- ACPI_OST_SC_NON_SPECIFIC_FAILURE,
- NULL);
+ status = acpi_hotplug_execute(acpi_scan_bus_device_check, handle, type);
+ if (ACPI_SUCCESS(status))
+ return;
- mutex_unlock(&acpi_scan_lock);
- unlock_device_hotplug();
- kfree(context);
+ err_out:
+ acpi_evaluate_hotplug_ost(handle, type,
+ ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
-EXPORT_SYMBOL(acpi_bus_hot_remove_device);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -487,10 +466,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
- struct acpi_eject_event *ej_event;
acpi_object_type not_used;
acpi_status status;
- int ret;
if (!count || buf[0] != '1')
return -EINVAL;
@@ -503,28 +480,18 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
- ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
- if (!ej_event) {
- ret = -ENOMEM;
- goto err_out;
- }
acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- ej_event->device = acpi_device;
- ej_event->event = ACPI_OST_EC_OSPM_EJECT;
get_device(&acpi_device->dev);
- status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+ status = acpi_hotplug_execute(acpi_bus_device_eject, acpi_device,
+ ACPI_OST_EC_OSPM_EJECT);
if (ACPI_SUCCESS(status))
return count;
put_device(&acpi_device->dev);
- kfree(ej_event);
- ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-
- err_out:
acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
- return ret;
+ return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -1676,7 +1643,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
void acpi_device_add_finalize(struct acpi_device *device)
{
- device->flags.match_driver = true;
dev_set_uevent_suppress(&device->dev, false);
kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}
@@ -1915,8 +1881,12 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
ret = acpi_scan_attach_handler(device);
- if (ret)
- return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
+ if (ret < 0)
+ return AE_CTRL_DEPTH;
+
+ device->flags.match_driver = true;
+ if (ret > 0)
+ return AE_OK;
ret = device_attach(&device->dev);
return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
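The reworked eject path hands a device reference to the deferred work and drops it only when scheduling fails. A minimal sketch of that ownership pattern, assuming the acpi_bus_device_eject() and acpi_hotplug_execute() interfaces introduced above (the wrapper function itself is hypothetical):

/* Sketch only; relies on the eject interfaces introduced above. */
static acpi_status example_request_eject(struct acpi_device *adev, u32 src)
{
	acpi_status status;

	get_device(&adev->dev);			/* reference travels with the work */
	status = acpi_hotplug_execute(acpi_bus_device_eject, adev, src);
	if (ACPI_FAILURE(status))
		put_device(&adev->dev);		/* the work will never run */
	return status;
}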
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 05306a59aedc..db5293650f62 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj,
acpi_event_status status;
acpi_handle handle;
int result = 0;
+ unsigned long tmp;
if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
int i;
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj,
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_event(event);
+ else if (!kstrtoul(buf, 0, &tmp))
+ all_counters[index].count = tmp;
else
- all_counters[index].count = strtoul(buf, NULL, 0);
+ result = -EINVAL;
} else
all_counters[index].count = strtoul(buf, NULL, 0);
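The kstrtoul() conversion above rejects malformed input instead of silently storing whatever strtoul() salvages. A small sketch of the parsing pattern (the store helper below is hypothetical, not part of this file):

/* Sketch only; the surrounding sysfs attribute is hypothetical. */
static ssize_t example_counter_store(const char *buf, size_t count,
				     unsigned long *counter)
{
	unsigned long tmp;

	if (kstrtoul(buf, 0, &tmp))	/* -EINVAL/-ERANGE on malformed input */
		return -EINVAL;
	*counter = tmp;			/* "42", "0x2a" and "42\n" all parse */
	return count;
}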
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
if (!hotplug_kobj)
goto err_out;
- kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
- error = kobject_set_name(&hotplug->kobj, "%s", name);
- if (error)
- goto err_out;
-
- hotplug->kobj.parent = hotplug_kobj;
- error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+ error = kobject_init_and_add(&hotplug->kobj,
+ &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
if (error)
goto err_out;
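kobject_init_and_add() collapses the former kobject_init()/kobject_set_name()/kobject_add() sequence into one call. A hedged sketch of the pattern (the wrapper function is hypothetical; the ktype is the one used above):

/* Sketch only; the wrapper is hypothetical, the ktype is the one above. */
static int example_add_profile_kobj(struct kobject *kobj,
				    struct kobject *parent, const char *name)
{
	int error;

	error = kobject_init_and_add(kobj, &acpi_hotplug_profile_ktype,
				     parent, "%s", name);
	if (error)
		kobject_put(kobj);	/* kobject_init() succeeded, drop the ref */
	return error;
}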
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6a0329340b42..e600b5dbfcb6 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No critical threshold\n"));
} else if (tmp <= 2732) {
- printk(KERN_WARNING FW_BUG "Invalid critical threshold "
- "(%llu)\n", tmp);
+ pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+ tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* Allow override critical threshold
*/
if (crt_k > tz->trips.critical.temperature)
- printk(KERN_WARNING PREFIX
- "Critical threshold %d C\n", crt);
+ pr_warn(PREFIX "Critical threshold %d C\n",
+ crt);
tz->trips.critical.temperature = crt_k;
}
}
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid passive threshold\n");
+ pr_warn(PREFIX "Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
}
else
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING PREFIX
- "Invalid active%d threshold\n", i);
+ pr_warn(PREFIX "Invalid active%d threshold\n",
+ i);
tz->trips.active[i].flags.valid = 0;
}
else
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
valid |= tz->trips.active[i].flags.valid;
if (!valid) {
- printk(KERN_WARNING FW_BUG "No valid trip found\n");
+ pr_warn(FW_BUG "No valid trip found\n");
return -ENODEV;
}
return 0;
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
acpi_bus_private_data_handler,
tz->thermal_zone);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Error attaching device data\n");
+ pr_err(PREFIX "Error attaching device data\n");
return -ENODEV;
}
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
- acpi_device_name(device), acpi_device_bid(device),
- KELVIN_TO_CELSIUS(tz->temperature));
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+ acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
goto end;
free_memory:
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev)
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all active thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all active thermal trip points\n", d->ident);
act = -1;
}
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all critical thermal trip point actions.\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all critical thermal trip point actions.\n", d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
if (tzp == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "enabling thermal zone polling\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "enabling thermal zone polling\n", d->ident);
tzp = 300; /* 300 dS = 30 Seconds */
}
return 0;
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
static int thermal_psv(const struct dmi_system_id *d) {
if (psv == 0) {
- printk(KERN_NOTICE "ACPI: %s detected: "
- "disabling all passive thermal trip points\n", d->ident);
+ pr_notice(PREFIX "%s detected: "
+ "disabling all passive thermal trip points\n", d->ident);
psv = -1;
}
return 0;
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void)
dmi_check_system(thermal_dmi_table);
if (off) {
- printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+ pr_notice(PREFIX "thermal control disabled\n");
return -ENODEV;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 552248b0005b..6d408bfbbb1d 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -121,7 +121,7 @@ acpi_extract_package(union acpi_object *package,
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
- " [%d]: got number, expecing"
+ " [%d]: got number, expecting"
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
@@ -148,7 +148,7 @@ acpi_extract_package(union acpi_object *package,
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d] got string/buffer,"
- " expecing [%c]\n",
+ " expecting [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package,
/*
* Validate output buffer.
*/
- if (buffer->length < size_required) {
+ if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+ buffer->pointer = ACPI_ALLOCATE(size_required);
+ if (!buffer->pointer)
+ return AE_NO_MEMORY;
buffer->length = size_required;
- return AE_BUFFER_OVERFLOW;
- } else if (buffer->length != size_required || !buffer->pointer) {
- return AE_BAD_PARAMETER;
+ memset(buffer->pointer, 0, size_required);
+ } else {
+ if (buffer->length < size_required) {
+ buffer->length = size_required;
+ return AE_BUFFER_OVERFLOW;
+ } else if (buffer->length != size_required ||
+ !buffer->pointer) {
+ return AE_BAD_PARAMETER;
+ }
}
head = buffer->pointer;
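With the change above, acpi_extract_package() can size its own output when the caller passes ACPI_ALLOCATE_BUFFER. A hedged sketch of that calling convention (the package object, the "NN" format string and the helper name are placeholders):

/* Sketch only; "package" is assumed to be a valid ACPI_TYPE_PACKAGE object. */
static acpi_status example_extract_two_ints(union acpi_object *package,
					    u64 *out_values)
{
	struct acpi_buffer format = { sizeof("NN"), "NN" };	/* two integers */
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_extract_package(package, &format, &out);
	if (ACPI_FAILURE(status))
		return status;

	memcpy(out_values, out.pointer, 2 * sizeof(u64));
	ACPI_FREE(out.pointer);		/* allocated by acpi_extract_package() */
	return AE_OK;
}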
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index aebcf6355df4..18dbdff4656e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,16 @@ module_param(allow_duplicates, bool, 0644);
static bool use_bios_initial_backlight = 1;
module_param(use_bios_initial_backlight, bool, 0644);
+/*
+ * For Windows 8 systems: if set true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
+ */
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
+
static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +166,7 @@ struct acpi_video_bus {
struct acpi_video_bus_flags flags;
struct list_head video_device_list;
struct mutex device_list_lock; /* protects video_device_list */
+ struct list_head entry;
struct input_dev *input;
char phys[32]; /* for input device */
struct notifier_block pm_nb;
@@ -229,6 +239,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_verify_backlight_support(void)
+{
+ if (acpi_osi_is_win8() && use_native_backlight &&
+ backlight_device_registered(BACKLIGHT_RAW))
+ return false;
+ return acpi_video_backlight_support();
+}
+
/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
{
@@ -830,9 +848,9 @@ acpi_video_init_brightness(struct acpi_video_device *device)
* or an index). Set the backlight to max_level in this case.
*/
for (i = 2; i < br->count; i++)
- if (level_old == br->levels[i])
+ if (level == br->levels[i])
break;
- if (i == br->count)
+ if (i == br->count || !level)
level = max_level;
}
@@ -884,79 +902,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_has_method(device->dev->handle, "_DDC"))
device->cap._DDC = 1;
-
- if (acpi_video_backlight_support()) {
- struct backlight_properties props;
- struct pci_dev *pdev;
- acpi_handle acpi_parent;
- struct device *parent = NULL;
- int result;
- static int count;
- char *name;
-
- result = acpi_video_init_brightness(device);
- if (result)
- return;
- name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
- if (!name)
- return;
- count++;
-
- acpi_get_parent(device->dev->handle, &acpi_parent);
-
- pdev = acpi_get_pci_dev(acpi_parent);
- if (pdev) {
- parent = &pdev->dev;
- pci_dev_put(pdev);
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_FIRMWARE;
- props.max_brightness = device->brightness->count - 3;
- device->backlight = backlight_device_register(name,
- parent,
- device,
- &acpi_backlight_ops,
- &props);
- kfree(name);
- if (IS_ERR(device->backlight))
- return;
-
- /*
- * Save current brightness level in case we have to restore it
- * before acpi_video_device_lcd_set_level() is called next time.
- */
- device->backlight->props.brightness =
- acpi_video_get_brightness(device->backlight);
-
- device->cooling_dev = thermal_cooling_device_register("LCD",
- device->dev, &video_cooling_ops);
- if (IS_ERR(device->cooling_dev)) {
- /*
- * Set cooling_dev to NULL so we don't crash trying to
- * free it.
- * Also, why the hell we are returning early and
- * not attempt to register video output if cooling
- * device registration failed?
- * -- dtor
- */
- device->cooling_dev = NULL;
- return;
- }
-
- dev_info(&device->dev->dev, "registered as cooling_device%d\n",
- device->cooling_dev->id);
- result = sysfs_create_link(&device->dev->dev.kobj,
- &device->cooling_dev->device.kobj,
- "thermal_cooling");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- result = sysfs_create_link(&device->cooling_dev->device.kobj,
- &device->dev->dev.kobj, "device");
- if (result)
- printk(KERN_ERR PREFIX "Create sysfs link\n");
-
- }
}
/*
@@ -1143,13 +1088,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
- status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
- acpi_video_device_notify, data);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev, "Error installing notify handler\n");
- else
- data->flags.notify = 1;
-
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1271,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
- /* no warning message if acpi_backlight=vendor is used */
- if (!acpi_video_backlight_support())
+ /* no warning message if acpi_backlight=vendor or a quirk is used */
+ if (!acpi_video_verify_backlight_support())
return 0;
if (!device->brightness)
@@ -1454,64 +1392,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
return status;
}
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
- acpi_status status;
-
- if (!device || !device->video)
- return -ENOENT;
-
- if (device->flags.notify) {
- status = acpi_remove_notify_handler(device->dev->handle,
- ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
- if (ACPI_FAILURE(status))
- dev_err(&device->dev->dev,
- "Can't remove video notify handler\n");
- }
-
- if (device->backlight) {
- backlight_device_unregister(device->backlight);
- device->backlight = NULL;
- }
- if (device->cooling_dev) {
- sysfs_remove_link(&device->dev->dev.kobj,
- "thermal_cooling");
- sysfs_remove_link(&device->cooling_dev->device.kobj,
- "device");
- thermal_cooling_device_unregister(device->cooling_dev);
- device->cooling_dev = NULL;
- }
-
- return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
- int status;
- struct acpi_video_device *dev, *next;
-
- mutex_lock(&video->device_list_lock);
-
- list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
- status = acpi_video_bus_put_one_device(dev);
- if (ACPI_FAILURE(status))
- printk(KERN_WARNING PREFIX
- "hhuuhhuu bug in acpi video driver.\n");
-
- if (dev->brightness) {
- kfree(dev->brightness->levels);
- kfree(dev->brightness);
- }
- list_del(&dev->entry);
- kfree(dev);
- }
-
- mutex_unlock(&video->device_list_lock);
-
- return 0;
-}
-
/* acpi_video interface */
/*
@@ -1521,13 +1401,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 1 : 0);
+ acpi_osi_is_win8() ? 1 : 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
{
return acpi_video_bus_DOS(video, 0,
- acpi_video_backlight_quirks() ? 0 : 1);
+ acpi_osi_is_win8() ? 0 : 1);
}
static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1416,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
struct input_dev *input;
int keycode = 0;
- if (!video)
+ if (!video || !video->input)
return;
input = video->input;
@@ -1691,12 +1571,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
return AE_OK;
}
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+ if (acpi_video_verify_backlight_support()) {
+ struct backlight_properties props;
+ struct pci_dev *pdev;
+ acpi_handle acpi_parent;
+ struct device *parent = NULL;
+ int result;
+ static int count;
+ char *name;
+
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+ if (!name)
+ return;
+ count++;
+
+ acpi_get_parent(device->dev->handle, &acpi_parent);
+
+ pdev = acpi_get_pci_dev(acpi_parent);
+ if (pdev) {
+ parent = &pdev->dev;
+ pci_dev_put(pdev);
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_FIRMWARE;
+ props.max_brightness = device->brightness->count - 3;
+ device->backlight = backlight_device_register(name,
+ parent,
+ device,
+ &acpi_backlight_ops,
+ &props);
+ kfree(name);
+ if (IS_ERR(device->backlight))
+ return;
+
+ /*
+ * Save current brightness level in case we have to restore it
+ * before acpi_video_device_lcd_set_level() is called next time.
+ */
+ device->backlight->props.brightness =
+ acpi_video_get_brightness(device->backlight);
+
+ device->cooling_dev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (IS_ERR(device->cooling_dev)) {
+ /*
+ * Set cooling_dev to NULL so we don't crash trying to
+ * free it.
+ * Also, why the hell are we returning early and
+ * not attempting to register the video output if
+ * cooling device registration failed?
+ * -- dtor
+ */
+ device->cooling_dev = NULL;
+ return;
+ }
+
+ dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+ device->cooling_dev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cooling_dev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = sysfs_create_link(&device->cooling_dev->device.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ }
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_register_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ video->pm_nb.notifier_call = acpi_video_resume;
+ video->pm_nb.priority = 0;
+ return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+ if (device->backlight) {
+ backlight_device_unregister(device->backlight);
+ device->backlight = NULL;
+ }
+ if (device->brightness) {
+ kfree(device->brightness->levels);
+ kfree(device->brightness);
+ device->brightness = NULL;
+ }
+ if (device->cooling_dev) {
+ sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+ thermal_cooling_device_unregister(device->cooling_dev);
+ device->cooling_dev = NULL;
+ }
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+ int error = unregister_pm_notifier(&video->pm_nb);
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_unregister_backlight(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+ acpi_status status;
+ struct acpi_device *adev = device->dev;
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify, device);
+ if (ACPI_FAILURE(status))
+ dev_err(&adev->dev, "Error installing notify handler\n");
+ else
+ device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+ struct input_dev *input;
+ struct acpi_video_device *dev;
+ int error;
+
+ video->input = input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = acpi_video_bus_start_devices(video);
+ if (error)
+ goto err_free_input;
+
+ snprintf(video->phys, sizeof(video->phys),
+ "%s/video/input0", acpi_device_hid(video->device));
+
+ input->name = acpi_device_name(video->device);
+ input->phys = video->phys;
+ input->id.bustype = BUS_HOST;
+ input->id.product = 0x06;
+ input->dev.parent = &video->device->dev;
+ input->evbit[0] = BIT(EV_KEY);
+ set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+ set_bit(KEY_VIDEO_NEXT, input->keybit);
+ set_bit(KEY_VIDEO_PREV, input->keybit);
+ set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+ set_bit(KEY_BRIGHTNESSUP, input->keybit);
+ set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+ set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+ set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+ error = input_register_device(input);
+ if (error)
+ goto err_stop_dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_add_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+
+err_stop_dev:
+ acpi_video_bus_stop_devices(video);
+err_free_input:
+ input_free_device(input);
+ video->input = NULL;
+out:
+ return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+ if (dev->flags.notify) {
+ acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+ acpi_video_device_notify);
+ dev->flags.notify = 0;
+ }
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry(dev, &video->video_device_list, entry)
+ acpi_video_dev_remove_notify_handler(dev);
+ mutex_unlock(&video->device_list_lock);
+
+ acpi_video_bus_stop_devices(video);
+ input_unregister_device(video->input);
+ video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+ struct acpi_video_device *dev, *next;
+
+ mutex_lock(&video->device_list_lock);
+ list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+ list_del(&dev->entry);
+ kfree(dev);
+ }
+ mutex_unlock(&video->device_list_lock);
+
+ return 0;
+}
+
static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
- struct input_dev *input;
int error;
acpi_status status;
@@ -1748,62 +1852,24 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
- video->input = input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_put_video;
- }
-
- error = acpi_video_bus_start_devices(video);
- if (error)
- goto err_free_input_dev;
-
- snprintf(video->phys, sizeof(video->phys),
- "%s/video/input0", acpi_device_hid(video->device));
-
- input->name = acpi_device_name(video->device);
- input->phys = video->phys;
- input->id.bustype = BUS_HOST;
- input->id.product = 0x06;
- input->dev.parent = &device->dev;
- input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
- set_bit(KEY_VIDEO_NEXT, input->keybit);
- set_bit(KEY_VIDEO_PREV, input->keybit);
- set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
- set_bit(KEY_BRIGHTNESSUP, input->keybit);
- set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
- set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
- set_bit(KEY_DISPLAY_OFF, input->keybit);
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
video->flags.rom ? "yes" : "no",
video->flags.post ? "yes" : "no");
+ mutex_lock(&video_list_lock);
+ list_add_tail(&video->entry, &video_bus_head);
+ mutex_unlock(&video_list_lock);
- video->pm_nb.notifier_call = acpi_video_resume;
- video->pm_nb.priority = 0;
- error = register_pm_notifier(&video->pm_nb);
- if (error)
- goto err_stop_video;
-
- error = input_register_device(input);
- if (error)
- goto err_unregister_pm_notifier;
+ acpi_video_bus_register_backlight(video);
+ acpi_video_bus_add_notify_handler(video);
return 0;
- err_unregister_pm_notifier:
- unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
- acpi_video_bus_stop_devices(video);
- err_free_input_dev:
- input_free_device(input);
- err_put_video:
+err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
- err_free_video:
+err_free_video:
kfree(video);
device->driver_data = NULL;
@@ -1820,12 +1886,14 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
- unregister_pm_notifier(&video->pm_nb);
-
- acpi_video_bus_stop_devices(video);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
- input_unregister_device(video->input);
+ mutex_lock(&video_list_lock);
+ list_del(&video->entry);
+ mutex_unlock(&video_list_lock);
+
kfree(video->attached_array);
kfree(video);
@@ -1874,6 +1942,9 @@ int acpi_video_register(void)
return 0;
}
+ mutex_init(&video_list_lock);
+ INIT_LIST_HEAD(&video_bus_head);
+
result = acpi_bus_register_driver(&acpi_video_bus);
if (result < 0)
return -ENODEV;
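
Note: the bus-list handling added above (list_add_tail()/list_del() in acpi_video_bus_add/remove and the initialisation in acpi_video_register()) refers to two file-scope objects that sit outside the hunks shown. A minimal sketch of the assumed declarations:

static struct mutex video_list_lock;
static struct list_head video_bus_head;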
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 940edbf2fe8f..84875fd4c74f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
{ },
};
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void)
acpi_video_get_capabilities(NULL);
}
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
{
return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
/* Promote the vendor interface instead of the generic video module.
* This function allows DMI blacklists to be implemented by externals
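
The video.c hunks above test acpi_video_verify_backlight_support(), a helper that is not part of the hunks shown. A minimal sketch under the assumption that it simply gates the existing support check on the Windows 8 OSI test exported here (the real helper may apply further quirks):

static bool acpi_video_verify_backlight_support(void)
{
	/* assumption: skip the ACPI backlight interface on Win8 firmware */
	if (acpi_osi_is_win8())
		return false;

	return acpi_video_backlight_support();
}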
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 848ebbd25717..f48370dfc908 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
int from_nid, to_nid;
- int ret = -ENODEV;
-
- cpu_hotplug_driver_lock();
+ int ret;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
- goto out;
+ return -ENODEV;
ret = cpu_up(cpuid);
/*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
- out:
- cpu_hotplug_driver_unlock();
return ret;
}
static int cpu_subsys_offline(struct device *dev)
{
- int ret;
-
- cpu_hotplug_driver_lock();
- ret = cpu_down(dev->id);
- cpu_hotplug_driver_unlock();
- return ret;
+ return cpu_down(dev->id);
}
void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_probe(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_probe(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_release(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_release(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
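
The reworked probe/release handlers above depend on lock_device_hotplug_sysfs(), which lives in drivers/base/core.c and is not shown in this diff. A sketch of its assumed behaviour, namely trying the device-hotplug lock and restarting the syscall instead of blocking a sysfs writer (device_hotplug_lock, msleep() and restart_syscall() come from the driver core, <linux/delay.h> and <linux/sched.h>):

int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* back off briefly to avoid busy-looping, then restart the syscall */
	msleep(5);
	return restart_syscall();
}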
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa36898916..06051767393f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev)
BUS_NOTIFY_UNBIND_DRIVER,
dev);
- pm_runtime_put(dev);
+ pm_runtime_put_sync(dev);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a82cf04..ee039afe9078 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
return error;
}
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+ struct dpm_watchdog *wd = (void *)data;
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ init_timer_on_stack(timer);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->function = dpm_watchdog_handler;
+ timer->data = (unsigned long)wd;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
dpm_wait(dev->parent, async);
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
dpm_wait_for_children(dev, async);
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
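
A worked example of the timeout arithmetic in dpm_watchdog_set() (values assumed, not taken from this patch): with HZ == 250 and CONFIG_DPM_WATCHDOG_TIMEOUT == 12, the timer expires at jiffies + 250 * 12 == jiffies + 3000 ticks, i.e. 12 seconds after the device callback starts; dpm_watchdog_handler() then dumps the stuck task's stack and panics so the hang is captured in pstore.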
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c6043..fa4187418440 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
@@ -42,7 +42,7 @@
*/
/**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
* @node: opp list node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
@@ -59,7 +59,7 @@
*
* This structure stores the OPP information for a given device.
*/
-struct opp {
+struct dev_pm_opp {
struct list_head node;
bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
}
/**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
* @opp: opp for which voltage has to be returned for
*
* Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
* @opp: opp for which frequency has to be returned for
*
* Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
* This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
*/
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
{
struct device_opp *dev_opp;
- struct opp *temp_opp;
+ struct dev_pm_opp *temp_opp;
int count = 0;
dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
* @available: true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * opp_add() - Add an OPP table from a table definitions
+ * dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
struct device_opp *dev_opp = NULL;
- struct opp *opp, *new_opp;
+ struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
/* allocate new OPP node */
- new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
* opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
- struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
/* keep the node allocated */
- new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
}
/**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
*
* Enables a provided opp. If the operation is valid, this returns 0, else the
* corresponding error value. It is meant to be used by users to re-enable an OPP
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to disable
*
* Disables a provided opp. If the operation is valid, this returns
* 0, else the corresponding error value. It is meant to be a temporary
* control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
* @dev: device for which we do this operation
* @table: Cpufreq table returned back to caller
*
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
* Callers should ensure that this function is *NOT* called under RCU protection
* or in contexts where mutex locking cannot be used.
*/
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
struct device_opp *dev_opp;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table;
int i = 0;
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
}
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
- (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
if (!freq_table) {
mutex_unlock(&dev_opp_list_lock);
dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
/**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
* @dev: device for which we do this operation
* @table: table to free
*
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt)) {
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
continue;
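
After the rename, a typical OPP consumer looks roughly like the sketch below (example_set_target() is hypothetical; the RCU rules quoted in the kernel-doc above still apply, and <linux/pm_opp.h>, <linux/err.h> and <linux/rcupdate.h> are needed):

static int example_set_target(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp;
	unsigned long volt;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	/* program the regulator to volt and the clock to *freq here */
	return 0;
}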
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a35097578..72e00e66ecc5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval ? retval : rpm_suspend(dev, rpmflags);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
/**
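
With this change, a ->runtime_idle() callback that is absent or returns 0 leads to rpm_suspend(dev, RPM_AUTO), so the device's autosuspend delay is honoured instead of suspending immediately. A hypothetical driver callback (example_hw_busy() is an assumed name):

static int example_runtime_idle(struct device *dev)
{
	if (example_hw_busy(dev))
		return -EBUSY;		/* veto: stay active */

	return 0;			/* let the core autosuspend the device */
}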
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 534fcb825153..38093e272377 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -17,15 +17,11 @@ config CPU_FREQ
if CPU_FREQ
-config CPU_FREQ_TABLE
- tristate
-
config CPU_FREQ_GOV_COMMON
bool
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
- select CPU_FREQ_TABLE
default y
help
This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
- select CPU_FREQ_TABLE
select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
- select CPU_FREQ_TABLE
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@ depends on IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@ depends on MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
- select CPU_FREQ_TABLE
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
depends on SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-III processors.
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
config SPARC_US2E_CPUFREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
depends on SUPERH
config SH_CPU_FREQ
tristate "SuperH CPU Frequency driver"
- select CPU_FREQ_TABLE
help
This adds the cpufreq driver for SuperH. Any CPU that supports
clock rate rounding through the clock framework can use this
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0fa204b244bd..ce52ed949249 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,6 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
- select CPU_FREQ_TABLE
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
config ARM_EXYNOS_CPUFREQ
bool
- select CPU_FREQ_TABLE
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
depends on SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
default y
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
- select CPU_FREQ_TABLE
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
- select CPU_FREQ_TABLE
config ARM_S3C_CPUFREQ
bool
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for the Samsung S3C2416 and
S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,14 @@ config ARM_SPEAR_CPUFREQ
config ARM_TEGRA_CPUFREQ
bool "TEGRA CPUFreq support"
depends on ARCH_TEGRA
- select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ select ARM_BIG_LITTLE_CPUFREQ
+ depends on ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 25ca9db62e09..ca0021a96e19 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,7 +1,6 @@
config CPU_FREQ_CBE
tristate "CBE frequency scaling"
depends on CBE_RAS && PPC_CELL
- select CPU_FREQ_TABLE
default m
help
This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
config CPU_FREQ_MAPLE
bool "Support for Maple 970FX Evaluation Board"
depends on PPC_MAPLE
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
config PPC_CORENET_CPUFREQ
tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
depends on PPC_E500MC && OF && COMMON_CLK
- select CPU_FREQ_TABLE
select CLK_PPC_CORENET
help
This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple PowerBooks,
this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on PPC_PMAC && PPC64
- select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
config PPC_PASEMI_CPUFREQ
bool "Support for PA Semi PWRficient"
depends on PPC_PASEMI
- select CPU_FREQ_TABLE
default y
help
This adds the support for frequency switching on PA Semi
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index e2b6eabef221..6897ad85b046 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
- select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
config SC520_CPUFREQ
tristate "AMD Elan SC520"
- select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@ config SC520_CPUFREQ
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
config X86_POWERNOW_K7
tristate "AMD Mobile Athlon/Duron PowerNow!"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
- select CPU_FREQ_TABLE
depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,7 +126,6 @@ config X86_POWERNOW_K8
config X86_AMD_FREQ_SENSITIVITY
tristate "AMD frequency sensitivity feedback powersave bias"
depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
- select CPU_FREQ_TABLE
help
This adds AMD-specific powersave bias function to the ondemand
governor, which allows it to make more power-conscious frequency
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
config X86_SPEEDSTEP_CENTRINO
tristate "Intel Enhanced SpeedStep (deprecated)"
- select CPU_FREQ_TABLE
select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
help
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
config X86_SPEEDSTEP_ICH
tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
- select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
- select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
- select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ad5866c2ada0..74945652dd7a 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,5 +1,5 @@
# CPUfreq core
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
-
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
@@ -77,6 +74,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 506fd23c7550..caf41ebea184 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -424,34 +424,21 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
}
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
- struct cpufreq_freqs freqs;
struct drv_cmd cmd;
- unsigned int next_state = 0; /* Index into freq_table */
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
-
if (unlikely(data == NULL ||
data->acpi_data == NULL || data->freq_table == NULL)) {
return -ENODEV;
}
perf = data->acpi_data;
- result = cpufreq_frequency_table_target(policy,
- data->freq_table,
- target_freq,
- relation, &next_state);
- if (unlikely(result)) {
- result = -ENODEV;
- goto out;
- }
-
- next_perf_state = data->freq_table[next_state].driver_data;
+ next_perf_state = data->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -492,23 +479,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
else
cmd.mask = cpumask_of(policy->cpu);
- freqs.old = perf->states[perf->state].core_frequency * 1000;
- freqs.new = data->freq_table[next_state].frequency;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
drv_write(&cmd);
if (acpi_pstate_strict) {
- if (!check_freqs(cmd.mask, freqs.new, data)) {
+ if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
- freqs.new = freqs.old;
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (!result)
perf->state = next_perf_state;
@@ -516,15 +497,6 @@ out:
return result;
}
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
- pr_debug("acpi_cpufreq_verify\n");
-
- return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
@@ -837,7 +809,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result)
goto err_freqfree;
@@ -846,12 +818,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
- /* Current speed is unknown and not detectable by IO port */
+ /*
+ * The core will not set policy->cur, because
+ * cpufreq_driver->get is NULL, so we need to set it here.
+ * However, we have to guess it, because the current speed is
+ * unknown and not detectable via IO ports.
+ */
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
- policy->cur = get_cur_freq_on_cpu(cpu);
break;
default:
break;
@@ -868,8 +844,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
@@ -929,8 +903,8 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
};
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
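
For comparison, under the ->target_index() contract a minimal driver callback reduces to something like the hypothetical sketch below: the core has already validated the table via cpufreq_table_validate_and_show(), resolved the target frequency to an index, and issues the PRECHANGE/POSTCHANGE notifications itself (example_freq_table and example_write_pstate() are assumed names):

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	unsigned int new_freq = example_freq_table[index].frequency;

	/* write the new P-state; notifications are handled by the core */
	return example_write_pstate(policy->cpu, new_freq);
}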
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 3549f0784af1..5519933813ea 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,110 +24,323 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
+#include <asm/bL_switcher.h>
#include "arm_big_little.h"
/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
-static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* (Big) clock frequencies */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-static unsigned int bL_cpufreq_get(unsigned int cpu)
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
{
- u32 cur_cluster = cpu_to_cluster(cpu);
+ return topology_physical_package_id(cpu);
+}
- return clk_get_rate(clk[cur_cluster]) / 1000;
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static unsigned int find_cluster_maxfreq(int cluster)
{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled()) {
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+ cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+ } else {
+ return clk_get_cpu_rate(cpu);
+ }
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
- return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+ if (clk_set_rate(clk[old_cluster], new_rate * 1000))
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
}
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
- int ret = 0;
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs_new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs_new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
- cur_cluster = cpu_to_cluster(policy->cpu);
+ return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+}
- freqs.old = bL_cpufreq_get(policy->cpu);
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
- /* Determine valid target frequency using freq_table */
- cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
- target_freq, relation, &freq_tab_idx);
- freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
- pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
- __func__, cpu, cur_cluster, freqs.old, target_freq,
- freqs.new);
+ return count;
+}
- if (freqs.old == freqs.new)
- return 0;
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t min_freq = ~0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency < min_freq)
+ min_freq = table[i].frequency;
+ return min_freq;
+}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ int i;
+ uint32_t max_freq = 0;
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency > max_freq)
+ max_freq = table[i].frequency;
+ return max_freq;
+}
- ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
- if (ret) {
- pr_err("clk_set_rate failed: %d\n", ret);
- freqs.new = freqs.old;
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
- return ret;
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev);
- if (!atomic_dec_return(&cluster_usage[cluster])) {
- clk_put(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
}
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
char name[14] = "cpu-cluster.";
int ret;
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ if (freq_table[cluster])
return 0;
ret = arm_bL_ops->init_opp_table(cpu_dev);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret) {
dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
- goto atomic_dec;
+ goto out;
}
name[12] = cluster + '0';
- clk[cluster] = clk_get_sys(name, NULL);
+ clk[cluster] = clk_get(cpu_dev, name);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
@@ -138,15 +351,74 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
__func__, cpu_dev->id, cluster);
ret = PTR_ERR(clk[cluster]);
- opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-atomic_dec:
- atomic_dec(&cluster_usage[cluster]);
+out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ ret = _get_cluster_clk_and_freq_table(cdev);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
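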
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
+}
+
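For illustration (not part of the patch): the get/put pair above relies on a first-user/last-user refcount, where only the first get initializes the per-cluster data and only the last put tears it down. A minimal sketch of that idiom, written with C11 atomics instead of the kernel's atomic_t (an assumed simplification):

/* Sketch of the first-user-init / last-user-free idiom; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int usage = 0;

static bool get_resource(void)
{
	if (atomic_fetch_add(&usage, 1) + 1 != 1)
		return true;            /* already initialized by an earlier user */
	/* first user: allocate tables, get clocks, etc. */
	return true;
}

static void put_resource(void)
{
	if (atomic_fetch_sub(&usage, 1) - 1 != 0)
		return;                 /* other users remain */
	/* last user: free tables, put clocks, etc. */
}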
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -165,7 +437,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
@@ -173,7 +445,14 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
@@ -181,9 +460,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = bL_cpufreq_get(policy->cpu);
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
@@ -200,33 +478,60 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
+ cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
}
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
- .flags = CPUFREQ_STICKY,
- .verify = bL_cpufreq_verify_policy,
- .target = bL_cpufreq_set_target,
- .get = bL_cpufreq_get,
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
- .have_governor_per_policy = true,
- .attr = bL_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
- int ret;
+ int ret, i;
if (arm_bL_ops) {
pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -241,16 +546,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
ret = cpufreq_register_driver(&bL_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
- pr_info("%s: Registered platform driver: %s\n", __func__,
- ops->name);
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
}
+ bL_switcher_put_enabled();
return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -263,7 +581,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
return;
}
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
arm_bL_ops->name);
arm_bL_ops = NULL;
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 79b2ce17884d..70f18fc12d4a 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops {
int (*init_opp_table)(struct device *cpu_dev);
};
-static inline int cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 480c0bd0468d..8d9d59108906 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index e0c38d938997..856ad80418ae 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -19,18 +19,10 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/slab.h>
static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
static unsigned int at32_get_speed(unsigned int cpu)
{
@@ -43,74 +35,94 @@ static unsigned int at32_get_speed(unsigned int cpu)
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
-static int at32_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- long freq;
-
- /* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
-
- /* Check if policy->min <= new_freq <= policy->max */
- if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+ unsigned int old_freq, new_freq;
- freqs.old = at32_get_speed(0);
- freqs.new = (freq + 500) / 1000;
- freqs.flags = 0;
+ old_freq = at32_get_speed(0);
+ new_freq = freq_table[index].frequency;
if (!ref_freq) {
- ref_freq = freqs.old;
+ ref_freq = old_freq;
loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- if (freqs.old < freqs.new)
+ if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- clk_set_rate(cpuclk, freq);
- if (freqs.new < freqs.old)
+ loops_per_jiffy_ref, ref_freq, new_freq);
+ clk_set_rate(cpuclk, new_freq * 1000);
+ if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, freqs.new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %lu Hz\n", freq);
+ loops_per_jiffy_ref, ref_freq, new_freq);
return 0;
}
static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
+ unsigned int frequency, rate, min_freq;
+ int retval, steps, i;
+
if (policy->cpu != 0)
return -EINVAL;
cpuclk = clk_get(NULL, "cpu");
if (IS_ERR(cpuclk)) {
pr_debug("cpufreq: could not get CPU clk\n");
- return PTR_ERR(cpuclk);
+ retval = PTR_ERR(cpuclk);
+ goto out_err;
}
- policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
policy->cpuinfo.transition_latency = 0;
- policy->cur = at32_get_speed(0);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- printk("cpufreq: AT32AP CPU frequency driver\n");
+ /*
+	 * The AVR32 CPU frequency scales in powers of two between the maximum
+	 * and the minimum rate; also add space for the table end marker.
+ *
+ * Further validate that the frequency is usable, and append it to the
+ * frequency table.
+ */
+ steps = fls(frequency / min_freq) + 1;
+ freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ retval = -ENOMEM;
+ goto out_err_put_clk;
+ }
- return 0;
+ for (i = 0; i < (steps - 1); i++) {
+ rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
+
+ if (rate != frequency)
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_table[i].frequency = frequency;
+
+ frequency /= 2;
+ }
+
+ freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+ retval = cpufreq_table_validate_and_show(policy, freq_table);
+ if (!retval) {
+ printk("cpufreq: AT32AP CPU frequency driver\n");
+ return 0;
+ }
+
+ kfree(freq_table);
+out_err_put_clk:
+ clk_put(cpuclk);
+out_err:
+ return retval;
}
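A quick worked example of the table sizing above, with illustrative values that are not taken from the patch: for a 200000 kHz maximum and a 12500 kHz minimum, the ratio is 16, fls(16) is 5, so steps is 6, which leaves room for five power-of-two entries plus the end marker. A small sketch using a stand-in for the kernel's fls():

/* Illustrative sizing only; example values, fls_() is a stand-in for fls(). */
#include <stdio.h>

static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int max_khz = 200000, min_khz = 12500, f;
	int steps = fls_(max_khz / min_khz) + 1;    /* 16 -> fls = 5 -> steps = 6 */
	int i;

	/* five power-of-two entries, last slot reserved for the end marker */
	for (i = 0, f = max_khz; i < steps - 1; i++, f /= 2)
		printf("entry %d: %u kHz\n", i, f); /* 200000 .. 12500 */
	return 0;
}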
static struct cpufreq_driver at32_driver = {
.name = "at32ap",
.init = at32_cpufreq_driver_init,
- .verify = at32_verify_speed,
- .target = at32_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = at32_set_target,
.get = at32_get_speed,
.flags = CPUFREQ_STICKY,
};
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index ef05978a7237..e9e63fc9c2c9 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -127,41 +127,28 @@ unsigned long cpu_set_cclk(int cpu, unsigned long new)
}
#endif
-static int bfin_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
{
#ifndef CONFIG_BF60x
unsigned int plldiv;
#endif
- unsigned int index;
- unsigned long cclk_hz;
- struct cpufreq_freqs freqs;
static unsigned long lpj_ref;
static unsigned int lpj_ref_freq;
+ unsigned int old_freq, new_freq;
int ret = 0;
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
#endif
- if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
- relation, &index))
- return -EINVAL;
+ old_freq = bfin_getfreq_khz(0);
+ new_freq = bfin_freq_table[index].frequency;
- cclk_hz = bfin_freq_table[index].frequency;
-
- freqs.old = bfin_getfreq_khz(0);
- freqs.new = cclk_hz;
-
- pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
- cclk_hz, target_freq, freqs.old);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifndef CONFIG_BF60x
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
bfin_write_PLL_DIV(plldiv);
#else
- ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+ ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
if (ret != 0) {
WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
return ret;
@@ -177,25 +164,16 @@ static int bfin_target(struct cpufreq_policy *policy,
#endif
if (!lpj_ref_freq) {
lpj_ref = loops_per_jiffy;
- lpj_ref_freq = freqs.old;
+ lpj_ref_freq = old_freq;
}
- if (freqs.new != freqs.old) {
+ if (new_freq != old_freq) {
loops_per_jiffy = cpufreq_scale(lpj_ref,
- lpj_ref_freq, freqs.new);
+ lpj_ref_freq, new_freq);
}
- /* TODO: just test case for cycles clock source, remove later */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: done\n");
return ret;
}
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
static int __bfin_cpu_init(struct cpufreq_policy *policy)
{
@@ -209,23 +187,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
- policy->cur = cclk;
- cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+ return cpufreq_table_validate_and_show(policy, bfin_freq_table);
}
-static struct freq_attr *bfin_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver bfin_driver = {
- .verify = bfin_verify_speed,
- .target = bfin_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
- .attr = bfin_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init bfin_cpu_init(void)
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index c522a95c0e16..d4585ce2346c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -30,73 +30,51 @@ static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int cpu0_get_speed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
-static int cpu0_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
- unsigned int index;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- pr_err("failed to match target freqency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
- freq_exact = freq_Hz;
- freqs.new = freq_Hz / 1000;
- freqs.old = clk_get_rate(cpu_clk) / 1000;
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
- freqs.new = freqs.old;
- ret = PTR_ERR(opp);
- goto post_notify;
+ return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -105,72 +83,35 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
pr_err("failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
+ if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
- clk_set_rate(cpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
+ clk_set_rate(cpu_clk, old_freq * 1000);
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- pr_err("invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(cpu_clk) / 1000;
-
- /*
- * The driver only supports the SMP configuartion where all processors
- * share the clock and voltage and clock. Use cpufreq affected_cpus
- * interface to have all CPUs scaled together.
- */
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
-
- return 0;
-}
-
-static struct freq_attr *cpu0_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
- .verify = cpu0_verify_speed,
- .target = cpu0_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpu0_set_target,
.get = cpu0_get_speed,
.init = cpu0_cpufreq_init,
- .exit = cpu0_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "generic_cpu0",
- .attr = cpu0_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -218,7 +159,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_node;
@@ -230,7 +171,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
transition_latency = CPUFREQ_ETERNAL;
if (!IS_ERR(cpu_reg)) {
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_uV, max_uV;
int i;
@@ -242,12 +183,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_uV = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_uV = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
- max_uV = opp_get_voltage(opp);
+ max_uV = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
@@ -264,7 +205,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return 0;
out_free_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
return ret;
@@ -273,7 +214,7 @@ out_put_node:
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index b83d45f68574..a05b876f375e 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
if (policy->min < (fsb_pol_max * fid * 100))
policy->max = (fsb_pol_max + 1) * fid * 100;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = nforce2_get(policy->cpu);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04548f7023af..02d534da22dd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -47,49 +47,11 @@ static LIST_HEAD(cpufreq_policy_list);
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
-/*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
- * all cpufreq/hotplug/workqueue/etc related lock issues.
- *
- * The rules for this semaphore:
- * - Any routine that wants to read from the policy structure will
- * do a down_read on this semaphore.
- * - Any routine that will write to the policy structure and/or may take away
- * the policy altogether (eg. CPU hotplug), will hold this lock in write
- * mode before doing so.
- *
- * Additional rules:
- * - Governor routines that can be called in cpufreq hotplug path should not
- * take this sem as top level hotplug notifier handler takes this.
- * - Lock should not be held across
- * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- */
-static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
-
-#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
- \
- return 0; \
-}
-
-lock_policy_rwsem(read, cpu);
-lock_policy_rwsem(write, cpu);
-
-#define unlock_policy_rwsem(mode, cpu) \
-static void unlock_policy_rwsem_##mode(int cpu) \
-{ \
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
- BUG_ON(!policy); \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
+static inline bool has_target(void)
+{
+ return cpufreq_driver->target_index || cpufreq_driver->target;
}
-unlock_policy_rwsem(read, cpu);
-unlock_policy_rwsem(write, cpu);
-
/*
* rwsem to guarantee that cpufreq driver module doesn't unload during critical
* sections
@@ -135,7 +97,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
- return cpufreq_driver->have_governor_per_policy;
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
@@ -183,6 +145,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the freq table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
+{
+ int ret;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ /*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the same clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
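For illustration (not part of the patch): a minimal sketch of a driver ->init() built on the helper above. The foo_* names and the latency value are hypothetical, chosen only to show the call shape; the converted drivers later in this series (cris, davinci, dbx500) use exactly this pattern.

#include <linux/cpufreq.h>

/* Hypothetical driver-local table, assumed to be built elsewhere. */
static struct cpufreq_frequency_table *foo_freq_table;

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validates & shows the table, sets the latency, fills policy->cpus */
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}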
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
@@ -363,7 +356,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
- } else if (cpufreq_driver->target) {
+ } else if (has_target()) {
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
@@ -414,7 +407,7 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
/**
@@ -435,7 +428,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
- ret = __cpufreq_set_policy(policy, &new_policy); \
+ ret = cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
\
return ret ? ret : count; \
@@ -493,11 +486,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
- /*
- * Do not use cpufreq_set_policy here or the user_policy.max
- * will be wrongly overridden
- */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -525,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
ssize_t i = 0;
struct cpufreq_governor *t;
- if (!cpufreq_driver->target) {
+ if (!has_target()) {
i += sprintf(buf, "performance powersave");
goto out;
}
@@ -653,24 +642,21 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EINVAL;
+ ssize_t ret;
if (!down_read_trylock(&cpufreq_rwsem))
- goto exit;
+ return -EINVAL;
- if (lock_policy_rwsem_read(policy->cpu) < 0)
- goto up_read;
+ down_read(&policy->rwsem);
if (fattr->show)
ret = fattr->show(policy, buf);
else
ret = -EIO;
- unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
-exit:
+
return ret;
}
@@ -689,17 +675,15 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!down_read_trylock(&cpufreq_rwsem))
goto unlock;
- if (lock_policy_rwsem_write(policy->cpu) < 0)
- goto up_read;
+ down_write(&policy->rwsem);
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-up_read:
up_read(&cpufreq_rwsem);
unlock:
put_online_cpus();
@@ -815,7 +799,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
if (ret)
goto err_out_kobj_put;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
goto err_out_kobj_put;
@@ -844,11 +828,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
- /* assure that the starting sequence is run in __cpufreq_set_policy */
+ /* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
@@ -864,10 +848,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
unsigned int cpu, struct device *dev,
bool frozen)
{
- int ret = 0, has_target = !!cpufreq_driver->target;
+ int ret = 0;
unsigned long flags;
- if (has_target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -875,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- lock_policy_rwsem_write(policy->cpu);
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -883,9 +867,9 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
per_cpu(cpufreq_cpu_data, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
- if (has_target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n", __func__);
@@ -930,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
goto err_free_cpumask;
INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+
return policy;
err_free_cpumask:
@@ -949,26 +935,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
- if (cpu == policy->cpu)
+ if (WARN_ON(cpu == policy->cpu))
return;
- /*
- * Take direct locks as lock_policy_rwsem_write wouldn't work here.
- * Also lock for last cpu is enough here as contention will happen only
- * after policy->cpu is changed and after it is changed, other threads
- * will try to acquire lock for new cpu. And policy is already updated
- * by then.
- */
- down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+ down_write(&policy->rwsem);
policy->last_cpu = policy->cpu;
policy->cpu = cpu;
- up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+ up_write(&policy->rwsem);
-#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
@@ -1053,6 +1030,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
+ if (cpufreq_driver->get) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
/* related cpus should atleast have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
@@ -1107,6 +1092,9 @@ err_out_unregister:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+err_get_freq:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
err_set_policy_cpu:
cpufreq_policy_free(policy);
nomem_out:
@@ -1147,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
- WARN_ON(lock_policy_rwsem_write(old_cpu));
+ down_write(&policy->rwsem);
cpumask_set_cpu(old_cpu, policy->cpus);
- unlock_policy_rwsem_write(old_cpu);
+ up_write(&policy->rwsem);
ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
@@ -1186,7 +1174,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
return -EINVAL;
}
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
@@ -1200,22 +1188,21 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy->governor->name, CPUFREQ_NAME_LEN);
#endif
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
if (cpu != policy->cpu) {
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
-
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
if (!frozen) {
- pr_debug("%s: policy Kobject moved to cpu: %d "
- "from: %d\n",__func__, new_cpu, cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
}
}
}
@@ -1243,16 +1230,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
return -EINVAL;
}
- WARN_ON(lock_policy_rwsem_write(cpu));
+ down_write(&policy->rwsem);
cpus = cpumask_weight(policy->cpus);
if (cpus > 1)
cpumask_clear_cpu(cpu, policy->cpus);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- if (cpufreq_driver->target) {
+ if (has_target()) {
ret = __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
@@ -1263,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
if (!frozen) {
- lock_policy_rwsem_read(cpu);
+ down_read(&policy->rwsem);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
- unlock_policy_rwsem_read(cpu);
+ up_read(&policy->rwsem);
kobject_put(kobj);
/*
@@ -1295,7 +1282,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
if (!frozen)
cpufreq_policy_free(policy);
} else {
- if (cpufreq_driver->target) {
+ if (has_target()) {
if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n",
@@ -1310,36 +1297,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
/**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
*/
-static inline int __cpufreq_remove_dev(struct device *dev,
- struct subsys_interface *sif,
- bool frozen)
-{
- int ret;
-
- ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
- if (!ret)
- ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
- return ret;
-}
-
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int retval;
+ int ret;
if (cpu_is_offline(cpu))
return 0;
- retval = __cpufreq_remove_dev(dev, sif, false);
- return retval;
+ ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+ return ret;
}
static void handle_update(struct work_struct *work)
@@ -1458,22 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
if (cpufreq_disabled() || !cpufreq_driver)
return -ENOENT;
+ BUG_ON(!policy);
+
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
- if (unlikely(lock_policy_rwsem_read(cpu)))
- goto out_policy;
+ down_read(&policy->rwsem);
ret_freq = __cpufreq_get(cpu);
- unlock_policy_rwsem_read(cpu);
-
-out_policy:
+ up_read(&policy->rwsem);
up_read(&cpufreq_rwsem);
return ret_freq;
@@ -1681,12 +1656,75 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
+ /*
+	 * This might look like a redundant call as we are checking it again
+	 * after finding the index. It is left intentionally so that, when
+	 * exactly the same frequency is requested again, we can return early
+	 * and save a few function calls.
+ */
if (target_freq == policy->cur)
return 0;
if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ else if (cpufreq_driver->target_index) {
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_freqs freqs;
+ bool notify;
+ int index;
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ goto out;
+ }
+
+ retval = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ goto out;
+ }
+
+ if (freq_table[index].frequency == policy->cur) {
+ retval = 0;
+ goto out;
+ }
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+
+ if (notify) {
+ freqs.old = policy->cur;
+ freqs.new = freq_table[index].frequency;
+ freqs.flags = 0;
+
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old,
+ freqs.new);
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n",
+ __func__, retval);
+
+ if (notify) {
+ /*
+ * Notify with old freq in case we failed to change
+ * frequency
+ */
+ if (retval)
+ freqs.new = freqs.old;
+
+ cpufreq_notify_transition(policy, &freqs,
+ CPUFREQ_POSTCHANGE);
+ }
+ }
+out:
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
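For illustration (not part of the patch): with the hunk above, the core looks up the table index and issues the CPUFREQ_PRECHANGE/POSTCHANGE notifications itself, re-notifying with the old frequency on failure, so a converted driver's ->target_index() only has to program the new rate. A hedged sketch, where foo_clk and foo_freq_table are hypothetical driver-local symbols set up in ->init():

#include <linux/clk.h>
#include <linux/cpufreq.h>

/* Hypothetical driver-local state, assumed to be initialized elsewhere. */
static struct clk *foo_clk;
static struct cpufreq_frequency_table *foo_freq_table;

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* the core has already validated index and sent CPUFREQ_PRECHANGE */
	return clk_set_rate(foo_clk, foo_freq_table[index].frequency * 1000);
}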
@@ -1697,14 +1735,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
- if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- goto fail;
+ down_write(&policy->rwsem);
ret = __cpufreq_driver_target(policy, target_freq, relation);
- unlock_policy_rwsem_write(policy->cpu);
+ up_write(&policy->rwsem);
-fail:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1907,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);
/*
- * data : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
int ret = 0, failed = 1;
@@ -1934,10 +1970,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
/* end old governor */
if (policy->governor) {
__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
/* start new governor */
@@ -1946,10 +1982,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
failed = 0;
} else {
- unlock_policy_rwsem_write(new_policy->cpu);
+ up_write(&policy->rwsem);
__cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(new_policy->cpu);
+ down_write(&policy->rwsem);
}
}
@@ -1995,10 +2031,7 @@ int cpufreq_update_policy(unsigned int cpu)
goto no_policy;
}
- if (unlikely(lock_policy_rwsem_write(cpu))) {
- ret = -EINVAL;
- goto fail;
- }
+ down_write(&policy->rwsem);
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
@@ -2017,17 +2050,16 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
policy->cur = new_policy.cur;
} else {
- if (policy->cur != new_policy.cur && cpufreq_driver->target)
+ if (policy->cur != new_policy.cur && has_target())
cpufreq_out_of_sync(cpu, policy->cur,
new_policy.cur);
}
}
- ret = __cpufreq_set_policy(policy, &new_policy);
+ ret = cpufreq_set_policy(policy, &new_policy);
- unlock_policy_rwsem_write(cpu);
+ up_write(&policy->rwsem);
-fail:
cpufreq_cpu_put(policy);
no_policy:
return ret;
@@ -2096,7 +2128,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -ENODEV;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- ((!driver_data->setpolicy) && (!driver_data->target)))
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2183,14 +2216,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
- int cpu;
-
if (cpufreq_disabled())
return -ENODEV;
- for_each_possible_cpu(cpu)
- init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index f62d822048e6..218460fcd2e4 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -80,13 +80,18 @@ static void cs_check_cpu(int cpu, unsigned int load)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
return;
- dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
+ freq_target = get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq > freq_target)
+ dbs_info->requested_freq -= freq_target;
+ else
+ dbs_info->requested_freq = policy->min;
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 88cd39f7b0e9..b5f2b8618949 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -191,7 +191,10 @@ struct common_dbs_data {
struct attribute_group *attr_group_gov_sys; /* one governor - system */
struct attribute_group *attr_group_gov_pol; /* one governor - policy */
- /* Common data for platforms that don't set have_governor_per_policy */
+ /*
+ * Common data for platforms that don't set
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+ */
struct dbs_data *gdbs_data;
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 32f26f6e17c5..18d409189092 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
- return;
} else {
/* Calculate the next frequency proportional to load */
unsigned int freq_next;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 03078090b5f7..4dbf1db16aca 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- /*
- * We're safe from concurrent calls to ->target() here
- * as we hold the userspace_mutex lock. If we were calling
- * cpufreq_driver_target, a deadlock situation might occur:
- * A: cpufreq_set (lock userspace_mutex) ->
- * cpufreq_driver_target(lock policy->lock)
- * B: cpufreq_set_policy(lock policy->lock) ->
- * __cpufreq_governor ->
- * cpufreq_governor_userspace (lock userspace_mutex)
- */
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
-
err:
mutex_unlock(&userspace_mutex);
return ret;
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index cb8276dd19ca..86559040c54c 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,67 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, cris_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
-}
-
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 72328f77dc53..26d940d40b1d 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -27,18 +27,11 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
return clk_ctrl.pll ? 200000 : 6000;
}
-static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
reg_config_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
- freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
- freqs.new = cris_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -51,64 +44,22 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
-static int cris_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target
- (policy, cris_freq_table, target_freq, relation, &newstate))
- return -EINVAL;
-
- cris_freq_set_cpu_state(policy, newstate);
-
return 0;
}
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = cris_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
- if (result)
- return (result);
-
- cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *cris_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
- .verify = cris_freq_verify,
- .target = cris_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
.init = cris_freq_cpu_init,
- .exit = cris_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cris_freq",
- .attr = cris_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 551dd655c6f2..5e8a854381b7 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -68,58 +66,38 @@ static unsigned int davinci_getspeed(unsigned int cpu)
return clk_get_rate(cpufreq.armclk) / 1000;
}
-static int davinci_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
- int ret = 0;
- unsigned int idx;
- struct cpufreq_freqs freqs;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct clk *armclk = cpufreq.armclk;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
- freqs.old = davinci_getspeed(0);
- freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
-
- if (freqs.old == freqs.new)
- return ret;
-
- dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
-
- ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
- freqs.new, relation, &idx);
- if (ret)
- return -EINVAL;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = davinci_getspeed(0);
+ new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
- if (pdata->set_voltage && freqs.new > freqs.old) {
+ if (pdata->set_voltage && new_freq > old_freq) {
ret = pdata->set_voltage(idx);
if (ret)
- goto out;
+ return ret;
}
ret = clk_set_rate(armclk, idx);
if (ret)
- goto out;
+ return ret;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
- goto out;
+ return ret;
}
/* if moving to lower freq, lower the voltage after lowering freq */
- if (pdata->set_voltage && freqs.new < freqs.old)
+ if (pdata->set_voltage && new_freq < old_freq)
pdata->set_voltage(idx);
-out:
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return 0;
}
static int davinci_cpu_init(struct cpufreq_policy *policy)
@@ -138,47 +116,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
- policy->cur = davinci_getspeed(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result) {
- pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
- __func__);
- return result;
- }
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
* Setting the latency to 2000 us to accommodate addition of drivers
* to pre/post change notification list.
*/
- policy->cpuinfo.transition_latency = 2000 * 1000;
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
}
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY,
.verify = davinci_verify_speed,
- .target = davinci_target,
+ .target_index = davinci_target,
.get = davinci_getspeed,
.init = davinci_cpu_init,
- .exit = davinci_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "davinci",
- .attr = davinci_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 26321cdc1946..0e67ab96321a 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -19,51 +19,11 @@
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *dbx500_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int idx;
- int ret;
-
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx))
- return -EINVAL;
-
- freqs.old = policy->cur;
- freqs.new = freq_table[idx].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- /* pre-change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* update armss clk frequency */
- ret = clk_set_rate(armss_clk, freqs.new * 1000);
-
- if (ret) {
- pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
- freqs.new * 1000, ret);
- freqs.new = freqs.old;
- }
-
- /* post change notification */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
+ return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
@@ -84,43 +44,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int res;
-
- /* get policy fields based on the table */
- res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!res)
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- else {
- pr_err("dbx500-cpufreq: Failed to read policy table\n");
- return res;
- }
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /*
- * FIXME : Need to take time measurement across the target()
- * function with no/some/all drivers in the notification
- * list.
- */
- policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
- /* policy sharing between dual CPUs */
- cpumask_setall(policy->cpus);
-
- return 0;
+ return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
- .verify = dbx500_cpufreq_verify_speed,
- .target = dbx500_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = dbx500_cpufreq_target,
.get = dbx500_cpufreq_getspeed,
.init = dbx500_cpufreq_init,
.name = "DBX500",
- .attr = dbx500_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int dbx500_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 09f64cc83019..9012b8bb6b64 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
struct cpufreq_policy *policy,
u32 dest_state)
{
- struct cpufreq_freqs freqs;
u32 lo, hi;
- int err = 0;
int i;
- freqs.old = eps_get(policy->cpu);
- freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
@@ -124,8 +118,7 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
}
/* Set new multiplier and voltage */
@@ -137,16 +130,10 @@ static int eps_set_state(struct eps_cpu_data *centaur,
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
- err = -ENODEV;
- goto postchange;
+ return -ENODEV;
}
} while (lo & ((1 << 16) | (1 << 17)));
- /* Return current frequency */
-postchange:
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
-
#ifdef DEBUG
{
u8 current_multiplier, current_voltage;
@@ -161,19 +148,12 @@ postchange:
current_multiplier);
}
#endif
- if (err)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return err;
+ return 0;
}
-static int eps_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
{
struct eps_cpu_data *centaur;
- unsigned int newstate = 0;
unsigned int cpu = policy->cpu;
unsigned int dest_state;
int ret;
@@ -182,28 +162,14 @@ static int eps_target(struct cpufreq_policy *policy,
return -ENODEV;
centaur = eps_cpu[cpu];
- if (unlikely(cpufreq_frequency_table_target(policy,
- &eps_cpu[cpu]->freq_table[0],
- target_freq,
- relation,
- &newstate))) {
- return -EINVAL;
- }
-
/* Make frequency transition */
- dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
+ dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
return ret;
}
-static int eps_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &eps_cpu[policy->cpu]->freq_table[0]);
-}
-
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
@@ -401,15 +367,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
- policy->cur = fsb * current_multiplier;
- ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+ ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
- cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}
@@ -424,19 +388,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *eps_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver eps_driver = {
- .verify = eps_verify,
- .target = eps_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = eps_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 823a400d98fd..de08acff5101 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -105,32 +105,9 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
}
-/**
- * elanfreq_set_cpu_frequency: Change the CPU core frequency
- * @cpu: cpu number
- * @freq: frequency in kHz
- *
- * This function takes a frequency value and changes the CPU frequency
- * according to this. Note that the frequency has to be checked by
- * elanfreq_validatespeed() for correctness!
- *
- * There is no return value.
- */
-
-static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int elanfreq_target(struct cpufreq_policy *policy,
+ unsigned int state)
{
- struct cpufreq_freqs freqs;
-
- freqs.old = elanfreq_get_cpu_frequency(0);
- freqs.new = elan_multiplier[state].clock;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
- elan_multiplier[state].clock);
-
-
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
@@ -161,39 +138,8 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
udelay(10000);
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-
-/**
- * elanfreq_validatespeed: test if frequency range is valid
- * @policy: the policy to validate
- *
- * This function checks if a given frequency range in kHz is valid
- * for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
-static int elanfreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &elanfreq_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- elanfreq_set_cpu_state(policy, newstate);
-
return 0;
}
-
-
/*
* Module init and exit code
*/
@@ -202,7 +148,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
- int result;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +166,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = elanfreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
- if (result)
- return result;
- cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
- return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
@@ -261,20 +193,14 @@ __setup("elanfreq=", elanfreq_setup);
#endif
-static struct freq_attr *elanfreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
- .verify = elanfreq_verify,
- .target = elanfreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = elanfreq_target,
.init = elanfreq_cpu_init,
- .exit = elanfreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "elanfreq",
- .attr = elanfreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 0fac34439e31..7b6dc06b1bd4 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -25,18 +25,11 @@
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
-static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- exynos_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -65,21 +58,18 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+ unsigned int old_freq;
int index, old_index;
int ret = 0;
- freqs.old = policy->cur;
- freqs.new = target_freq;
-
- if (freqs.new == freqs.old)
- goto out;
+ old_freq = policy->cur;
/*
	 * The policy max may have been changed so that we cannot get a proper
	 * old_index with cpufreq_frequency_table_target(). Thus, ignore the
	 * policy and get the index from the raw frequency table.
*/
- old_index = exynos_cpufreq_get_index(freqs.old);
+ old_index = exynos_cpufreq_get_index(old_freq);
if (old_index < 0) {
ret = old_index;
goto out;
@@ -104,17 +94,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
}
arm_volt = volt_table[index];
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* When the new frequency is higher than current frequency */
- if ((freqs.new > freqs.old) && !safe_arm_volt) {
+ if ((target_freq > old_freq) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
@@ -124,24 +111,17 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, safe_arm_volt);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
}
exynos_info->set_freq(old_index, index);
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (ret)
- goto out;
-
/* When the new frequency is lower than current frequency */
- if ((freqs.new < freqs.old) ||
- ((freqs.new > freqs.old) && safe_arm_volt)) {
+ if ((target_freq < old_freq) ||
+ ((target_freq > old_freq) && safe_arm_volt)) {
/* down the voltage after frequency change */
- regulator_set_voltage(arm_regulator, arm_volt,
+ ret = regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
@@ -151,19 +131,14 @@ post_notify:
}
out:
-
cpufreq_cpu_put(policy);
return ret;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
- unsigned int index;
- unsigned int new_freq;
int ret = 0;
mutex_lock(&cpufreq_lock);
@@ -171,15 +146,7 @@ static int exynos_target(struct cpufreq_policy *policy,
if (frequency_locked)
goto out;
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto out;
- }
-
- new_freq = freq_table[index].frequency;
-
- ret = exynos_cpufreq_scale(new_freq);
+ ret = exynos_cpufreq_scale(freq_table[index].frequency);
out:
mutex_unlock(&cpufreq_lock);
@@ -247,38 +214,18 @@ static struct notifier_block exynos_cpufreq_nb = {
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- cpumask_setall(policy->cpus);
-
- return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
-}
-
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
-static struct freq_attr *exynos_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
- .exit = exynos_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
- .attr = exynos_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index add7fbec4fc9..f2c75065ce19 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
static void exynos4210_set_apll(unsigned int index)
{
- unsigned int tmp;
+ unsigned int tmp, freq = apll_freq_4210[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4210[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
+ clk_set_rate(mout_apll, freq * 1000);
- /* 4. wait_lock_time */
- do {
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4210_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
-
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- }
+ exynos4210_set_clkdiv(new_index);
+ exynos4210_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4210_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4210[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4210_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4210_set_clkdiv(new_index);
- }
+ exynos4210_set_apll(new_index);
+ exynos4210_set_clkdiv(new_index);
}
}
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
- info->need_apll_change = exynos4210_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 08b7477b0aa2..8683304ce62c 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
static void exynos4x12_set_apll(unsigned int index)
{
- unsigned int tmp, pdiv;
+ unsigned int tmp, freq = apll_freq_4x12[index].freq;
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
- /* 2. Set APLL Lock time */
- pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+ clk_set_rate(mout_apll, freq * 1000);
- __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= apll_freq_4x12[index].mps;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- cpu_relax();
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
+ /* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
- unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
- unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
- return (old_pm == new_pm) ? 0 : 1;
-}
-
static void exynos4x12_set_frequency(unsigned int old_index,
unsigned int new_index)
{
- unsigned int tmp;
-
if (old_index > new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- /* 2. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- }
+ exynos4x12_set_clkdiv(new_index);
+ exynos4x12_set_apll(new_index);
} else if (old_index < new_index) {
- if (!exynos4x12_pms_change(old_index, new_index)) {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(EXYNOS4_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= apll_freq_4x12[new_index].mps & 0x7;
- __raw_writel(tmp, EXYNOS4_APLL_CON0);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- } else {
- /* Clock Configuration Procedure */
- /* 1. Change the apll m,p,s value */
- exynos4x12_set_apll(new_index);
- /* 2. Change the system clock divider values */
- exynos4x12_set_clkdiv(new_index);
- }
+ exynos4x12_set_apll(new_index);
+ exynos4x12_set_clkdiv(new_index);
}
}
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
- info->need_apll_change = exynos4x12_pms_change;
return 0;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index be5380ecdcd4..76bef8b078cb 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -118,12 +118,12 @@ static int init_div_table(void)
struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
unsigned int tmp, clk_div, ema_div, freq, volt_id;
int i = 0;
- struct opp *opp;
+ struct dev_pm_opp *opp;
rcu_read_lock();
for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
- opp = opp_find_freq_exact(dvfs_info->dev,
+ opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
freq_tbl[i].frequency * 1000, true);
if (IS_ERR(opp)) {
rcu_read_unlock();
@@ -142,7 +142,7 @@ static int init_div_table(void)
<< P0_7_CSCLKDEV_SHIFT;
/* Calculate EMA */
- volt_id = opp_get_voltage(opp);
+ volt_id = dev_pm_opp_get_voltage(opp);
volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
if (volt_id < PMIC_HIGH_VOLT) {
ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -209,38 +209,22 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- dvfs_info->freq_table);
-}
-
static unsigned int exynos_getspeed(unsigned int cpu)
{
return dvfs_info->cur_frequency;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int index, tmp;
- int ret = 0, i;
+ unsigned int tmp;
+ int i;
struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
mutex_lock(&cpufreq_lock);
- ret = cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index);
- if (ret)
- goto out;
-
freqs.old = dvfs_info->cur_frequency;
freqs.new = freq_table[index].frequency;
- if (freqs.old == freqs.new)
- goto out;
-
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Set the target frequency in all C0_3_PSTATE register */
@@ -251,9 +235,8 @@ static int exynos_target(struct cpufreq_policy *policy,
__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
}
-out:
mutex_unlock(&cpufreq_lock);
- return ret;
+ return 0;
}
static void exynos_cpufreq_work(struct work_struct *work)
@@ -324,30 +307,19 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
- if (ret) {
- dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cur = dvfs_info->cur_frequency;
- policy->cpuinfo.transition_latency = dvfs_info->latency;
- cpumask_setall(policy->cpus);
-
- cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
- return 0;
+ return cpufreq_generic_init(policy, dvfs_info->freq_table,
+ dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos_verify_speed,
- .target = exynos_target,
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id exynos_cpufreq_match[] = {
@@ -399,13 +371,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
goto err_put_node;
}
- ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+ &dvfs_info->freq_table);
if (ret) {
dev_err(dvfs_info->dev,
"failed to init cpufreq table: %d\n", ret);
goto err_put_node;
}
- dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+ dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
exynos_sort_descend_freq_table();
if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,7 +427,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
return 0;
err_free_table:
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
of_node_put(np);
dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -464,7 +437,7 @@ err_put_node:
static int exynos_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&exynos_driver);
- opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
return 0;
}
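
exynos5440 is the odd one out in this series: the PSTATE change completes asynchronously and is acknowledged later from the driver's own work handler, so the driver keeps issuing the transition notifications itself and advertises that with the new CPUFREQ_ASYNC_NOTIFICATION flag (which tells the core not to notify on its behalf). A sketch of that split, assuming the usual pattern (the foo_* names are hypothetical):

static struct cpufreq_freqs freqs;

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	freqs.old = foo_current_freq();
	freqs.new = foo_freq_table[index].frequency;

	/* With CPUFREQ_ASYNC_NOTIFICATION the core skips the notifications. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	foo_start_hw_transition(index);		/* finishes later, via IRQ/work */
	return 0;
}

static void foo_transition_done(struct cpufreq_policy *policy)
{
	/* Called from the IRQ handler / workqueue once the switch completed. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
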
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f111454a7aea..3458d27f63b4 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- unsigned int next_larger = ~0;
- unsigned int i;
- unsigned int count = 0;
+ unsigned int next_larger = ~0, freq, i = 0;
+ bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- unsigned int freq = table[i].frequency;
+ for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
- if ((freq >= policy->min) && (freq <= policy->max))
- count++;
- else if ((next_larger > freq) && (freq > policy->max))
+ if ((freq >= policy->min) && (freq <= policy->max)) {
+ found = true;
+ break;
+ }
+
+ if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
- if (!count)
+ if (!found) {
policy->max = next_larger;
-
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
+ }
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
+/*
+ * Generic routine to verify policy & frequency table, requires driver to call
+ * cpufreq_frequency_table_get_attr() prior to it.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return -ENODEV;
+
+ return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
/*
* if you use these, you must assure that the frequency table is valid
* all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+ if (!ret)
+ cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 70442c7b5e71..d83e8266a58e 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int maxfreq, curfreq;
+ unsigned int maxfreq;
if (!policy || policy->cpu != 0)
return -ENODEV;
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
stock_freq = maxfreq;
- curfreq = gx_get_cpuspeed(0);
pr_debug("cpu max frequency is %d.\n", maxfreq);
- pr_debug("cpu current frequency is %dkHz.\n", curfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
else
policy->min = maxfreq / POLICY_MIN_DIV;
policy->max = maxfreq;
- policy->cur = curfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 794123fcf3e3..bf8902a0866d 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
struct device_node *np;
int ret;
- if (!of_machine_is_compatible("calxeda,highbank"))
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
return -ENODEV;
cpu_dev = get_cpu_device(0);
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 3e14f0317175..53c6ac637e10 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -141,7 +141,6 @@ processor_set_freq (
{
int ret = 0;
u32 value = 0;
- struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int retval;
@@ -168,13 +167,6 @@ processor_set_freq (
pr_debug("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
- /* cpufreq frequency struct */
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
-
/*
* First we write the target state's 'control' value to the
* control_register.
@@ -186,22 +178,11 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &cpufreq_freqs,
- CPUFREQ_POSTCHANGE);
printk(KERN_WARNING "Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
- cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
data->acpi_data.state = state;
retval = 0;
@@ -227,42 +208,11 @@ acpi_cpufreq_get (
static int
acpi_cpufreq_target (
struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
- unsigned int next_state = 0;
- unsigned int result = 0;
-
- pr_debug("acpi_cpufreq_setpolicy\n");
-
- result = cpufreq_frequency_table_target(policy,
- data->freq_table, target_freq, relation, &next_state);
- if (result)
- return (result);
-
- result = processor_set_freq(data, policy, next_state);
-
- return (result);
-}
-
-
-static int
-acpi_cpufreq_verify (
- struct cpufreq_policy *policy)
+ unsigned int index)
{
- unsigned int result = 0;
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
- pr_debug("acpi_cpufreq_verify\n");
-
- result = cpufreq_frequency_table_verify(policy,
- data->freq_table);
-
- return (result);
+ return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}
-
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -321,7 +271,6 @@ acpi_cpufreq_cpu_init (
data->acpi_data.states[i].transition_latency * 1000;
}
}
- policy->cur = processor_get_freq(data, policy->cpu);
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +284,7 @@ acpi_cpufreq_cpu_init (
}
}
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) {
goto err_freqfree;
}
@@ -356,8 +305,6 @@ acpi_cpufreq_cpu_init (
(u32) data->acpi_data.states[i].status,
(u32) data->acpi_data.states[i].control);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
@@ -396,20 +343,14 @@ acpi_cpufreq_cpu_exit (
}
-static struct freq_attr* acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
.get = acpi_cpufreq_get,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
- .attr = acpi_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c3fd2a101ca0..4b3f18e5f36b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -35,73 +35,52 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int imx6q_get_speed(unsigned int cpu)
{
return clk_get_rate(arm_clk) / 1000;
}
-static int imx6q_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
- unsigned int index;
+ unsigned int old_freq, new_freq;
int ret;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (ret) {
- dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
- target_freq, ret);
- return ret;
- }
-
- freqs.new = freq_table[index].frequency;
- freq_hz = freqs.new * 1000;
- freqs.old = clk_get_rate(arm_clk) / 1000;
-
- if (freqs.old == freqs.new)
- return 0;
+ new_freq = freq_table[index].frequency;
+ freq_hz = new_freq * 1000;
+ old_freq = clk_get_rate(arm_clk) / 1000;
rcu_read_lock();
- opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old / 1000,
- freqs.new / 1000, volt / 1000);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old / 1000,
+ new_freq / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/*
* Need to increase vddpu and vddsoc for safety
* if we are about to run at 1.2 GHz.
*/
- if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ if (new_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_HIGH, 0);
regulator_set_voltage_tol(soc_reg,
@@ -121,21 +100,20 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
- freqs.new = freqs.old;
- goto post_notify;
+ return ret;
}
/* scaling down? scale voltage after frequency */
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_warn(cpu_dev,
@@ -143,7 +121,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
ret = 0;
}
- if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ if (old_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
regulator_set_voltage_tol(soc_reg,
@@ -151,55 +129,28 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
}
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (ret) {
- dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
- return ret;
- }
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = clk_get_rate(arm_clk) / 1000;
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
return 0;
}
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
}
-static struct freq_attr *imx6q_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver imx6q_cpufreq_driver = {
- .verify = imx6q_verify_speed,
- .target = imx6q_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = imx6q_set_target,
.get = imx6q_get_speed,
.init = imx6q_cpufreq_init,
- .exit = imx6q_cpufreq_exit,
+ .exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
- .attr = imx6q_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
@@ -237,14 +188,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
/* We expect an OPP table supplied by platform */
- num = opp_get_opp_count(cpu_dev);
+ num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto put_node;
}
- ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto put_node;
@@ -259,12 +210,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
* same order.
*/
rcu_read_lock();
- opp = opp_find_freq_exact(cpu_dev,
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
- min_volt = opp_get_voltage(opp);
- opp = opp_find_freq_exact(cpu_dev,
+ min_volt = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
freq_table[--num].frequency * 1000, true);
- max_volt = opp_get_voltage(opp);
+ max_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
@@ -292,7 +243,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return 0;
free_freq_table:
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
of_node_put(np);
return ret;
@@ -301,7 +252,7 @@ put_node:
static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
- opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
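
The <linux/opp.h> to <linux/pm_opp.h> move renames the whole OPP API with a dev_pm_opp_ prefix (struct opp becomes struct dev_pm_opp, opp_find_freq_ceil() becomes dev_pm_opp_find_freq_ceil(), and so on); the semantics are unchanged. A small usage sketch with the new names (the helper itself is made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>	/* was <linux/opp.h> */
#include <linux/rcupdate.h>

/* Hypothetical helper: voltage (in uV) of the OPP at or above *freq_hz. */
static long foo_opp_voltage(struct device *dev, unsigned long *freq_hz)
{
	struct dev_pm_opp *opp;		/* was: struct opp */
	unsigned long volt;

	rcu_read_lock();		/* OPP pointers are RCU-protected */
	opp = dev_pm_opp_find_freq_ceil(dev, freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return volt;
}
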
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 3d79bca47433..7d8ab000d317 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -60,9 +60,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
{
struct icst_vco vco;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -70,10 +68,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
policy->min = icst_hz(&cclk_params, vco) / 1000;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -187,10 +182,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
/* set default policy and cpuinfo */
- policy->cpuinfo.max_freq = 160000;
- policy->cpuinfo.min_freq = 12000;
+ policy->max = policy->cpuinfo.max_freq = 160000;
+ policy->min = policy->cpuinfo.min_freq = 12000;
policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
- policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
return 0;
}
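
cpufreq_verify_within_cpu_limits(), used here and in several of the drivers in this patch, is simply shorthand for clamping the policy to the cpuinfo bounds. Roughly, as assumed from its callers:

/* Sketch of the helper (assumed to live in include/linux/cpufreq.h): */
static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
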
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index eb3fdc755000..5f1cbae36961 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
+#include <linux/acpi.h>
#include <trace/events/power.h>
#include <asm/div64.h>
@@ -33,6 +34,8 @@
#define SAMPLE_COUNT 3
+#define BYT_RATIOS 0x66a
+
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -78,7 +81,6 @@ struct cpudata {
struct timer_list timer;
- struct pstate_adjust_policy *pstate_policy;
struct pstate_data pstate;
struct _pid pid;
@@ -100,15 +102,21 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
-static struct pstate_adjust_policy default_policy = {
- .sample_rate_ms = 10,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
+struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
+ void (*set)(int pstate);
+};
+
+struct cpu_defaults {
+ struct pstate_adjust_policy pid_policy;
+ struct pstate_funcs funcs;
};
+static struct pstate_adjust_policy pid_params;
+static struct pstate_funcs pstate_funcs;
+
struct perf_limits {
int no_turbo;
int max_perf_pct;
@@ -185,14 +193,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
- pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
- pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
- pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+ pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
+ pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
+ pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
pid_reset(&cpu->pid,
- cpu->pstate_policy->setpoint,
+ pid_params.setpoint,
100,
- cpu->pstate_policy->deadband,
+ pid_params.deadband,
0);
}
@@ -226,12 +234,12 @@ struct pid_param {
};
static struct pid_param pid_files[] = {
- {"sample_rate_ms", &default_policy.sample_rate_ms},
- {"d_gain_pct", &default_policy.d_gain_pct},
- {"i_gain_pct", &default_policy.i_gain_pct},
- {"deadband", &default_policy.deadband},
- {"setpoint", &default_policy.setpoint},
- {"p_gain_pct", &default_policy.p_gain_pct},
+ {"sample_rate_ms", &pid_params.sample_rate_ms},
+ {"d_gain_pct", &pid_params.d_gain_pct},
+ {"i_gain_pct", &pid_params.i_gain_pct},
+ {"deadband", &pid_params.deadband},
+ {"setpoint", &pid_params.setpoint},
+ {"p_gain_pct", &pid_params.p_gain_pct},
{NULL, NULL}
};
@@ -336,33 +344,92 @@ static void intel_pstate_sysfs_expose_params(void)
}
/************************** sysfs end ************************/
+static int byt_get_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return value & 0xFF;
+}
-static int intel_pstate_min_pstate(void)
+static int byt_get_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_RATIOS, value);
+ return (value >> 16) & 0xFF;
+}
+
+static int core_get_min_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
-static int intel_pstate_max_pstate(void)
+static int core_get_max_pstate(void)
{
u64 value;
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
-static int intel_pstate_turbo_pstate(void)
+static int core_get_turbo_pstate(void)
{
u64 value;
int nont, ret;
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
- nont = intel_pstate_max_pstate();
+ nont = core_get_max_pstate();
ret = ((value) & 255);
if (ret <= nont)
ret = nont;
return ret;
}
+static void core_set_pstate(int pstate)
+{
+ u64 val;
+
+ val = pstate << 8;
+ if (limits.no_turbo)
+ val |= (u64)1 << 32;
+
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+}
+
+static struct cpu_defaults core_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+static struct cpu_defaults byt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = byt_get_max_pstate,
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_max_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
@@ -383,7 +450,6 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
int max_perf, min_perf;
- u64 val;
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
@@ -395,11 +461,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
trace_cpu_frequency(pstate * 100000, cpu->cpu);
cpu->pstate.current_pstate = pstate;
- val = pstate << 8;
- if (limits.no_turbo)
- val |= (u64)1 << 32;
- wrmsrl(MSR_IA32_PERF_CTL, val);
+ pstate_funcs.set(pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -421,9 +484,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
sprintf(cpu->name, "Intel 2nd generation core");
- cpu->pstate.min_pstate = intel_pstate_min_pstate();
- cpu->pstate.max_pstate = intel_pstate_max_pstate();
- cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
/*
* goto max pstate so we don't slow up boot if we are built-in if we are
@@ -465,7 +528,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
int sample_time, delay;
- sample_time = cpu->pstate_policy->sample_rate_ms;
+ sample_time = pid_params.sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
@@ -521,14 +584,15 @@ static void intel_pstate_timer_func(unsigned long __data)
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(0x2a, default_policy),
- ICPU(0x2d, default_policy),
- ICPU(0x3a, default_policy),
- ICPU(0x3c, default_policy),
- ICPU(0x3e, default_policy),
- ICPU(0x3f, default_policy),
- ICPU(0x45, default_policy),
- ICPU(0x46, default_policy),
+ ICPU(0x2a, core_params),
+ ICPU(0x2d, core_params),
+ ICPU(0x37, byt_params),
+ ICPU(0x3a, core_params),
+ ICPU(0x3c, core_params),
+ ICPU(0x3e, core_params),
+ ICPU(0x3f, core_params),
+ ICPU(0x45, core_params),
+ ICPU(0x46, core_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -552,8 +616,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_get_cpu_pstates(cpu);
cpu->cpu = cpunum;
- cpu->pstate_policy =
- (struct pstate_adjust_policy *)id->driver_data;
+
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
cpu->timer.data =
@@ -613,9 +676,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -683,9 +744,9 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- if (!intel_pstate_min_pstate() ||
- !intel_pstate_max_pstate() ||
- !intel_pstate_turbo_pstate())
+ if (!pstate_funcs.get_max() ||
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
@@ -698,10 +759,96 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
+
+static void copy_pid_params(struct pstate_adjust_policy *policy)
+{
+ pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.p_gain_pct = policy->p_gain_pct;
+ pid_params.i_gain_pct = policy->i_gain_pct;
+ pid_params.d_gain_pct = policy->d_gain_pct;
+ pid_params.deadband = policy->deadband;
+ pid_params.setpoint = policy->setpoint;
+}
+
+static void copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
+ pstate_funcs.set = funcs->set;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+
+static bool intel_pstate_no_acpi_pss(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ acpi_status status;
+ union acpi_object *pss;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+
+ status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ pss = buffer.pointer;
+ if (pss && pss->type == ACPI_TYPE_PACKAGE) {
+ kfree(pss);
+ return false;
+ }
+
+ kfree(pss);
+ }
+
+ return true;
+}
+
+struct hw_vendor_info {
+ u16 valid;
+ char oem_id[ACPI_OEM_ID_SIZE];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+};
+
+/* Hardware vendor-specific info that has its own power management modes */
+static struct hw_vendor_info vendor_info[] = {
+ {1, "HP ", "ProLiant"},
+ {0, "", ""},
+};
+
+static bool intel_pstate_platform_pwr_mgmt_exists(void)
+{
+ struct acpi_table_header hdr;
+ struct hw_vendor_info *v_info;
+
+ if (acpi_disabled
+ || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ return false;
+
+ for (v_info = vendor_info; v_info->valid; v_info++) {
+ if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
+ && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
+ && intel_pstate_no_acpi_pss())
+ return true;
+ }
+
+ return false;
+}
+#else /* CONFIG_ACPI not enabled */
+static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+#endif /* CONFIG_ACPI */
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
+ struct cpu_defaults *cpu_info;
if (no_load)
return -ENODEV;
@@ -710,6 +857,18 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
+ cpu_info = (struct cpu_defaults *)id->driver_data;
+
+ copy_pid_params(&cpu_info->pid_policy);
+ copy_cpu_funcs(&cpu_info->funcs);
+
if (intel_pstate_msrs_not_valid())
return -ENODEV;
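
With this change intel_pstate keys both the PID tuning and the P-state MSR accessors off the matched CPU id: x86_match_cpu() returns an entry whose driver_data points at a struct cpu_defaults, and intel_pstate_init() copies it into pid_params and pstate_funcs. Wiring up a further CPU model would then look roughly like this (the model number and name are placeholders):

/* Placeholder example of another entry under the cpu_defaults scheme. */
static struct cpu_defaults foo_params = {
	.pid_policy = {
		.sample_rate_ms	= 10,
		.setpoint	= 97,
		.p_gain_pct	= 20,
	},
	.funcs = {
		.get_max	= core_get_max_pstate,
		.get_min	= core_get_min_pstate,
		.get_turbo	= core_get_turbo_pstate,
		.set		= core_set_pstate,
	},
};

/* ...plus a matching table entry:
 *	ICPU(0xNN, foo_params),
 * after which copy_pid_params()/copy_cpu_funcs() pick it up at init. */
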
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index ba10658a9394..0767a4e29dfe 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,69 +55,37 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
return kirkwood_freq_table[0].frequency;
}
-static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int index)
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
- freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
- freqs.new = kirkwood_freq_table[index].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
- kirkwood_freq_table[index].frequency);
- dev_dbg(priv.dev, "old frequency was %i KHz\n",
- kirkwood_cpufreq_get_cpu_frequency(0));
-
- if (freqs.old != freqs.new) {
- local_irq_disable();
-
- /* Disable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg |= CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- switch (state) {
- case STATE_CPU_FREQ:
- clk_disable(priv.powersave_clk);
- break;
- case STATE_DDR_FREQ:
- clk_enable(priv.powersave_clk);
- break;
- }
+ local_irq_disable();
- /* Wait-for-Interrupt, while the hardware changes frequency */
- cpu_do_idle();
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- /* Enable interrupts to the CPU */
- reg = readl_relaxed(priv.base);
- reg &= ~CPU_SW_INT_BLK;
- writel_relaxed(reg, priv.base);
-
- local_irq_enable();
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
-static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index = 0;
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
- if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
- kirkwood_cpufreq_set_cpu_state(policy, index);
+ local_irq_enable();
return 0;
}
@@ -125,40 +93,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int result;
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = 5000; /* 5uS */
- policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
}
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.get = kirkwood_cpufreq_get_cpu_frequency,
- .verify = kirkwood_cpufreq_verify,
- .target = kirkwood_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
- .exit = kirkwood_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
- .attr = kirkwood_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 4ada1cccb052..45bafddfd8ea 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -625,28 +625,13 @@ static void longhaul_setup_voltagescaling(void)
}
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
static int longhaul_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int table_index)
{
- unsigned int table_index = 0;
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
- if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq,
- relation, &table_index))
- return -EINVAL;
-
- /* Don't set same frequency again */
- if (longhaul_index == table_index)
- return 0;
-
if (!can_scale_voltage)
longhaul_setstate(policy, table_index);
else {
@@ -919,36 +904,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->cpuinfo.transition_latency = 200000; /* nsec */
- policy->cur = calc_speed(longhaul_get_cpu_mult());
-
- ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
- if (ret)
- return ret;
-
- cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, longhaul_table);
}
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver longhaul_driver = {
- .verify = longhaul_verify,
- .target = longhaul_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = longhaul_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "longhaul",
- .attr = longhaul_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 5aa031612d53..074971b12635 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 7bc3c44d34e2..a43609218105 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -53,51 +53,24 @@ static unsigned int loongson2_cpufreq_get(unsigned int cpu)
* Here we notify other drivers of the proposed change and the final change.
*/
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
unsigned int cpu = policy->cpu;
- unsigned int newstate = 0;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
- if (cpufreq_frequency_table_target
- (policy, &loongson2_clockmod_table[0], target_freq, relation,
- &newstate))
- return -EINVAL;
-
freq =
((cpu_clock_freq / 1000) *
- loongson2_clockmod_table[newstate].driver_data) / 8;
- if (freq < policy->min || freq > policy->max)
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
- freqs.old = loongson2_cpufreq_get(cpu);
- freqs.new = freq;
- freqs.flags = 0;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ loongson2_clockmod_table[index].driver_data) / 8;
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %u kHz\n", freq);
-
return 0;
}
@@ -131,40 +104,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- policy->cur = loongson2_cpufreq_get(policy->cpu);
-
- cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
- policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &loongson2_clockmod_table[0]);
+ return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
clk_put(cpuclk);
return 0;
}
-static struct freq_attr *loongson2_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
- .verify = loongson2_cpufreq_verify,
- .target = loongson2_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = loongson2_cpufreq_target,
.get = loongson2_cpufreq_get,
.exit = loongson2_cpufreq_exit,
- .attr = loongson2_table_attr,
+ .attr = cpufreq_generic_attr,
};
static struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 6168d77b296d..c4dfa42a75ac 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -64,18 +64,11 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *maple_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
static int maple_pmode_cur;
-static DEFINE_MUTEX(maple_switch_mutex);
-
static const u32 *maple_pmode_data;
static int maple_pmode_max;
@@ -135,37 +128,10 @@ static int maple_scom_query_freq(void)
* Common interface to the cpufreq core
*/
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (maple_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&maple_switch_mutex);
-
- freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
- freqs.new = maple_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = maple_scom_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&maple_switch_mutex);
-
- return rc;
+ return maple_scom_switch_freq(index);
}
static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
@@ -175,27 +141,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = 12000;
- policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- maple_cpu_freqs);
+ return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
}
-
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
- .verify = maple_cpufreq_verify,
- .target = maple_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = maple_cpufreq_target,
.get = maple_cpufreq_get_speed,
- .attr = maple_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init maple_cpufreq_init(void)
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index f31fcfcad514..be6d14307aa8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
- if (!freq_table)
- return -EINVAL;
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int omap_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -58,42 +51,16 @@ static unsigned int omap_getspeed(unsigned int cpu)
return rate;
}
-static int omap_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int i;
- int r, ret = 0;
- struct cpufreq_freqs freqs;
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
- if (!freq_table) {
- dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
- policy->cpu);
- return -EINVAL;
- }
+ old_freq = omap_getspeed(policy->cpu);
+ new_freq = freq_table[index].frequency;
- ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &i);
- if (ret) {
- dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
- __func__, policy->cpu, target_freq, ret);
- return ret;
- }
- freqs.new = freq_table[i].frequency;
- if (!freqs.new) {
- dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
- policy->cpu, target_freq);
- return -EINVAL;
- }
-
- freqs.old = omap_getspeed(policy->cpu);
-
- if (freqs.old == freqs.new && policy->cur == freqs.new)
- return ret;
-
- freq = freqs.new * 1000;
+ freq = new_freq * 1000;
ret = clk_round_rate(mpu_clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
@@ -105,143 +72,103 @@ static int omap_target(struct cpufreq_policy *policy,
if (mpu_reg) {
rcu_read_lock();
- opp = opp_find_freq_ceil(mpu_dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
- __func__, freqs.new);
+ __func__, new_freq);
return -EINVAL;
}
- volt = opp_get_voltage(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
- freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
- freqs.new / 1000, volt ? volt / 1000 : -1);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (mpu_reg && (freqs.new > freqs.old)) {
+ if (mpu_reg && (new_freq > old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
__func__);
- freqs.new = freqs.old;
- goto done;
+ return r;
}
}
- ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(mpu_clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
- if (mpu_reg && (freqs.new < freqs.old)) {
+ if (mpu_reg && (new_freq < old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
- ret = clk_set_rate(mpu_clk, freqs.old * 1000);
- freqs.new = freqs.old;
- goto done;
+ clk_set_rate(mpu_clk, old_freq * 1000);
+ return r;
}
}
- freqs.new = omap_getspeed(policy->cpu);
-
-done:
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return ret;
}
static inline void freq_table_free(void)
{
if (atomic_dec_and_test(&freq_table_users))
- opp_free_cpufreq_table(mpu_dev, &freq_table);
+ dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
}
static int omap_cpu_init(struct cpufreq_policy *policy)
{
- int result = 0;
+ int result;
mpu_clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
- if (policy->cpu >= NR_CPUS) {
- result = -EINVAL;
- goto fail_ck;
- }
-
- policy->cur = omap_getspeed(policy->cpu);
-
- if (!freq_table)
- result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
- if (result) {
- dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ if (!freq_table) {
+ result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+ if (result) {
+ dev_err(mpu_dev,
+ "%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
- goto fail_ck;
+ goto fail;
+ }
}
atomic_inc_return(&freq_table_users);
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (result)
- goto fail_table;
-
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
- policy->cur = omap_getspeed(policy->cpu);
-
- /*
- * On OMAP SMP configuartion, both processors share the voltage
- * and clock. So both CPUs needs to be scaled together and hence
- * needs software co-ordination. Use cpufreq affected_cpus
- * interface to handle this scenario. Additional is_smp() check
- * is to keep SMP_ON_UP build working.
- */
- if (is_smp())
- cpumask_setall(policy->cpus);
-
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- return 0;
+ result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (!result)
+ return 0;
-fail_table:
freq_table_free();
-fail_ck:
+fail:
clk_put(mpu_clk);
return result;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
+ cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
clk_put(mpu_clk);
return 0;
}
-static struct freq_attr *omap_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_STICKY,
- .verify = omap_verify_speed,
- .target = omap_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = omap_target,
.get = omap_getspeed,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
- .attr = omap_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 2f0a2a65c37f..3d1cba9fd5f9 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -105,47 +105,21 @@ static struct cpufreq_frequency_table p4clockmod_table[] = {
};
-static int cpufreq_p4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = DC_RESV;
- struct cpufreq_freqs freqs;
int i;
- if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = cpufreq_p4_get(policy->cpu);
- freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
- cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
-
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
return 0;
}
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
@@ -230,25 +204,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
- cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
- policy->cur = stock_freq;
- return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+ return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
}
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
u32 l, h;
@@ -267,19 +233,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
return stock_freq;
}
-static struct freq_attr *p4clockmod_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver p4clockmod_driver = {
- .verify = cpufreq_p4_verify,
- .target = cpufreq_p4_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
- .exit = cpufreq_p4_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = p4clockmod_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index f4ec8145b3d1..0426008380d8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -52,8 +52,6 @@
static void __iomem *sdcpwr_mapbase;
static void __iomem *sdcasr_mapbase;
-static DEFINE_MUTEX(pas_switch_mutex);
-
/* Current astate, is used when waking up from power savings on
* one core, in case the other core has switched states during
* the idle time.
@@ -70,11 +68,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr *pas_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/*
* hardware specific functions
*/
@@ -210,22 +203,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
}
- policy->cpuinfo.transition_latency = get_gizmo_latency();
-
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- cpumask_copy(policy->cpus, cpu_online_mask);
-
ppc_proc_freq = policy->cur * 1000ul;
- cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
- /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
- * are set correctly
- */
- return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase);
@@ -254,31 +238,11 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
static int pas_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int pas_astate_new)
{
- struct cpufreq_freqs freqs;
- int pas_astate_new;
int i;
- cpufreq_frequency_table_target(policy,
- pas_freqs,
- target_freq,
- relation,
- &pas_astate_new);
-
- freqs.old = policy->cur;
- freqs.new = pas_freqs[pas_astate_new].frequency;
-
- mutex_lock(&pas_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
pas_freqs[pas_astate_new].frequency,
@@ -289,10 +253,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&pas_switch_mutex);
-
- ppc_proc_freq = freqs.new * 1000ul;
+ ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
return 0;
}
@@ -301,9 +262,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
- .verify = pas_cpufreq_verify,
- .target = pas_cpufreq_target,
- .attr = pas_cpu_freqs_attr,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pas_cpufreq_target,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index d81c4e5ea0ad..e2b4f40ff69a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
{
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void)
struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member;
- acpi_handle handle, osc_handle, pcch_handle;
+ acpi_handle handle, osc_handle;
int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
- status = acpi_get_handle(handle, "PCCH", &pcch_handle);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "PCCH"))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
- policy->cur = pcc_get_freq(cpu);
-
- if (!policy->cur) {
- pr_debug("init: Unable to get current CPU frequency\n");
- result = -EINVAL;
- goto out;
- }
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index a096cd3fa23d..cf55d202f332 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
@@ -336,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
-static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
- int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
{
- struct cpufreq_freqs freqs;
unsigned long l3cr;
static unsigned long prev_l3cr;
- freqs.old = cur_freq;
- freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
-
- if (freqs.old == freqs.new)
- return 0;
-
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@@ -366,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
- if (notify)
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@@ -378,23 +361,12 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
return cur_freq;
}
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- unsigned int newstate = 0;
int rc;
- if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- rc = do_set_cpu_speed(policy, newstate, 1);
+ rc = do_set_cpu_speed(policy, index);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@@ -402,14 +374,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -ENODEV;
-
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = cur_freq;
-
- cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+ return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
}
static u32 read_gpio(struct device_node *np)
@@ -443,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
- do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
+ do_set_cpu_speed(policy, CPUFREQ_HIGH);
return 0;
}
@@ -460,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(policy, sleep_freq == low_freq ?
- CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+ CPUFREQ_LOW : CPUFREQ_HIGH);
ppc_proc_freq = cur_freq * 1000ul;
@@ -469,14 +434,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pmac_cpufreq_driver = {
- .verify = pmac_cpufreq_verify,
- .target = pmac_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_PM_NO_WARN,
- .attr = pmac_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 3a51ad7e47c8..6a338f8c3860 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct freq_attr* g5_cpu_freqs_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
@@ -84,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
-static DEFINE_MUTEX(g5_switch_mutex);
-
static unsigned long transition_latency;
#ifdef CONFIG_PMAC_SMU
@@ -142,7 +135,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
pmf_call_one(pfunc_vdnap0_complete, &args);
if (done)
break;
- msleep(1);
+ usleep_range(1000, 1000);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +234,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
if (pfunc_cpu1_volt_low)
pmf_call_one(pfunc_cpu1_volt_low, NULL);
}
- msleep(10); /* should be faster , to fix */
+ usleep_range(10000, 10000); /* should be faster , to fix */
}
/*
@@ -286,7 +279,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
pmf_call_one(pfunc_slewing_done, &args);
if (done)
break;
- msleep(1);
+ usleep_range(500, 500);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -317,37 +310,9 @@ static int g5_pfunc_query_freq(void)
* Common interface to the cpufreq core
*/
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
{
- return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
-static int g5_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
-{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
- int rc;
-
- if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- if (g5_pmode_cur == newstate)
- return 0;
-
- mutex_lock(&g5_switch_mutex);
-
- freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
- freqs.new = g5_cpu_freqs[newstate].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- rc = g5_switch_freq(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- mutex_unlock(&g5_switch_mutex);
-
- return rc;
+ return g5_switch_freq(index);
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -357,27 +322,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- policy->cpuinfo.transition_latency = transition_latency;
- policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
- /* secondary CPUs are tied to the primary one by the
- * cpufreq core if in the secondary policy we tell it that
- * it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
- cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy,
- g5_cpu_freqs);
+ return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
}
-
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
- .verify = g5_cpufreq_verify,
- .target = g5_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = g5_cpu_freqs_attr,
+ .attr = cpufreq_generic_attr,
};
@@ -397,7 +352,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
/* Check supported platforms */
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
- of_machine_is_compatible("PowerMac9,1"))
+ of_machine_is_compatible("PowerMac9,1") ||
+ of_machine_is_compatible("PowerMac12,1"))
use_volts_smu = 1;
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
@@ -647,8 +603,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = min_freq;
+ /* Based on a measurement on Xserve G5, rounded up. */
+ transition_latency = 10 * NSEC_PER_MSEC;
+
/* Set callbacks */
- transition_latency = CPUFREQ_ETERNAL;
g5_switch_volt = g5_pfunc_switch_volt;
g5_switch_freq = g5_pfunc_switch_freq;
g5_query_freq = g5_pfunc_query_freq;
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 85f1c8c25ddc..643e7952cad3 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -63,12 +63,12 @@ static int powernow_k6_get_cpu_multiplier(void)
/**
- * powernow_k6_set_state - set the PowerNow! multiplier
+ * powernow_k6_target - set the PowerNow! multiplier
* @best_i: clock_ratio[best_i] is the target multiplier
*
* Tries to change the PowerNow! multiplier
*/
-static void powernow_k6_set_state(struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
unsigned long outvalue = 0, invalue = 0;
@@ -77,7 +77,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
- return;
+ return -EINVAL;
}
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
@@ -100,44 +100,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return;
-}
-
-
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
-/**
- * powernow_k6_setpolicy - sets a new CPUFreq policy
- * @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
- *
- * sets a new CPUFreq policy
- */
-static int powernow_k6_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, &clock_ratio[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- powernow_k6_set_state(policy, newstate);
-
return 0;
}
@@ -145,7 +107,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
- int result;
if (policy->cpu != 0)
return -ENODEV;
@@ -165,15 +126,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 200000;
- policy->cur = busfreq * max_multiplier;
-
- result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, clock_ratio);
}
@@ -182,7 +136,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
unsigned int i;
for (i = 0; i < 8; i++) {
if (i == max_multiplier)
- powernow_k6_set_state(policy, i);
+ powernow_k6_target(policy, i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
@@ -195,19 +149,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
return ret;
}
-static struct freq_attr *powernow_k6_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_k6_driver = {
- .verify = powernow_k6_verify,
- .target = powernow_k6_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = powernow_k6_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 14ce480be8ab..946708a1d745 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@ static void change_VID(int vid)
}
-static void change_speed(struct cpufreq_policy *policy, unsigned int index)
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
{
u8 fid, vid;
struct cpufreq_freqs freqs;
@@ -291,6 +291,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
}
@@ -533,27 +535,6 @@ static int powernow_decode_bios(int maxfid, int startvid)
}
-static int powernow_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate;
-
- if (cpufreq_frequency_table_target(policy, powernow_table, target_freq,
- relation, &newstate))
- return -EINVAL;
-
- change_speed(policy, newstate);
-
- return 0;
-}
-
-
-static int powernow_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
/*
* We use the fact that the bus frequency is somehow
* a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +659,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
- policy->cur = powernow_get(0);
-
- cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
- return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+ return cpufreq_table_validate_and_show(policy, powernow_table);
}
static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,14 +678,9 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *powernow_table_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver powernow_driver = {
- .verify = powernow_verify,
- .target = powernow_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_target,
.get = powernow_get,
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
.bios_limit = acpi_processor_get_bios_limit,
@@ -716,7 +688,7 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = powernow_table_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 2344a9ed17f3..0023c7d40a51 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -977,20 +977,17 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
struct powernowk8_target_arg {
struct cpufreq_policy *pol;
- unsigned targfreq;
- unsigned relation;
+ unsigned newstate;
};
static long powernowk8_target_fn(void *arg)
{
struct powernowk8_target_arg *pta = arg;
struct cpufreq_policy *pol = pta->pol;
- unsigned targfreq = pta->targfreq;
- unsigned relation = pta->relation;
+ unsigned newstate = pta->newstate;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
- unsigned int newstate;
int ret;
if (!data)
@@ -1004,8 +1001,9 @@ static long powernowk8_target_fn(void *arg)
return -EIO;
}
- pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
- pol->cpu, targfreq, pol->min, pol->max, relation);
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+ pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+ pol->max);
if (query_current_values_with_pending_wait(data))
return -EIO;
@@ -1021,10 +1019,6 @@ static long powernowk8_target_fn(void *arg)
checkvid, data->currvid);
}
- if (cpufreq_frequency_table_target(pol, data->powernow_table,
- targfreq, relation, &newstate))
- return -EIO;
-
mutex_lock(&fidvid_mutex);
powernow_k8_acpi_pst_values(data, newstate);
@@ -1044,26 +1038,13 @@ static long powernowk8_target_fn(void *arg)
}
/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
- unsigned targfreq, unsigned relation)
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
{
- struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
- .relation = relation };
+ struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
- struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
- if (!data)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
struct init_on_cpu {
struct powernow_k8_data *data;
int rc;
@@ -1152,11 +1133,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;
- pol->cur = find_khz_freq_from_fid(data->currfid);
- pr_debug("policy current frequency %d kHz\n", pol->cur);
-
/* min/max the cpu is capable of */
- if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+ if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
@@ -1164,8 +1142,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
- cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1227,20 +1203,16 @@ out:
return khz;
}
-static struct freq_attr *powernow_k8_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver cpufreq_amd64_driver = {
- .verify = powernowk8_verify,
- .target = powernowk8_target,
+ .flags = CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = powernow_k8_attr,
+ .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 60e81d524ea8..3f7be46d2b27 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -69,8 +69,6 @@ static const struct soc_data sdata[] = {
static u32 min_cpufreq;
static const u32 *fmask;
-/* serialize frequency changes */
-static DEFINE_MUTEX(cpufreq_lock);
static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
/* cpumask in a cluster */
@@ -202,7 +200,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
table[i].frequency = CPUFREQ_TABLE_END;
/* set the min and max frequency properly */
- ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
goto err_nomem1;
@@ -217,9 +215,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(cpu_data, i) = data;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(table, cpu);
of_node_put(np);
return 0;
@@ -253,60 +248,25 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *table =
- per_cpu(cpu_data, policy->cpu)->table;
-
- return cpufreq_frequency_table_verify(policy, table);
-}
-
static int corenet_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
- unsigned int new;
struct clk *parent;
- int ret;
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
- cpufreq_frequency_table_target(policy, data->table,
- target_freq, relation, &new);
-
- if (policy->cur == data->table[new].frequency)
- return 0;
-
- freqs.old = policy->cur;
- freqs.new = data->table[new].frequency;
-
- mutex_lock(&cpufreq_lock);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- parent = of_clk_get(data->parent, data->table[new].driver_data);
- ret = clk_set_parent(data->clk, parent);
- if (ret)
- freqs.new = freqs.old;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cpufreq_lock);
-
- return ret;
+ parent = of_clk_get(data->parent, data->table[index].driver_data);
+ return clk_set_parent(data->clk, parent);
}
-static struct freq_attr *corenet_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit),
- .verify = corenet_cpufreq_verify,
- .target = corenet_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = corenet_cpufreq_target,
.get = corenet_cpufreq_get_speed,
- .attr = corenet_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct of_device_id node_matches[] __initdata = {
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 2e448f0bbdc5..e42ca9c31cea 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -30,9 +30,6 @@
#include "ppc_cbe_cpufreq.h"
-static DEFINE_MUTEX(cbe_switch_mutex);
-
-
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
{1, 0},
@@ -123,63 +120,28 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
/* this ensures that policy->cpuinfo_min
* and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, cbe_freqs);
+ return cpufreq_table_validate_and_show(policy, cbe_freqs);
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int cbe_pmode_new)
{
- int rc;
- struct cpufreq_freqs freqs;
- unsigned int cbe_pmode_new;
-
- cpufreq_frequency_table_target(policy,
- cbe_freqs,
- target_freq,
- relation,
- &cbe_pmode_new);
-
- freqs.old = policy->cur;
- freqs.new = cbe_freqs[cbe_pmode_new].frequency;
-
- mutex_lock(&cbe_switch_mutex);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
pr_debug("setting frequency for cpu %d to %d kHz, " \
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
cbe_freqs[cbe_pmode_new].driver_data);
- rc = set_pmode(policy->cpu, cbe_pmode_new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- mutex_unlock(&cbe_switch_mutex);
-
- return rc;
+ return set_pmode(policy->cpu, cbe_pmode_new);
}
static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cbe_cpufreq_verify,
- .target = cbe_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 8749eaf18793..0a0f4369636a 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -262,36 +262,15 @@ static u32 mdrefr_dri(unsigned int freq)
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pxa_freqs_table;
- pxa_freqs_t *pxa_freqs;
- int ret;
-
- find_freq_tables(&pxa_freqs_table, &pxa_freqs);
- ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
- if (freq_debug)
- pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
- policy->min, policy->max);
-
- return ret;
-}
-
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
}
-static int pxa_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freq_settings;
- struct cpufreq_freqs freqs;
- unsigned int idx;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@@ -300,32 +279,19 @@ static int pxa_set_target(struct cpufreq_policy *policy,
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
- target_freq, relation, &idx)) {
- return -EINVAL;
- }
-
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
- freqs.old = policy->cur;
- freqs.new = new_freq_cpu;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
- freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
+ new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
- if (vcc_core && freqs.new > freqs.old)
+ if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
- if (ret)
- return ret;
- /*
- * Tell everyone what we're about to do...
- * you should add a notify client with any platform specific
- * Vcc changing capability
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ if (ret)
+ return ret;
+ }
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
@@ -376,13 +342,6 @@ static int pxa_set_target(struct cpufreq_policy *policy,
local_irq_restore(flags);
/*
- * Tell everyone what we've just done...
- * you should add a notify client with any platform specific
- * SDRAM refresh timer adjustments
- */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- /*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
* only power savings will suffer from this.
@@ -391,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
* bug is triggered (seems a deadlock). Should anybody find out where,
* the "return 0" should become a "return ret".
*/
- if (vcc_core && freqs.new < freqs.old)
+ if (vcc_core && new_freq_cpu < policy->cur)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;
@@ -414,8 +373,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->cur = get_clk_frequency_khz(0); /* current freq */
- policy->min = policy->max = policy->cur;
/* Generate pxa25x the run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +410,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
- cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+ cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+ }
+ else if (cpu_is_pxa27x()) {
+ cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
- else if (cpu_is_pxa27x())
- cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
@@ -464,9 +423,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
- .verify = pxa_verify_policy,
- .target = pxa_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa_set_target,
.init = pxa_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index d26306fb00d2..93840048dd11 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
pxa3xx_freqs_num = num;
pxa3xx_freqs_table = table;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,54 +150,26 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
cpu_relax();
}
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
{
return pxa3xx_get_clk_frequency_khz(0);
}
-static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
{
struct pxa3xx_freq_info *next;
- struct cpufreq_freqs freqs;
unsigned long flags;
- int idx;
if (policy->cpu != 0)
return -EINVAL;
- /* Lookup the next frequency */
- if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
- target_freq, relation, &idx))
- return -EINVAL;
-
- next = &pxa3xx_freqs[idx];
-
- freqs.old = policy->cur;
- freqs.new = next->cpufreq_mhz * 1000;
-
- pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
- freqs.old / 1000, freqs.new / 1000,
- (freqs.old == freqs.new) ? " (skipped)" : "");
-
- if (freqs.old == target_freq)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ next = &pxa3xx_freqs[index];
local_irq_save(flags);
__update_core_freq(next);
__update_bus_freq(next);
local_irq_restore(flags);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
@@ -206,11 +178,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
int ret = -EINVAL;
/* set default policy and cpuinfo */
- policy->cpuinfo.min_freq = 104000;
- policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->min = policy->cpuinfo.min_freq = 104000;
+ policy->max = policy->cpuinfo.max_freq =
+ (cpu_is_pxa320()) ? 806000 : 624000;
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
- policy->max = pxa3xx_get_clk_frequency_khz(0);
- policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
- .verify = pxa3xx_cpufreq_verify,
- .target = pxa3xx_cpufreq_set,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
+ .exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 22dcb81ef9d0..8d904a00027b 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
{ 0, CPUFREQ_TABLE_END },
};
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -227,24 +217,15 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
}
static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_freqs freqs;
+ unsigned int new_freq;
int idx, ret, to_dvs = 0;
- unsigned int i;
mutex_lock(&cpufreq_lock);
- pr_debug("cpufreq: to %dKHz, relation %d\n", target_freq, relation);
-
- ret = cpufreq_frequency_table_target(policy, s3c_freq->freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- goto out;
-
- idx = s3c_freq->freq_table[i].driver_data;
+ idx = s3c_freq->freq_table[index].driver_data;
if (idx == SOURCE_HCLK)
to_dvs = 1;
@@ -256,24 +237,13 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
goto out;
}
- freqs.flags = 0;
- freqs.old = s3c_freq->is_dvs ? FREQ_DVS
- : clk_get_rate(s3c_freq->armclk) / 1000;
-
/* When leavin dvs mode, always switch the armdiv to the hclk rate
* The S3C2416 has stability issues when switching directly to
* higher frequencies.
*/
- freqs.new = (s3c_freq->is_dvs && !to_dvs)
+ new_freq = (s3c_freq->is_dvs && !to_dvs)
? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[i].frequency;
-
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- if (!to_dvs && freqs.old == freqs.new)
- goto out;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ : s3c_freq->freq_table[index].frequency;
if (to_dvs) {
pr_debug("cpufreq: enter dvs\n");
@@ -282,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
pr_debug("cpufreq: leave dvs\n");
ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
} else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
+ pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
out:
mutex_unlock(&cpufreq_lock);
@@ -486,20 +454,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
/* Datasheet says PLL stabalisation time must be at least 300us,
* so but add some fudge. (reference in LOCKCON0 register description)
*/
- policy->cpuinfo.transition_latency = (500 * 1000) +
- s3c_freq->regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+ ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+ (500 * 1000) + s3c_freq->regulator_latency);
if (ret)
goto err_freq_table;
- cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
return 0;
@@ -518,19 +480,14 @@ err_hclk:
return ret;
}
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver s3c2416_cpufreq_driver = {
.flags = 0,
- .verify = s3c2416_cpufreq_verify_speed,
- .target = s3c2416_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
.init = s3c2416_cpufreq_driver_init,
.name = "s3c2416",
- .attr = s3c2416_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init s3c2416_cpufreq_init(void)
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index b0f343fcb7ee..485088253358 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
- printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- policy->cur = s3c_cpufreq_get(0);
- policy->min = policy->cpuinfo.min_freq = 0;
- policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
- /* feed the latency information from the cpu driver */
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
- if (ftab)
- cpufreq_frequency_table_cpuinfo(policy, ftab);
-
- return 0;
+ return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
return 0;
}
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return 0;
-}
-
#ifdef CONFIG_PM
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c24xx_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s3c_cpufreq_verify,
.target = s3c_cpufreq_target,
.get = s3c_cpufreq_get,
.init = s3c_cpufreq_init,
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 15631f92ab7d..67e302eeefec 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
if (cpu != 0)
@@ -71,66 +63,48 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
}
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int index)
{
- int ret;
- unsigned int i;
- struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq, new_freq;
+ int ret;
- ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- return ret;
-
- freqs.old = clk_get_rate(armclk) / 1000;
- freqs.new = s3c64xx_freq_table[i].frequency;
- freqs.flags = 0;
- dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
-
- if (freqs.old == freqs.new)
- return 0;
-
- pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ old_freq = clk_get_rate(armclk) / 1000;
+ new_freq = s3c64xx_freq_table[index].frequency;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new > freqs.old) {
+ if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
- goto post_notify;
+ new_freq, ret);
+ return ret;
}
}
#endif
- ret = clk_set_rate(armclk, freqs.new * 1000);
+ ret = clk_set_rate(armclk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
- freqs.new, ret);
- freqs.new = freqs.old;
+ new_freq, ret);
+ return ret;
}
-post_notify:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- if (ret)
- goto err;
-
#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new < freqs.old) {
+ if (vddarm && new_freq < old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- goto err_clk;
+ new_freq, ret);
+ if (clk_set_rate(armclk, old_freq * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+
+ return ret;
}
}
#endif
@@ -139,14 +113,6 @@ post_notify:
clk_get_rate(armclk) / 1000);
return 0;
-
-err_clk:
- if (clk_set_rate(armclk, freqs.old * 1000) < 0)
- pr_err("Failed to restore original clock rate\n");
-err:
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
}
#ifdef CONFIG_REGULATOR
@@ -243,15 +209,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
- policy->cur = clk_get_rate(armclk) / 1000;
-
/* Datasheet says PLL stabilisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
- policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+ (500 * 1000) + regulator_latency);
if (ret != 0) {
pr_err("Failed to configure frequency table: %d\n",
ret);
@@ -264,8 +227,8 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.flags = 0,
- .verify = s3c64xx_cpufreq_verify_speed,
- .target = s3c64xx_cpufreq_set_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5c7757073793..e3973dae28a7 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -26,7 +26,6 @@
static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);
/* APLL M,P,S values for 1G/800Mhz */
@@ -36,16 +35,7 @@ static DEFINE_MUTEX(set_freq_lock);
/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ (800 * 1000)
-/*
- * relation has an additional symantics other than the standard of cpufreq
- * DISALBE_FURTHER_CPUFREQ: disable further access to target
- * ENABLE_FURTUER_CPUFREQ: enable access to target
- */
-enum cpufreq_access {
- DISABLE_FURTHER_CPUFREQ = 0x10,
- ENABLE_FURTHER_CPUFREQ = 0x20,
-};
-
+/* Tracks whether cpu frequency can still be updated */
static bool no_cpufreq_access;
/*
@@ -174,14 +164,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
static unsigned int s5pv210_getspeed(unsigned int cpu)
{
if (cpu)
@@ -190,22 +172,18 @@ static unsigned int s5pv210_getspeed(unsigned int cpu)
return clk_get_rate(cpu_clk) / 1000;
}
-static int s5pv210_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned long reg;
- unsigned int index, priv_index;
+ unsigned int priv_index;
unsigned int pll_changing = 0;
unsigned int bus_speed_changing = 0;
+ unsigned int old_freq, new_freq;
int arm_volt, int_volt;
int ret = 0;
mutex_lock(&set_freq_lock);
- if (relation & ENABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = false;
-
if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
pr_err("%s:%d denied access to %s as it is disabled"
@@ -215,27 +193,13 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- if (relation & DISABLE_FURTHER_CPUFREQ)
- no_cpufreq_access = true;
-
- relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
-
- freqs.old = s5pv210_getspeed(0);
-
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
- goto exit;
- }
-
- freqs.new = s5pv210_freq_table[index].frequency;
-
- if (freqs.new == freqs.old)
- goto exit;
+ old_freq = s5pv210_getspeed(0);
+ new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- freqs.old, relation, &priv_index)) {
+ old_freq, CPUFREQ_RELATION_H,
+ &priv_index)) {
ret = -EINVAL;
goto exit;
}
@@ -243,7 +207,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
- if (freqs.new > freqs.old) {
+ if (new_freq > old_freq) {
ret = regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
if (ret)
@@ -255,8 +219,6 @@ static int s5pv210_target(struct cpufreq_policy *policy,
goto exit;
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/* Check if there need to change PLL */
if ((index == L0) || (priv_index == L0))
pll_changing = 1;
@@ -467,9 +429,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
- if (freqs.new < freqs.old) {
+ if (new_freq < old_freq) {
regulator_set_voltage(int_regulator,
int_volt, int_volt_max);
@@ -551,13 +511,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
- policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
- cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
- policy->cpuinfo.transition_latency = 40000;
-
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
clk_put(dmc0_clk);
@@ -573,16 +527,18 @@ static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
switch (event) {
case PM_SUSPEND_PREPARE:
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ /* Disable further cpu frequency updates */
+ no_cpufreq_access = true;
return NOTIFY_OK;
case PM_POST_RESTORE:
case PM_POST_SUSPEND:
- cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- ENABLE_FURTHER_CPUFREQ);
+ /* Re-enable cpu frequency updates */
+ no_cpufreq_access = false;
+ cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
return NOTIFY_OK;
}
@@ -595,18 +551,18 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
{
int ret;
- ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
- DISABLE_FURTHER_CPUFREQ);
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
if (ret < 0)
return NOTIFY_BAD;
+ no_cpufreq_access = true;
return NOTIFY_DONE;
}
static struct cpufreq_driver s5pv210_driver = {
.flags = CPUFREQ_STICKY,
- .verify = s5pv210_verify_speed,
- .target = s5pv210_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s5pv210_target,
.get = s5pv210_getspeed,
.init = s5pv210_cpu_init,
.name = "s5pv210",
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index cff18e87ca58..623da742f8e7 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -177,60 +177,33 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed)
}
}
-static int sa1100_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_ppcr;
- struct cpufreq_freqs freqs;
-
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
- new_ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
- new_ppcr--;
- break;
- }
-
- freqs.old = cur;
- freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
+ unsigned int new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ new_freq = sa11x0_freq_table[ppcr].frequency;
- if (freqs.new > cur)
- sa1100_update_dram_timings(cur, freqs.new);
+ if (new_freq > cur)
+ sa1100_update_dram_timings(cur, new_freq);
- PPCR = new_ppcr;
+ PPCR = ppcr;
- if (freqs.new < cur)
- sa1100_update_dram_timings(cur, freqs.new);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ if (new_freq < cur)
+ sa1100_update_dram_timings(cur, new_freq);
return 0;
}
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
static struct cpufreq_driver sa1100_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1100_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1100_target,
.get = sa11x0_getspeed,
.init = sa1100_cpu_init,
.name = "sa1100",
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 39c90b6f4286..2c2b2e601d13 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -229,36 +229,14 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
/*
* Ok, set the CPU frequency.
*/
-static int sa1110_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
struct sdram_params *sdram = &sdram_params;
- struct cpufreq_freqs freqs;
struct sdram_info sd;
unsigned long flags;
- unsigned int ppcr, unused;
-
- switch (relation) {
- case CPUFREQ_RELATION_L:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
- ppcr--;
- break;
- case CPUFREQ_RELATION_H:
- ppcr = sa11x0_freq_to_ppcr(target_freq);
- if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
- (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
- ppcr--;
- break;
- default:
- return -EINVAL;
- }
-
- freqs.old = sa11x0_getspeed(0);
- freqs.new = sa11x0_ppcr_to_freq(ppcr);
+ unsigned int unused;
- sdram_calculate_timing(&sd, freqs.new, sdram);
+ sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
#if 0
/*
@@ -277,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy,
sd.mdcas[2] = 0xaaaaaaaa;
#endif
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
/*
* The clock could be going away for some time. Set the SDRAMs
* to refresh rapidly (every 64 memory clock cycles). To get
@@ -323,30 +299,22 @@ static int sa1110_target(struct cpufreq_policy *policy,
/*
* Now, return the SDRAM refresh back to normal.
*/
- sdram_update_refresh(freqs.new, sdram);
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
return 0;
}
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
- if (policy->cpu != 0)
- return -EINVAL;
- policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
- policy->cpuinfo.min_freq = 59000;
- policy->cpuinfo.max_freq = 287000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- return 0;
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
.flags = CPUFREQ_STICKY,
- .verify = sa11x0_verify_speed,
- .target = sa1110_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1110_target,
.get = sa11x0_getspeed,
.init = sa1110_cpu_init,
.name = "sa1110",
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index d6f6c6f4efa7..6adb354e359c 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,21 +53,11 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
}
}
-static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
- unsigned int state)
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
- struct cpufreq_freqs freqs;
u8 clockspeed_reg;
- freqs.old = sc520_freq_get_cpu_frequency(0);
- freqs.new = sc520_freq_table[state].frequency;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- pr_debug("attempting to set frequency to %i kHz\n",
- sc520_freq_table[state].frequency);
-
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
@@ -75,30 +65,9 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_enable();
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-};
-
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
-static int sc520_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int newstate = 0;
-
- if (cpufreq_frequency_table_target(policy, sc520_freq_table,
- target_freq, relation, &newstate))
- return -EINVAL;
-
- sc520_freq_set_cpu_state(policy, newstate);
-
return 0;
}
-
/*
* Module init and exit code
*/
@@ -106,7 +75,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
- int result;
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +83,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
- policy->cur = sc520_freq_get_cpu_frequency(0);
-
- result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, sc520_freq_table);
}
-static struct freq_attr *sc520_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
- .verify = sc520_freq_verify,
- .target = sc520_freq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
- .exit = sc520_freq_cpu_exit,
+ .exit = cpufreq_generic_exit,
.name = "sc520_freq",
- .attr = sc520_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index ffc6d24b0cfb..387af12503a6 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ cpufreq_verify_within_cpu_limits(policy);
policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
- policy->cur = sh_cpufreq_get(cpu);
-
freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
if (freq_table) {
int result;
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
- if (!result)
- cpufreq_frequency_table_get_attr(freq_table, cpu);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
+ if (result)
+ return result;
} else {
dev_notice(dev, "no frequency table found, falling back "
"to rate rounding.\n");
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct freq_attr *sh_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.get = sh_cpufreq_get,
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = sh_freq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index cf5bc2ca16fa..62aa23e219d4 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -245,14 +245,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
return clock_tick / estar_to_divisor(estar);
}
-static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -266,41 +264,15 @@ static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
old_divisor = estar_to_divisor(estar);
- freqs.old = clock_tick / old_divisor;
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (old_divisor != divisor)
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us2e_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us2e_freq_table[policy->cpu].table[0],
- target_freq, relation, &new_index))
- return -EINVAL;
-
- us2e_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us2e_freq_table[policy->cpu].table[0]);
-}
-
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -324,13 +296,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us2e_driver)
- us2e_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us2e_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us2e_freq_target(policy, 0);
+ }
return 0;
}
@@ -361,8 +335,8 @@ static int __init us2e_freq_init(void)
goto err_out;
driver->init = us2e_freq_cpu_init;
- driver->verify = us2e_freq_verify;
- driver->target = us2e_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-IIe");
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index ac76b489979d..724ffbd7105d 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -93,13 +93,11 @@ static unsigned int us3_freq_get(unsigned int cpu)
return ret;
}
-static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
- unsigned int index)
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq, reg;
cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -125,43 +123,15 @@ static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
reg = read_safari_cfg();
- freqs.old = get_current_freq(cpu, reg);
- freqs.new = new_freq;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
reg &= ~SAFARI_CFG_DIV_MASK;
reg |= new_bits;
write_safari_cfg(reg);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
set_cpus_allowed_ptr(current, &cpus_allowed);
-}
-
-static int us3_freq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int new_index = 0;
-
- if (cpufreq_frequency_table_target(policy,
- &us3_freq_table[policy->cpu].table[0],
- target_freq,
- relation,
- &new_index))
- return -EINVAL;
-
- us3_set_cpu_divider_index(policy, new_index);
return 0;
}
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- &us3_freq_table[policy->cpu].table[0]);
-}
-
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
@@ -181,13 +151,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
- return cpufreq_frequency_table_cpuinfo(policy, table);
+ return cpufreq_table_validate_and_show(policy, table);
}
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
- if (cpufreq_us3_driver)
- us3_set_cpu_divider_index(policy, 0);
+ if (cpufreq_us3_driver) {
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ us3_freq_target(policy, 0);
+ }
return 0;
}
@@ -222,8 +194,8 @@ static int __init us3_freq_init(void)
goto err_out;
driver->init = us3_freq_cpu_init;
- driver->verify = us3_freq_verify;
- driver->target = us3_freq_target;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us3_freq_target;
driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
strcpy(driver->name, "UltraSPARC-III");
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 3f418166ce02..d02ccd19c9c4 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,11 +30,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
static unsigned int spear_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -110,20 +105,14 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
}
static int spear_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int index)
{
- struct cpufreq_freqs freqs;
long newfreq;
struct clk *srcclk;
- int index, ret, mult = 1;
-
- if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
- target_freq, relation, &index))
- return -EINVAL;
-
- freqs.old = spear_cpufreq_get(0);
+ int ret, mult = 1;
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
if (of_machine_is_compatible("st,spear1340")) {
/*
* SPEAr1340 is special in the sense that due to the possibility
@@ -154,65 +143,32 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
return newfreq;
}
- freqs.new = newfreq / 1000;
- freqs.new /= mult;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
else
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
- /* Get current rate after clk_set_rate, in case of failure */
- if (ret) {
+ if (ret)
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
- freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
- }
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
- if (ret) {
- pr_err("cpufreq_frequency_table_cpuinfo() failed");
- return ret;
- }
-
- cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
- policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
- policy->cur = spear_cpufreq_get(0);
-
- cpumask_setall(policy->cpus);
-
- return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+ spear_cpufreq.transition_latency);
}
-static struct freq_attr *spear_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
.flags = CPUFREQ_STICKY,
- .verify = spear_cpufreq_verify,
- .target = spear_cpufreq_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = spear_cpufreq_target,
.get = spear_cpufreq_get,
.init = spear_cpufreq_init,
- .exit = spear_cpufreq_exit,
- .attr = spear_cpufreq_attr,
+ .exit = cpufreq_generic_exit,
+ .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_driver_init(void)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index f897d5105842..4e1daca5ce3b 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
- unsigned freq;
unsigned l, h;
- int ret;
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (centrino_cpu_init_table(policy)) {
+ if (centrino_cpu_init_table(policy))
return -ENODEV;
- }
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
}
}
- freq = get_cur_freq(policy->cpu);
policy->cpuinfo.transition_latency = 10000;
/* 10uS transition latency */
- policy->cur = freq;
-
- pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
- ret = cpufreq_frequency_table_cpuinfo(policy,
+ return cpufreq_table_validate_and_show(policy,
per_cpu(centrino_model, policy->cpu)->op_points);
- if (ret)
- return (ret);
-
- cpufreq_frequency_table_get_attr(
- per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
- return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -428,36 +414,18 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy,
- per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
-/**
* centrino_setpolicy - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int centrino_target (struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
- struct cpufreq_freqs freqs;
int retval = 0;
- unsigned int j, first_cpu, tmp;
+ unsigned int j, first_cpu;
+ struct cpufreq_frequency_table *op_points;
cpumask_var_t covered_cpus;
if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -468,16 +436,8 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- if (unlikely(cpufreq_frequency_table_target(policy,
- per_cpu(centrino_model, cpu)->op_points,
- target_freq,
- relation,
- &newstate))) {
- retval = -EINVAL;
- goto out;
- }
-
first_cpu = 1;
+ op_points = &per_cpu(centrino_model, cpu)->op_points[index];
for_each_cpu(j, policy->cpus) {
int good_cpu;
@@ -501,7 +461,7 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
+ msr = op_points->driver_data;
if (first_cpu) {
rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
@@ -512,15 +472,6 @@ static int centrino_target (struct cpufreq_policy *policy,
goto out;
}
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
-
- pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
-
- cpufreq_notify_transition(policy, &freqs,
- CPUFREQ_PRECHANGE);
-
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
@@ -535,8 +486,6 @@ static int centrino_target (struct cpufreq_policy *policy,
cpumask_set_cpu(j, covered_cpus);
}
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
@@ -547,12 +496,6 @@ static int centrino_target (struct cpufreq_policy *policy,
for_each_cpu(j, covered_cpus)
wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
-
- tmp = freqs.new;
- freqs.new = freqs.old;
- freqs.old = tmp;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
retval = 0;
@@ -561,20 +504,15 @@ out:
return retval;
}
-static struct freq_attr* centrino_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
.exit = centrino_cpu_exit,
- .verify = centrino_verify,
- .target = centrino_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = centrino_target,
.get = get_cur_freq,
- .attr = centrino_attr,
+ .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 5355abb69afc..7639b2be2a90 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -251,56 +251,23 @@ static unsigned int speedstep_get(unsigned int cpu)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: the target frequency
- * @relation: how that frequency relates to achieved frequency
- * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ * @index: index of target frequency
*
* Sets a new CPUFreq policy.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0, policy_cpu;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
+ unsigned int policy_cpu;
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
- freqs.old = speedstep_get(policy_cpu);
- freqs.new = speedstep_freqs[newstate].frequency;
-
- pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
-
- /* no transition necessary */
- if (freqs.old == freqs.new)
- return 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
- smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
true);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
-
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
struct get_freqs {
struct cpufreq_policy *policy;
int ret;
@@ -320,8 +287,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
- int result;
- unsigned int policy_cpu, speed;
+ unsigned int policy_cpu;
struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
@@ -336,49 +302,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (gf.ret)
return gf.ret;
- /* get current speed setting */
- speed = speedstep_get(policy_cpu);
- if (!speed)
- return -EIO;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index abfba4f731eb..0f5326d6f79f 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -235,52 +235,21 @@ static void speedstep_set_state(unsigned int state)
/**
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
- * @target_freq: new freq
- * @relation:
+ * @index: index of new freq
*
* Sets a new CPUFreq policy/freq.
*/
-static int speedstep_target(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int newstate = 0;
- struct cpufreq_freqs freqs;
-
- if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
- target_freq, relation, &newstate))
- return -EINVAL;
-
- freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
- freqs.new = speedstep_freqs[newstate].frequency;
-
- if (freqs.old == freqs.new)
- return 0;
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- speedstep_set_state(newstate);
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ speedstep_set_state(index);
return 0;
}
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
- unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
@@ -316,32 +285,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
- /* get current speed setting */
- state = speedstep_get_state();
- speed = speedstep_freqs[state].frequency;
-
- pr_debug("currently at %s speed setting - %i MHz\n",
- (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
- ? "low" : "high",
- (speed / 1000));
-
- /* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- policy->cur = speed;
-
- result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
- if (result)
- return result;
-
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
- return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +307,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
return result;
}
-static struct freq_attr *speedstep_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
.init = speedstep_cpu_init,
- .exit = speedstep_cpu_exit,
+ .exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = speedstep_attr,
+ .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index a7b876fdc1d8..f42df7ec03c5 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
static unsigned int tegra_getspeed(unsigned int cpu)
{
unsigned long rate;
@@ -107,12 +102,8 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
unsigned long rate)
{
int ret = 0;
- struct cpufreq_freqs freqs;
- freqs.old = tegra_getspeed(0);
- freqs.new = rate;
-
- if (freqs.old == freqs.new)
+ if (tegra_getspeed(0) == rate)
return ret;
/*
@@ -126,21 +117,10 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
else
clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_CPU_FREQ_DEBUG
- printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
- freqs.old, freqs.new);
-#endif
-
- ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
- if (ret) {
- pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
- freqs.new);
- freqs.new = freqs.old;
- }
-
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ ret = tegra_cpu_clk_set_rate(rate * 1000);
+ if (ret)
+ pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
+ rate);
return ret;
}
@@ -155,11 +135,8 @@ static unsigned long tegra_cpu_highest_speed(void)
return rate;
}
-static int tegra_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned int idx;
unsigned int freq;
int ret = 0;
@@ -170,10 +147,7 @@ static int tegra_target(struct cpufreq_policy *policy,
goto out;
}
- cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &idx);
-
- freq = freq_table[idx].frequency;
+ freq = freq_table[index].frequency;
target_cpu_speed[policy->cpu] = freq;
@@ -209,21 +183,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
+ int ret;
+
if (policy->cpu >= NUM_CPUS)
return -EINVAL;
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
- policy->cur = tegra_getspeed(policy->cpu);
- target_cpu_speed[policy->cpu] = policy->cur;
+ target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
/* FIXME: what's the actual transition time? */
- policy->cpuinfo.transition_latency = 300 * 1000;
-
- cpumask_copy(policy->cpus, cpu_possible_mask);
+ ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (ret) {
+ clk_disable_unprepare(cpu_clk);
+ clk_disable_unprepare(emc_clk);
+ return ret;
+ }
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +209,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
}
-static struct freq_attr *tegra_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
static struct cpufreq_driver tegra_cpufreq_driver = {
- .verify = tegra_verify_speed,
- .target = tegra_target,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = tegra_target,
.get = tegra_getspeed,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
- .attr = tegra_cpufreq_attr,
+ .attr = cpufreq_generic_attr,
};
static int __init tegra_cpufreq_init(void)
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index b225f04d8ae5..653ae2955b55 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+ cpufreq_verify_within_cpu_limits(policy);
return 0;
}
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
- policy->cur = ucv2_getspeed(0);
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 000000000000..7f7c9c01b44e
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,70 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+ /*
+ * platform-specific SPC code must initialise the OPP table,
+ * so just check that the OPP count is non-zero
+ */
+ return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+ return 1000000; /* 1 ms */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+ .name = "vexpress-spc",
+ .get_transition_latency = ve_spc_get_transition_latency,
+ .init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+ return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+ .driver = {
+ .name = "vexpress-spc-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = ve_spc_cpufreq_probe,
+ .remove = ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d6f57d5d9631..d988948a89a0 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -2,6 +2,17 @@
# ARM CPU Idle drivers
#
+config ARM_BIG_LITTLE_CPUIDLE
+ bool "Support for ARM big.LITTLE processors"
+ depends on ARCH_VEXPRESS_TC2_PM
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this option to enable the CPU idle driver for big.LITTLE based
+ ARM systems. The driver manages CPU coordination through MCPM and
+ defines different C-states for little and big cores through the
+ multiple CPU idle drivers infrastructure.
+
config ARM_HIGHBANK_CPUIDLE
bool "CPU Idle Driver for Calxeda processors"
depends on ARM_PSCI
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE
help
Select this to enable cpuidle for ST-E u8500 processors
-config CPU_IDLE_BIG_LITTLE
- bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM
- select ARM_CPU_SUSPEND
- select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+ bool "Cpu Idle Driver for the AT91 processors"
+ default y
+ depends on ARCH_AT91
help
- Select this option to enable CPU idle driver for big.LITTLE based
- ARM systems. Driver manages CPUs coordination through MCPM and
- define different C-states for little and big cores through the
- multiple CPU idle drivers infrastructure.
+ Select this to enable cpuidle for AT91 processors
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index cea5ef58876d..527be28e5c1e 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index f8a86364c6b6..e952936418d0 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -147,7 +147,7 @@ static cpumask_t cpuidle_coupled_poked;
* has returned from this function, the barrier is immediately available for
* reuse.
*
- * The atomic variable a must be initialized to 0 before any cpu calls
+ * The atomic variable must be initialized to 0 before any cpu calls
* this function, and will be reset to 0 before any cpu returns from this function.
*
* Must only be called from within a coupled idle state handler
diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c
new file mode 100644
index 000000000000..a0774370c6bc
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-at91.c
@@ -0,0 +1,69 @@
+/*
+ * based on arch/arm/mach-kirkwood/cpuidle.c
+ *
+ * CPU idle support for AT91 SoC
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * CPU idle support uses wait-for-interrupt and RAM self refresh in order
+ * to implement two idle states -
+ * #1 wait-for-interrupt
+ * #2 wait-for-interrupt and RAM self refresh
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
+
+#define AT91_MAX_STATES 2
+
+static void (*at91_standby)(void);
+
+/* Actual code that puts the SoC in different idle states */
+static int at91_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ at91_standby();
+ return index;
+}
+
+static struct cpuidle_driver at91_idle_driver = {
+ .name = "at91_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = at91_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 10000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "RAM_SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = AT91_MAX_STATES,
+};
+
+/* Initialize CPU idle by registering the idle states */
+static int at91_cpuidle_probe(struct platform_device *dev)
+{
+ at91_standby = (void *)(dev->dev.platform_data);
+
+ return cpuidle_register(&at91_idle_driver, NULL);
+}
+
+static struct platform_driver at91_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-at91",
+ .owner = THIS_MODULE,
+ },
+ .probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
index e0564652af35..5e35804b1a95 100644
--- a/drivers/cpuidle/cpuidle-ux500.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
.state_count = 2,
};
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
{
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c
index 38e03a183591..aded75928028 100644
--- a/drivers/cpuidle/cpuidle-zynq.c
+++ b/drivers/cpuidle/cpuidle-zynq.c
@@ -28,7 +28,7 @@
#include <linux/init.h>
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/proc-fns.h>
#include <asm/cpuidle.h>
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = {
};
/* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
{
- if (!of_machine_is_compatible("xlnx,zynq-7000"))
- return -ENODEV;
-
pr_info("Xilinx Zynq CpuIdle Driver started\n");
return cpuidle_register(&zynq_idle_driver, NULL);
}
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+ .driver = {
+ .name = "cpuidle-zynq",
+ .owner = THIS_MODULE,
+ },
+ .probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d75040ddd2b3..2a991e468f78 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -118,11 +118,9 @@ int cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv;
int next_state, entered_state;
+ bool broadcast;
- if (off)
- return -ENODEV;
-
- if (!initialized)
+ if (off || !initialized)
return -ENODEV;
/* check if the device is ready */
@@ -144,9 +142,10 @@ int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
- &dev->cpu);
+ broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state))
entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -154,9 +153,8 @@ int cpuidle_idle_call(void)
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
- if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- &dev->cpu);
+ if (broadcast)
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -228,45 +226,6 @@ void cpuidle_resume(void)
mutex_unlock(&cpuidle_lock);
}
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- ktime_t t1, t2;
- s64 diff;
-
- t1 = ktime_get();
- local_irq_enable();
- while (!need_resched())
- cpu_relax();
-
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
- return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
- struct cpuidle_state *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->flags = 0;
- state->enter = poll_idle;
- state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* cpuidle_enable_device - enables idle PM for a CPU
* @dev: the CPU
@@ -296,8 +255,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!dev->state_count)
dev->state_count = drv->state_count;
- poll_idle_init(drv);
-
ret = cpuidle_add_device_sysfs(dev);
if (ret)
return ret;
@@ -358,12 +315,10 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
module_put(drv->owner);
}
-static int __cpuidle_device_init(struct cpuidle_device *dev)
+static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
-
- return 0;
}
/**
@@ -385,13 +340,12 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
- if (ret) {
+ if (ret)
__cpuidle_unregister_device(dev);
- return ret;
- }
+ else
+ dev->registered = 1;
- dev->registered = 1;
- return 0;
+ return ret;
}
/**
@@ -410,9 +364,7 @@ int cpuidle_register_device(struct cpuidle_device *dev)
if (dev->registered)
goto out_unlock;
- ret = __cpuidle_device_init(dev);
- if (ret)
- goto out_unlock;
+ __cpuidle_device_init(dev);
ret = __cpuidle_register_device(dev);
if (ret)
@@ -516,7 +468,7 @@ int cpuidle_register(struct cpuidle_driver *drv,
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
/*
- * On multiplatform for ARM, the coupled idle states could
+ * On multiplatform for ARM, the coupled idle states could be
* enabled in the kernel even if the cpuidle driver does not
* use it. Note, coupled_cpus is a struct copy.
*/
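
The broadcast handling shown in this hunk is driven by idle states that set CPUIDLE_FLAG_TIMER_STOP. A minimal sketch of such a state definition (illustrative values and a hypothetical enter callback, not taken from any driver in this series):

/* Sketch: a deep idle state whose per-CPU timer stops; declaring
 * CPUIDLE_FLAG_TIMER_STOP makes cpuidle_idle_call() switch to the
 * broadcast clockevent around entry and exit, as above. */
static int example_enter_deep(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* hypothetical: the arch-specific low-power entry would go here */
	return index;
}

static struct cpuidle_state example_deep_state = {
	.name			= "C2-EXAMPLE",
	.desc			= "hypothetical deep state",
	.flags			= CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP,
	.exit_latency		= 300,		/* microseconds */
	.target_residency	= 1000,		/* microseconds */
	.enter			= example_enter_deep,
};
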
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 6e11701f0fca..06dbe7c86199 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
@@ -56,7 +57,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
}
/**
- * __cpuidle_set_driver - set per CPU driver variables the the given driver.
+ * __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
* For each CPU in the driver's cpumask, unset the registered driver per CPU
@@ -132,7 +133,7 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* cpuidle_setup_broadcast_timer - enable/disable the broadcast timer
* @arg: a void pointer used to match the SMP cross call API
*
- * @arg is used as a value of type 'long' with on of the two values:
+ * @arg is used as a value of type 'long' with one of the two values:
* - CLOCK_EVT_NOTIFY_BROADCAST_ON
* - CLOCK_EVT_NOTIFY_BROADCAST_OFF
*
@@ -149,10 +150,8 @@ static void cpuidle_setup_broadcast_timer(void *arg)
/**
* __cpuidle_driver_init - initialize the driver's internal data
* @drv: a valid pointer to a struct cpuidle_driver
- *
- * Returns 0 on success, a negative error code otherwise.
*/
-static int __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
int i;
@@ -169,20 +168,55 @@ static int __cpuidle_driver_init(struct cpuidle_driver *drv)
/*
* Look for the timer stop flag in the different states, so that we know
* if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually on of the the deeper states has this flag set.
+ * order, because usually one of the deeper states has this flag set.
*/
for (i = drv->state_count - 1; i >= 0 ; i--) {
+ if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ drv->bctimer = 1;
+ break;
+ }
+ }
+}
- if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
- continue;
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t t1, t2;
+ s64 diff;
- drv->bctimer = 1;
- break;
- }
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
- return 0;
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
}
+static void poll_idle_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_state *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = 0;
+ state->enter = poll_idle;
+ state->disabled = false;
+}
+#else
+static void poll_idle_init(struct cpuidle_driver *drv) {}
+#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* __cpuidle_register_driver: register the driver
* @drv: a valid pointer to a struct cpuidle_driver
@@ -206,9 +240,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- ret = __cpuidle_driver_init(drv);
- if (ret)
- return ret;
+ __cpuidle_driver_init(drv);
ret = __cpuidle_set_driver(drv);
if (ret)
@@ -218,6 +250,8 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+ poll_idle_init(drv);
+
return 0;
}
@@ -346,10 +380,11 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
*/
void cpuidle_driver_unref(void)
{
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
+ drv = cpuidle_get_driver();
if (drv && !WARN_ON(drv->refcnt <= 0))
drv->refcnt--;
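
The last hunk takes cpuidle_driver_lock before looking the driver up, so the pointer used for the refcount update cannot change under the caller. For context, a short usage sketch of the ref/unref pair this protects (hypothetical caller):

/* Sketch: borrow the active cpuidle driver under its reference count. */
struct cpuidle_driver *drv = cpuidle_driver_ref();

if (drv) {
	pr_info("active cpuidle driver: %s\n", drv->name);
	cpuidle_driver_unref();
}
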
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ea2f8e7aa24a..ca89412f5122 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -96,46 +96,3 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
return ret;
}
-
-/**
- * cpuidle_replace_governor - find a replacement governor
- * @exclude_rating: the rating that will be skipped while looking for
- * new governor.
- */
-static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
-{
- struct cpuidle_governor *gov;
- struct cpuidle_governor *ret_gov = NULL;
- unsigned int max_rating = 0;
-
- list_for_each_entry(gov, &cpuidle_governors, governor_list) {
- if (gov->rating == exclude_rating)
- continue;
- if (gov->rating > max_rating) {
- max_rating = gov->rating;
- ret_gov = gov;
- }
- }
-
- return ret_gov;
-}
-
-/**
- * cpuidle_unregister_governor - unregisters a governor
- * @gov: the governor
- */
-void cpuidle_unregister_governor(struct cpuidle_governor *gov)
-{
- if (!gov)
- return;
-
- mutex_lock(&cpuidle_lock);
- if (gov == cpuidle_curr_governor) {
- struct cpuidle_governor *new_gov;
- new_gov = cpuidle_replace_governor(gov->rating);
- cpuidle_switch_governor(new_gov);
- }
- list_del(&gov->governor_list);
- mutex_unlock(&cpuidle_lock);
-}
-
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 8739cc05228c..e918b6d0caf7 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -52,11 +52,12 @@ static ssize_t show_current_driver(struct device *dev,
char *buf)
{
ssize_t ret;
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_driver)
- ret = sprintf(buf, "%s\n", cpuidle_driver->name);
+ drv = cpuidle_get_driver();
+ if (drv)
+ ret = sprintf(buf, "%s\n", drv->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index c99c00d35d34..a0b2f7e0eedb 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d,
{
struct devfreq *df = to_devfreq(d);
struct device *dev = df->dev.parent;
- struct opp *opp;
+ struct dev_pm_opp *opp;
ssize_t count = 0;
unsigned long freq = 0;
rcu_read_lock();
do {
- opp = opp_find_freq_ceil(dev, &freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
break;
@@ -993,10 +993,10 @@ static int __init devfreq_init(void)
}
devfreq_wq = create_freezable_workqueue("devfreq_wq");
- if (IS_ERR(devfreq_wq)) {
+ if (!devfreq_wq) {
class_destroy(devfreq_class);
pr_err("%s: couldn't create workqueue\n", __FILE__);
- return PTR_ERR(devfreq_wq);
+ return -ENOMEM;
}
devfreq_class->dev_groups = devfreq_groups;
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
- u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq,
+ u32 flags)
{
- struct opp *opp;
+ struct dev_pm_opp *opp;
if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
/* The freq is an upper bound. opp should be lower */
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
- opp = opp_find_freq_ceil(dev, freq);
+ opp = dev_pm_opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
if (opp == ERR_PTR(-ERANGE))
- opp = opp_find_freq_floor(dev, freq);
+ opp = dev_pm_opp_find_freq_floor(dev, freq);
}
return opp;
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
int ret = 0;
rcu_read_lock();
- nh = opp_get_notifier(dev);
+ nh = dev_pm_opp_get_notifier(dev);
if (IS_ERR(nh))
ret = PTR_ERR(nh);
rcu_read_unlock();
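
The opp_* to dev_pm_opp_* rename above keeps the calling convention: lookups still run under rcu_read_lock() and the returned pointer is only valid inside that read-side section. A condensed sketch of the pattern (assuming the dev_pm_opp API of this era and a struct device *dev in scope):

/* Sketch: find the highest OPP at or below a requested rate and read
 * its frequency/voltage before leaving the RCU read-side section. */
struct dev_pm_opp *opp;
unsigned long freq = 200000000;	/* hypothetical requested rate */
unsigned long rate, volt;

rcu_read_lock();
opp = dev_pm_opp_find_freq_floor(dev, &freq);
if (IS_ERR(opp)) {
	rcu_read_unlock();
	return PTR_ERR(opp);
}
rate = dev_pm_opp_get_freq(opp);
volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();	/* opp must not be dereferenced past this point */
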
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
index c5f86d8caca3..cede6f71cd63 100644
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ b/drivers/devfreq/exynos/exynos4_bus.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long freq;
unsigned long old_freq = data->curr_oppinfo.rate;
struct busfreq_opp_info new_oppinfo;
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
rcu_read_unlock();
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
freq = new_oppinfo.rate;
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data)
exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
for (i = LV_0; i < EX4210_LV_NUM; i++) {
- err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
exynos4210_busclk_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
}
for (i = 0; i < EX4x12_LV_NUM; i++) {
- ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
exynos4x12_mifclk_table[i].volt);
if (ret) {
dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
mutex_unlock(&data->lock);
return PTR_ERR(opp);
}
- new_oppinfo.rate = opp_get_freq(opp);
- new_oppinfo.volt = opp_get_voltage(opp);
+ new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@ unlock:
static int exynos4_busfreq_probe(struct platform_device *pdev)
{
struct busfreq_data *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
int err = 0;
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ opp = dev_pm_opp_find_freq_floor(dev,
+ &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos4_devfreq_profile.initial_freq);
return PTR_ERR(opp);
}
- data->curr_oppinfo.rate = opp_get_freq(opp);
- data->curr_oppinfo.volt = opp_get_voltage(opp);
+ data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+ data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
platform_set_drvdata(pdev, data);
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
index 574b16b59be5..a60da3c1c48e 100644
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ b/drivers/devfreq/exynos/exynos5_bus.c
@@ -15,10 +15,9 @@
#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/suspend.h>
-#include <linux/opp.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long old_freq, freq;
unsigned long volt;
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
return PTR_ERR(opp);
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
old_freq = data->curr_freq;
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data)
int i, err = 0;
for (i = LV_0; i < _LV_END; i++) {
- err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+ err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
exynos5_int_opp_table[i].volt);
if (err) {
dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
{
struct busfreq_data_int *data = container_of(this,
struct busfreq_data_int, pm_notifier);
- struct opp *opp;
+ struct dev_pm_opp *opp;
unsigned long maxfreq = ULONG_MAX;
unsigned long freq;
unsigned long volt;
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
rcu_read_lock();
- opp = opp_find_freq_floor(data->dev, &maxfreq);
+ opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
if (IS_ERR(opp)) {
rcu_read_unlock();
err = PTR_ERR(opp);
goto unlock;
}
- freq = opp_get_freq(opp);
- volt = opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@ unlock:
static int exynos5_busfreq_int_probe(struct platform_device *pdev)
{
struct busfreq_data_int *data;
- struct opp *opp;
+ struct dev_pm_opp *opp;
struct device *dev = &pdev->dev;
struct device_node *np;
unsigned long initial_freq;
@@ -351,46 +350,43 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err = exynos5250_init_int_tables(data);
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
- data->int_clk = clk_get(dev, "int_clk");
+ data->int_clk = devm_clk_get(dev, "int_clk");
if (IS_ERR(data->int_clk)) {
dev_err(dev, "Cannot get clock \"int_clk\"\n");
- err = PTR_ERR(data->int_clk);
- goto err_clock;
+ return PTR_ERR(data->int_clk);
}
rcu_read_lock();
- opp = opp_find_freq_floor(dev,
+ opp = dev_pm_opp_find_freq_floor(dev,
&exynos5_devfreq_int_profile.initial_freq);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos5_devfreq_int_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ return PTR_ERR(opp);
}
- initial_freq = opp_get_freq(opp);
- initial_volt = opp_get_voltage(opp);
+ initial_freq = dev_pm_opp_get_freq(opp);
+ initial_volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
data->curr_freq = initial_freq;
err = clk_set_rate(data->int_clk, initial_freq * 1000);
if (err) {
dev_err(dev, "Failed to set initial frequency\n");
- goto err_opp_add;
+ return err;
}
err = exynos5_int_setvolt(data, initial_volt);
if (err)
- goto err_opp_add;
+ return err;
platform_set_drvdata(pdev, data);
@@ -419,12 +415,6 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
err_devfreq_add:
devfreq_remove_device(data->devfreq);
- platform_set_drvdata(pdev, NULL);
-err_opp_add:
- clk_put(data->int_clk);
-err_clock:
- regulator_put(data->vdd_int);
-err_regulator:
return err;
}
@@ -435,9 +425,6 @@ static int exynos5_busfreq_int_remove(struct platform_device *pdev)
pm_qos_remove_request(&data->int_req);
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- clk_put(data->int_clk);
- platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -479,7 +466,7 @@ static int __init exynos5_busfreq_int_init(void)
exynos5_devfreq_pdev =
platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
- if (IS_ERR_OR_NULL(exynos5_devfreq_pdev)) {
+ if (IS_ERR(exynos5_devfreq_pdev)) {
ret = PTR_ERR(exynos5_devfreq_pdev);
goto out1;
}
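
The exynos5 probe above loses its manual unwinding by switching to managed resources. A generic sketch of the devm_* pattern it relies on (hypothetical device and supply names):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* Sketch: devm_-managed resources are released automatically on probe
 * failure and on driver detach, so error paths can simply return. */
static int example_probe(struct platform_device *pdev)
{
	struct regulator *reg;
	struct clk *clk;

	reg = devm_regulator_get(&pdev->dev, "vdd_example");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	clk = devm_clk_get(&pdev->dev, "example_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* the regulator is released for us */

	return 0;
}
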
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 57fe1ae32a0d..43959edd4291 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -193,16 +193,14 @@ out:
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, intel_handle;
- acpi_status status;
+ acpi_handle dhandle;
int ret;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM")) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
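
acpi_has_method(), used above, is a convenience wrapper for the existence test that several hunks in this series previously open-coded; it is roughly equivalent to this sketch:

/* Sketch: what acpi_has_method(dhandle, "_DSM") replaces. */
acpi_handle tmp;
bool has_dsm = ACPI_SUCCESS(acpi_get_handle(dhandle, "_DSM", &tmp));
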
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e182719..cfbeee607b3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
- acpi_handle dhandle, nvidia_handle;
- acpi_status status;
+ acpi_handle dhandle;
int retval = 0;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
- status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(dhandle, "_DSM"))
return false;
- }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1336193b04b..fd7ce374f812 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -854,10 +854,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
};
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object params[4], *obj;
+ union acpi_object params[4];
struct acpi_object_list input;
struct acpi_device *adev;
+ unsigned long long value;
acpi_handle handle;
handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +878,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
params[3].package.count = 0;
params[3].package.elements = NULL;
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+ &value))) {
dev_err(&client->dev, "device _DSM execution failed\n");
return -ENODEV;
}
- obj = (union acpi_object *)buf.pointer;
- if (obj->type != ACPI_TYPE_INTEGER) {
- dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
- obj->type);
- kfree(buf.pointer);
- return -EINVAL;
- }
-
- pdata->hid_descriptor_address = obj->integer.value;
+ pdata->hid_descriptor_address = value;
- kfree(buf.pointer);
return 0;
}
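
acpi_evaluate_integer(), as used above, bundles the evaluate, type-check and free steps visible in the removed lines. A minimal usage sketch (hypothetical method name; assumes an acpi_handle in scope):

/* Sketch: evaluate an ACPI method expected to return a single integer. */
unsigned long long value;
acpi_status status;

status = acpi_evaluate_integer(handle, "EXMP", NULL, &value);
if (ACPI_FAILURE(status))
	return -ENODEV;
pr_debug("EXMP returned %llu\n", value);
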
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3be58f89ac77..75ba8608383e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -254,10 +254,12 @@ static int i2c_device_probe(struct device *dev)
client->flags & I2C_CLIENT_WAKE);
dev_dbg(dev, "probe\n");
+ acpi_dev_pm_attach(&client->dev, true);
status = driver->probe(client, i2c_match_id(driver->id_table, client));
if (status) {
client->driver = NULL;
i2c_set_clientdata(client, NULL);
+ acpi_dev_pm_detach(&client->dev, true);
}
return status;
}
@@ -283,6 +285,7 @@ static int i2c_device_remove(struct device *dev)
client->driver = NULL;
i2c_set_clientdata(client, NULL);
}
+ acpi_dev_pm_detach(&client->dev, true);
return status;
}
@@ -1111,8 +1114,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
if (ret < 0 || !info.addr)
return AE_OK;
+ adev->power.flags.ignore_parent = true;
strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
if (!i2c_new_device(adapter, &info)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
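
The i2c-core hunks pair acpi_dev_pm_attach() around the driver probe with acpi_dev_pm_detach() on failure and on removal. A condensed sketch of that pairing for a generic bus probe (simplified; do_driver_probe() is a hypothetical stand-in):

/* Sketch: wrap ACPI power management around a bus's probe path. */
static int example_bus_probe(struct device *dev)
{
	int status;

	acpi_dev_pm_attach(dev, true);		/* power up via ACPI if possible */
	status = do_driver_probe(dev);		/* hypothetical driver callback */
	if (status)
		acpi_dev_pm_detach(dev, true);	/* undo on probe failure */
	return status;
}
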
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f116d664b473..3226ce98fb18 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
{
.name = "C1-NHM",
.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
{
.name = "C1-SNB",
.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
{
.name = "C1-IVB",
.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
{
.name = "C1-HSW",
.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E-ATM",
.desc = "MWAIT 0x00",
@@ -390,7 +390,7 @@ static int cpu_hotplug_notify(struct notifier_block *n,
int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev;
- switch (action & 0xf) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
@@ -490,7 +490,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
*/
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
{
unsigned int eax, ebx, ecx;
const struct x86_cpu_id *id;
@@ -558,7 +558,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +628,7 @@ static int intel_idle_cpu_init(int cpu)
int num_substates, mwait_hint, mwait_cstate, mwait_substate;
if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ break;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
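
Masking the hotplug action with ~CPU_TASKS_FROZEN, as the intel_idle hunk above now does, lets one switch statement cover both the normal and the *_FROZEN notifications. A generic sketch of the idiom (hypothetical helpers):

/* Sketch: treat CPU_ONLINE/CPU_ONLINE_FROZEN (and friends) identically. */
static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		example_cpu_up(cpu);	/* hypothetical */
		break;
	case CPU_DEAD:
		example_cpu_down(cpu);	/* hypothetical */
		break;
	}
	return NOTIFY_OK;
}
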
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a56a10..900946950230 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -88,7 +88,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
- pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+ pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
if (!pdev) {
/* warning will be printed below */
break;
@@ -99,7 +99,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
}
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ segment, scope->bus, path->device, path->function);
*dev = NULL;
return 0;
}
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673dbb23d..ab86902fd9ff 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -686,12 +686,12 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly due to the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
ir_hpet[ir_hpet_num].iommu = iommu;
ir_hpet[ir_hpet_num].id = scope->enumeration_id;
ir_hpet_num++;
@@ -714,13 +714,13 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
* Access PCI directly due to the PCI
* subsystem isn't initialized yet.
*/
- bus = read_pci_config_byte(bus, path->dev, path->fn,
+ bus = read_pci_config_byte(bus, path->device, path->function,
PCI_SECONDARY_BUS);
path++;
}
ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
ir_ioapic[ir_ioapic_num].iommu = iommu;
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 2a47e82821da..5440131cd4ee 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
- acpi_handle tmp;
unsigned long long removable;
- status = acpi_get_handle(handle, "_ADR", &tmp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(handle, "_ADR"))
return 0;
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "_EJ0"))
return 1;
status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
if (ACPI_SUCCESS(status) && removable)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1ea75236a15f..4a0a9ac7a1e5 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -871,21 +871,17 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
put_bridge(bridge);
}
-static void hotplug_event_work(struct work_struct *work)
+static void hotplug_event_work(void *data, u32 type)
{
- struct acpiphp_context *context;
- struct acpi_hp_work *hp_work;
+ struct acpiphp_context *context = data;
+ acpi_handle handle = context->handle;
- hp_work = container_of(work, struct acpi_hp_work, work);
- context = hp_work->context;
acpi_scan_lock_acquire();
- hotplug_event(hp_work->handle, hp_work->type, context);
+ hotplug_event(handle, type, context);
acpi_scan_lock_release();
- acpi_evaluate_hotplug_ost(hp_work->handle, hp_work->type,
- ACPI_OST_SC_SUCCESS, NULL);
- kfree(hp_work); /* allocated in handle_hotplug_event() */
+ acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
put_bridge(context->func.parent);
}
@@ -936,10 +932,10 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context) {
+ if (context && !WARN_ON(context->handle != handle)) {
get_bridge(context->func.parent);
acpiphp_put_context(context);
- alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+ acpi_hotplug_execute(hotplug_event_work, context, type);
mutex_unlock(&acpiphp_context_lock);
return;
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b0299e6d9a3f..dfd1f59de729 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -181,7 +181,6 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev)
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
- acpi_handle tmp;
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ if (!handle || acpi_has_method(handle, "_EJ0"))
return -ENODEV;
switch (state) {
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a6afd4108beb..aefcc32e5634 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -190,16 +190,10 @@ struct eeepc_laptop {
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- struct acpi_object_list params;
- union acpi_object in_obj;
acpi_status status;
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = val;
+ status = acpi_execute_simple_method(handle, (char *)method, val);
- status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
return (status == AE_OK ? 0 : -1);
}
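
acpi_execute_simple_method(), used in the rewritten write_acpi_int() above, hides the one-element acpi_object_list that callers previously had to build for a single integer argument. A minimal sketch (hypothetical method name; assumes an acpi_handle in scope):

/* Sketch: call an ACPI method with one integer argument, no return value. */
acpi_status status;

status = acpi_execute_simple_method(handle, "EXMP", 1);
if (ACPI_FAILURE(status))
	return -EIO;
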
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 52b8a97efde1..9d30d69aa78f 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
{ .type = ACPI_TYPE_INTEGER }
};
struct acpi_object_list arg_list = { 4, &params[0] };
- struct acpi_buffer output;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_handle handle = NULL;
status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
params[2].integer.value = arg1;
params[3].integer.value = arg2;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+ status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return -ENODEV;
}
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
- "return an integer\n",
- cmd, arg0, arg1, arg2);
- return -ENODEV;
- }
-
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
- cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
- return out_obj.integer.value;
+ cmd, arg0, arg1, arg2, (int)value);
+ return value;
}
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
static int set_lcd_level(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@ static int set_lcd_level(int level)
return -ENODEV;
}
- arg0.integer.value = level;
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -345,8 +330,6 @@ static int set_lcd_level(int level)
static int set_lcd_level_alt(int level)
{
acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
acpi_handle handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level)
return -ENODEV;
}
- arg0.integer.value = level;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+ status = acpi_execute_simple_method(handle, NULL, level);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
- acpi_handle handle;
pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
- if (ACPI_SUCCESS(acpi_get_handle(NULL,
- "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+ if (acpi_has_method(NULL,
+ "\\_SB.PCI0.LPCB.FJEX.SBL2"))
use_alt_lcd_levels = 1;
else
use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
static int acpi_fujitsu_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
{
- acpi_handle handle;
int result = 0;
int state = 0;
struct input_dev *input;
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
fujitsu_hotkey->dev = device;
- if (ACPI_SUCCESS
- (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+ if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
(acpi_evaluate_object
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 89c4519d48ac..6788acc22ab9 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -72,8 +72,15 @@ enum {
VPCCMD_W_BL_POWER = 0x33,
};
+struct ideapad_rfk_priv {
+ int dev;
+ struct ideapad_private *priv;
+};
+
struct ideapad_private {
+ struct acpi_device *adev;
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+ struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
struct backlight_device *blightdev;
@@ -81,8 +88,6 @@ struct ideapad_private {
unsigned long cfg;
};
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
*/
static int debugfs_status_show(struct seq_file *s, void *data)
{
+ struct ideapad_private *priv = s->private;
unsigned long value;
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+ if (!priv)
+ return -EINVAL;
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
seq_printf(s, "Backlight max:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
seq_printf(s, "Backlight now:\t%lu\n", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
seq_printf(s, "Radio status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
seq_printf(s, "Wifi status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
seq_printf(s, "BT status:\t%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
seq_printf(s, "3G status:\t%s(%lu)\n",
value ? "On" : "Off", value);
seq_printf(s, "=====================\n");
- if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
seq_printf(s, "Touchpad status:%s(%lu)\n",
value ? "On" : "Off", value);
- if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
seq_printf(s, "Camera status:\t%s(%lu)\n",
value ? "On" : "Off", value);
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data)
static int debugfs_status_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_status_show, NULL);
+ return single_open(file, debugfs_status_show, inode->i_private);
}
static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = {
static int debugfs_cfg_show(struct seq_file *s, void *data)
{
- if (!ideapad_priv) {
+ struct ideapad_private *priv = s->private;
+
+ if (!priv) {
seq_printf(s, "cfg: N/A\n");
} else {
seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
- ideapad_priv->cfg);
- if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+ priv->cfg);
+ if (test_bit(CFG_BT_BIT, &priv->cfg))
seq_printf(s, "Bluetooth ");
- if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_3G_BIT, &priv->cfg))
seq_printf(s, "3G ");
- if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_WIFI_BIT, &priv->cfg))
seq_printf(s, "Wireless ");
- if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+ if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
seq_printf(s, "Camera ");
seq_printf(s, "\nGraphic: ");
- switch ((ideapad_priv->cfg)&0x700) {
+ switch ((priv->cfg)&0x700) {
case 0x100:
seq_printf(s, "Intel");
break;
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
static int debugfs_cfg_open(struct inode *inode, struct file *file)
{
- return single_open(file, debugfs_cfg_show, NULL);
+ return single_open(file, debugfs_cfg_show, inode->i_private);
}
static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv)
goto errout;
}
- node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
&debugfs_cfg_fops);
if (!node) {
pr_err("failed to create cfg in debugfs");
goto errout;
}
- node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+ node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
&debugfs_status_fops);
if (!node) {
pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
if (sscanf(buf, "%i", &state) != 1)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
if (ret < 0)
return -EIO;
return count;
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev,
char *buf)
{
unsigned long result;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
- if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
return sprintf(buf, "-1\n");
return sprintf(buf, "%lu\n", result);
}
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
const char *buf, size_t count)
{
int ret, state;
+ struct ideapad_private *priv = dev_get_drvdata(dev);
if (!count)
return 0;
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
return -EINVAL;
if (state < 0 || state > 4 || state == 3)
return -EINVAL;
- ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
if (ret < 0)
return -EIO;
return count;
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
else if (attr == &dev_attr_fan_mode.attr) {
unsigned long value;
- supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+ supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+ &value);
} else
supported = true;
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
static int ideapad_rfk_set(void *data, bool blocked)
{
- unsigned long opcode = (unsigned long)data;
+ struct ideapad_rfk_priv *priv = data;
- return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
}
static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
unsigned long hw_blocked;
int i;
- if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
return;
hw_blocked = !hw_blocked;
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
rfkill_set_hw_state(priv->rfk[i], hw_blocked);
}
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int ret;
unsigned long sw_blocked;
if (no_bt_rfkill &&
(ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
/* Force to enable bluetooth when no_bt_rfkill=1 */
- write_ec_cmd(ideapad_handle,
+ write_ec_cmd(priv->adev->handle,
ideapad_rfk_data[dev].opcode, 1);
return 0;
}
-
- priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
- ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
- (void *)(long)dev);
+ priv->rfk_priv[dev].dev = dev;
+ priv->rfk_priv[dev].priv = priv;
+
+ priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+ &priv->platform_device->dev,
+ ideapad_rfk_data[dev].type,
+ &ideapad_rfk_ops,
+ &priv->rfk_priv[dev]);
if (!priv->rfk[dev])
return -ENOMEM;
- if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+ if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
&sw_blocked)) {
rfkill_init_sw_state(priv->rfk[dev], 0);
} else {
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
return 0;
}
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
if (!priv->rfk[dev])
return;
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
/*
* Platform device
*/
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
{
- int result;
-
- priv->platform_device = platform_device_alloc("ideapad", -1);
- if (!priv->platform_device)
- return -ENOMEM;
- platform_set_drvdata(priv->platform_device, priv);
-
- result = platform_device_add(priv->platform_device);
- if (result)
- goto fail_platform_device;
-
- result = sysfs_create_group(&priv->platform_device->dev.kobj,
+ return sysfs_create_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- if (result)
- goto fail_sysfs;
- return 0;
-
-fail_sysfs:
- platform_device_del(priv->platform_device);
-fail_platform_device:
- platform_device_put(priv->platform_device);
- return result;
}
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
{
sysfs_remove_group(&priv->platform_device->dev.kobj,
&ideapad_attribute_group);
- platform_device_unregister(priv->platform_device);
}
/*
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
{
unsigned long long_pressed;
- if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
return;
if (long_pressed)
ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
{
unsigned long bit, value;
- read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+ read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
for (bit = 0; bit < 16; bit++) {
if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
*/
static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
{
+ struct ideapad_private *priv = bl_get_data(blightdev);
unsigned long now;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (!priv)
+ return -EINVAL;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
return now;
}
static int ideapad_backlight_update_status(struct backlight_device *blightdev)
{
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+ struct ideapad_private *priv = bl_get_data(blightdev);
+
+ if (!priv)
+ return -EINVAL;
+
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
blightdev->props.brightness))
return -EIO;
- if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+ if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
return -EIO;
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
struct backlight_properties props;
unsigned long max, now, power;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
return -EIO;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return -EIO;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
if (!blightdev)
return;
- if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
return;
blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/* if we control brightness via acpi video driver */
if (priv->blightdev == NULL) {
- read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+ read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
return;
}
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/*
* module init/exit
*/
-static const struct acpi_device_id ideapad_device_ids[] = {
- { "VPC2004", 0},
- { "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
unsigned long value;
/* Without reading from EC touchpad LED doesn't switch state */
- if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
/* Some IdeaPads don't really turn off touchpad - they only
* switch the LED state. We (de)activate KBC AUX port to turn
* touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
}
}
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ideapad_private *priv = data;
+ unsigned long vpc1, vpc2, vpc_bit;
+
+ if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+ return;
+ if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+ return;
+
+ vpc1 = (vpc2 << 8) | vpc1;
+ for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+ if (test_bit(vpc_bit, &vpc1)) {
+ switch (vpc_bit) {
+ case 9:
+ ideapad_sync_rfk_state(priv);
+ break;
+ case 13:
+ case 11:
+ case 7:
+ case 6:
+ ideapad_input_report(priv, vpc_bit);
+ break;
+ case 5:
+ ideapad_sync_touchpad_state(priv);
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 3:
+ ideapad_input_novokey(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
+ default:
+ pr_info("Unknown event: %lu\n", vpc_bit);
+ }
+ }
+ }
+}
+
+static int ideapad_acpi_add(struct platform_device *pdev)
{
int ret, i;
int cfg;
struct ideapad_private *priv;
+ struct acpi_device *adev;
+
+ ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+ if (ret)
+ return -ENODEV;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- dev_set_drvdata(&adevice->dev, priv);
- ideapad_priv = priv;
- ideapad_handle = adevice->handle;
+
+ dev_set_drvdata(&pdev->dev, priv);
priv->cfg = cfg;
+ priv->adev = adev;
+ priv->platform_device = pdev;
- ret = ideapad_platform_init(priv);
+ ret = ideapad_sysfs_init(priv);
if (ret)
- goto platform_failed;
+ goto sysfs_failed;
ret = ideapad_debugfs_init(priv);
if (ret)
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
- ideapad_register_rfkill(adevice, i);
+ ideapad_register_rfkill(priv, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(priv);
- ideapad_sync_touchpad_state(adevice);
+ ideapad_sync_touchpad_state(priv);
if (!acpi_video_backlight_support()) {
ret = ideapad_backlight_init(priv);
if (ret && ret != -ENODEV)
goto backlight_failed;
}
+ ret = acpi_install_notify_handler(adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+ if (ret)
+ goto notification_failed;
return 0;
-
+notification_failed:
+ ideapad_backlight_exit(priv);
backlight_failed:
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
input_failed:
ideapad_debugfs_exit(priv);
debugfs_failed:
- ideapad_platform_exit(priv);
-platform_failed:
+ ideapad_sysfs_exit(priv);
+sysfs_failed:
kfree(priv);
return ret;
}
-static int ideapad_acpi_remove(struct acpi_device *adevice)
+static int ideapad_acpi_remove(struct platform_device *pdev)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
int i;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
- ideapad_unregister_rfkill(adevice, i);
+ ideapad_unregister_rfkill(priv, i);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
- ideapad_platform_exit(priv);
- dev_set_drvdata(&adevice->dev, NULL);
+ ideapad_sysfs_exit(priv);
+ dev_set_drvdata(&pdev->dev, NULL);
kfree(priv);
return 0;
}
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
{
- struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
- acpi_handle handle = adevice->handle;
- unsigned long vpc1, vpc2, vpc_bit;
-
- if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
- return;
- if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
- return;
+ struct ideapad_private *priv;
- vpc1 = (vpc2 << 8) | vpc1;
- for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
- if (test_bit(vpc_bit, &vpc1)) {
- switch (vpc_bit) {
- case 9:
- ideapad_sync_rfk_state(priv);
- break;
- case 13:
- case 11:
- case 7:
- case 6:
- ideapad_input_report(priv, vpc_bit);
- break;
- case 5:
- ideapad_sync_touchpad_state(adevice);
- break;
- case 4:
- ideapad_backlight_notify_brightness(priv);
- break;
- case 3:
- ideapad_input_novokey(priv);
- break;
- case 2:
- ideapad_backlight_notify_power(priv);
- break;
- case 0:
- ideapad_check_special_buttons(priv);
- break;
- default:
- pr_info("Unknown event: %lu\n", vpc_bit);
- }
- }
- }
-}
+ if (!device)
+ return -EINVAL;
+ priv = dev_get_drvdata(device);
-static int ideapad_acpi_resume(struct device *device)
-{
- ideapad_sync_rfk_state(ideapad_priv);
- ideapad_sync_touchpad_state(to_acpi_device(device));
+ ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(priv);
return 0;
}
-
+#endif
static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
-static struct acpi_driver ideapad_acpi_driver = {
- .name = "ideapad_acpi",
- .class = "IdeaPad",
- .ids = ideapad_device_ids,
- .ops.add = ideapad_acpi_add,
- .ops.remove = ideapad_acpi_remove,
- .ops.notify = ideapad_acpi_notify,
- .drv.pm = &ideapad_pm,
- .owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+ { "VPC2004", 0},
+ { "", 0},
};
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+ .probe = ideapad_acpi_add,
+ .remove = ideapad_acpi_remove,
+ .driver = {
+ .name = "ideapad_acpi",
+ .owner = THIS_MODULE,
+ .pm = &ideapad_pm,
+ .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ },
+};
+
+module_platform_driver(ideapad_acpi_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("IdeaPad ACPI Extras");
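
The ideapad conversion above moves from an acpi_driver to a platform driver matched through an ACPI ID table. A stripped-down sketch of that registration shape (hypothetical _HID and callbacks):

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct acpi_device_id example_acpi_ids[] = {
	{ "EXMP0001", 0 },	/* hypothetical ACPI _HID */
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, example_acpi_ids);

/* Sketch: an ACPI-enumerated platform device; the companion acpi_device
 * is looked up from the handle attached to the struct device. */
static int example_probe(struct platform_device *pdev)
{
	struct acpi_device *adev;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return -ENODEV;
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name = "example_acpi",
		.owner = THIS_MODULE,
		.acpi_match_table = ACPI_PTR(example_acpi_ids),
	},
};
module_platform_driver(example_driver);
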
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index 41b740cb28bc..a2083a9e5662 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
+ unsigned long long value;
acpi_status status;
acpi = to_acpi_device(dev);
- status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- return sprintf(buf, "%lld\n", result->integer.value);
+ return sprintf(buf, "%lld\n", value);
}
static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
const char *buf, size_t count)
{
struct acpi_device *acpi;
- struct acpi_object_list input;
- union acpi_object param;
acpi_status status;
unsigned long value;
int error;
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
if (error)
return error;
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = value;
-
- input.count = 1;
- input.pointer = &param;
-
- status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 52259dcabecb..1838400dc036 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL");
static int smartconnect_acpi_init(struct acpi_device *acpi)
{
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *result;
- union acpi_object param;
+ unsigned long long value;
acpi_status status;
- status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+ status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
if (!ACPI_SUCCESS(status))
return -EINVAL;
- result = output.pointer;
-
- if (result->type != ACPI_TYPE_INTEGER) {
- kfree(result);
- return -EINVAL;
- }
-
- if (result->integer.value & 0x1) {
- param.type = ACPI_TYPE_INTEGER;
- param.integer.value = 0;
-
- input.count = 1;
- input.pointer = &param;
-
+ if (value & 0x1) {
dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
- status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
- NULL);
+ status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
}
- kfree(result);
-
return 0;
}
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index d6cfc1558c2f..11244f8703c4 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = {
static int intel_menlow_memory_add(struct acpi_device *device)
{
int result = -ENODEV;
- acpi_status status = AE_OK;
- acpi_handle dummy;
struct thermal_cooling_device *cdev;
if (!device)
return -EINVAL;
- status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
goto end;
- status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
goto end;
cdev = thermal_cooling_device_register("Memory controller", device,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 13ec195f0ca6..47caab0ea7a1 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1508,7 +1508,6 @@ static void sony_nc_function_resume(void)
static int sony_nc_resume(struct device *dev)
{
struct sony_nc_value *item;
- acpi_handle handle;
for (item = sony_nc_values; item->name; item++) {
int ret;
@@ -1523,15 +1522,13 @@ static int sony_nc_resume(struct device *dev)
}
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle)))
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
sony_nc_function_resume();
return 0;
@@ -2682,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
static void sony_nc_backlight_setup(void)
{
- acpi_handle unused;
int max_brightness = 0;
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
@@ -2717,8 +2713,7 @@ static void sony_nc_backlight_setup(void)
sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
- &unused))) {
+ } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
ops = &sony_backlight_ops;
max_brightness = SONY_MAX_BRIGHTNESS - 1;
@@ -2750,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device)
{
acpi_status status;
int result = 0;
- acpi_handle handle;
struct sony_nc_value *item;
pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2790,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device)
goto outplatform;
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
int arg = 1;
if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
dprintk("Doing SNC setup\n");
/* retrieve the available handles */
result = sony_nc_handles_setup(sony_pf_device);
@@ -2821,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiget as described in the DSDT */
for (; item->acpiget && *item->acpiget; ++item->acpiget) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiget,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiget)) {
dprintk("Found %s getter: %s\n",
item->name, *item->acpiget);
item->devattr.attr.mode |= S_IRUGO;
@@ -2833,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device)
/* find the available acpiset as described in the DSDT */
for (; item->acpiset && *item->acpiset; ++item->acpiset) {
- if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
- *item->acpiset,
- &handle))) {
+ if (acpi_has_method(sony_nc_acpi_handle,
+ *item->acpiset)) {
dprintk("Found %s setter: %s\n",
item->name, *item->acpiset);
item->devattr.attr.mode |= S_IWUSR;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0b7efb269cf1..05e046aa5e31 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -701,6 +701,14 @@ static void __init drv_acpi_handle_init(const char *name,
static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
u32 level, void *context, void **return_value)
{
+ struct acpi_device *dev;
+ if (!strcmp(context, "video")) {
+ if (acpi_bus_get_device(handle, &dev))
+ return AE_OK;
+ if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ }
+
*(acpi_handle *)return_value = handle;
return AE_CTRL_TERMINATE;
@@ -713,10 +721,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name,
acpi_status status;
acpi_handle device_found;
- BUG_ON(!name || !hid || !handle);
+ BUG_ON(!name || !handle);
vdbg_printk(TPACPI_DBG_INIT,
"trying to locate ACPI handle for %s, using HID %s\n",
- name, hid);
+ name, hid ? hid : "NULL");
memset(&device_found, 0, sizeof(device_found));
status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6091,19 +6099,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
+ struct acpi_device *device, *child;
int rc;
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+ if (acpi_bus_get_device(handle, &device))
+ return 0;
+
+ rc = 0;
+ list_for_each_entry(child, &device->children, node) {
+ acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+ NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
pr_err("Unknown _BCL data, please report this to %s\n",
- TPACPI_MAIL);
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
}
- } else {
- return 0;
+ break;
}
kfree(buffer.pointer);
@@ -6119,7 +6136,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
acpi_handle video_device;
int bcl_levels = 0;
- tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+ tpacpi_acpi_handle_locate("video", NULL, &video_device);
if (video_device)
bcl_levels = tpacpi_query_bcl_levels(video_device);
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 4ab618c63b45..67897c8740ba 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
{
acpi_status status;
- union acpi_object fncx_params[1] = {
- { .type = ACPI_TYPE_INTEGER }
- };
- struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
- fncx_params[0].integer.value = state ? 0x86 : 0x87;
- status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+ status = acpi_execute_simple_method(device->handle, "FNCX",
+ state ? 0x86 : 0x87);
if (ACPI_FAILURE(status)) {
pr_err("Unable to switch FNCX notifications\n");
return -ENODEV;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index eb3467ea6d86..0cfadb65f7c6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
static int write_acpi_int(const char *methodName, int val)
{
- struct acpi_object_list params;
- union acpi_object in_objs[1];
acpi_status status;
- params.count = ARRAY_SIZE(in_objs);
- params.pointer = in_objs;
- in_objs[0].type = ACPI_TYPE_INTEGER;
- in_objs[0].integer.value = val;
-
- status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+ status = acpi_execute_simple_method(NULL, (char *)methodName, val);
return (status == AE_OK) ? 0 : -EIO;
}
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work)
*/
static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
{
- struct acpi_buffer buf;
- union acpi_object out_obj;
+ unsigned long long value;
acpi_status status;
- buf.pointer = &out_obj;
- buf.length = sizeof(out_obj);
-
- status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
- NULL, &buf);
- if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+ status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+ NULL, &value);
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI INFO method execution failed\n");
return -EIO;
}
- return out_obj.integer.value;
+ return value;
}
static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
acpi_status status;
- acpi_handle ec_handle, handle;
+ acpi_handle ec_handle;
int error;
u32 hci_result;
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
*/
status = AE_ERROR;
ec_handle = ec_get_handle();
- if (ec_handle)
- status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
- if (ACPI_SUCCESS(status)) {
+ if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
* Determine hotkey query interface. Prefer using the INFO
* method when it is available.
*/
- status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
dev->info_supported = 1;
- } else {
+ else {
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
if (hci_result == HCI_SUCCESS)
dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
static const char *find_hci_method(acpi_handle handle)
{
- acpi_status status;
- acpi_handle hci_handle;
-
- status = acpi_get_handle(handle, "GHCI", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "GHCI"))
return "GHCI";
- status = acpi_get_handle(handle, "SPFC", &hci_handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(handle, "SPFC"))
return "SPFC";
return NULL;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 601ea9512242..62e8c221d01e 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
{
struct guid_block *block = NULL;
char method[5];
- struct acpi_object_list input;
- union acpi_object params[1];
acpi_status status;
acpi_handle handle;
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
if (!block)
return AE_NOT_EXIST;
- input.count = 1;
- input.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = enable;
snprintf(method, 5, "WE%02X", block->notify_id);
- status = acpi_evaluate_object(handle, method, &input, NULL);
+ status = acpi_execute_simple_method(handle, method, enable);
if (status != AE_OK && status != AE_NOT_FOUND)
return status;
@@ -353,10 +347,10 @@ struct acpi_buffer *out)
{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
- acpi_handle handle, wc_handle;
+ acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
- struct acpi_object_list input, wc_input;
- union acpi_object wc_params[1], wq_params[1];
+ struct acpi_object_list input;
+ union acpi_object wq_params[1];
char method[5];
char wc_method[5] = "WC";
@@ -386,11 +380,6 @@ struct acpi_buffer *out)
* enable collection.
*/
if (block->flags & ACPI_WMI_EXPENSIVE) {
- wc_input.count = 1;
- wc_input.pointer = wc_params;
- wc_params[0].type = ACPI_TYPE_INTEGER;
- wc_params[0].integer.value = 1;
-
strncat(wc_method, block->object_id, 2);
/*
@@ -398,10 +387,9 @@ struct acpi_buffer *out)
* expensive, but have no corresponding WCxx method. So we
* should not fail if this happens.
*/
- wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
- if (ACPI_SUCCESS(wc_status))
- wc_status = acpi_evaluate_object(handle, wc_method,
- &wc_input, NULL);
+ if (acpi_has_method(handle, wc_method))
+ wc_status = acpi_execute_simple_method(handle,
+ wc_method, 1);
}
strcpy(method, "WQ");
@@ -414,9 +402,7 @@ struct acpi_buffer *out)
* the WQxx method failed - we should disable collection anyway.
*/
if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- wc_params[0].integer.value = 0;
- status = acpi_evaluate_object(handle,
- wc_method, &wc_input, NULL);
+ status = acpi_execute_simple_method(handle, wc_method, 0);
}
return status;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 34049b0b4c73..747826d99059 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
static int __init pnpacpi_add_device(struct acpi_device *device)
{
- acpi_handle temp = NULL;
- acpi_status status;
struct pnp_dev *dev;
char *pnpid;
struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
* If a PnPacpi device is not present , the device
* driver should not be loaded.
*/
- status = acpi_get_handle(device->handle, "_CRS", &temp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, "_CRS"))
return 0;
pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
- status = acpi_get_handle(device->handle, "_SRS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_SRS"))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
- status = acpi_get_handle(device->handle, "_DIS", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_DIS"))
dev->capabilities |= PNP_DISABLE;
if (strlen(acpi_device_name(device)))
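Taken together, the hunks above converge on three ACPI helpers: acpi_has_method() instead of probing with acpi_get_handle() and a throwaway handle, acpi_evaluate_integer() instead of decoding an ACPI_TYPE_INTEGER object by hand, and acpi_execute_simple_method() instead of building a one-element acpi_object_list. A minimal sketch of the resulting idiom follows; the method names "GETX"/"SETX" and the probe function are hypothetical and only illustrate the call shapes used in this series.

#include <linux/acpi.h>

static int example_probe(struct acpi_device *adev)
{
	unsigned long long value;
	acpi_status status;

	/* check for an optional control method without fetching its handle */
	if (!acpi_has_method(adev->handle, "GETX"))
		return -ENODEV;

	/* evaluate a method that returns a single integer */
	status = acpi_evaluate_integer(adev->handle, "GETX", NULL, &value);
	if (ACPI_FAILURE(status))
		return -EIO;

	/* pass one integer argument without an explicit acpi_object_list */
	status = acpi_execute_simple_method(adev->handle, "SETX", value);
	if (ACPI_FAILURE(status))
		return -EIO;

	return 0;
}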
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644
index 000000000000..a7c81b53d88a
--- /dev/null
+++ b/drivers/powercap/Kconfig
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+ bool "Generic powercap sysfs driver"
+ help
+ The power capping sysfs interface allows kernel subsystems to expose power
+ capping settings to user space in a consistent way. Usually, it consists
+ of multiple control types that determine which settings may be exposed and
+ power zones representing parts of the system that can be subject to power
+ capping.
+
+ If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+ tristate "Intel RAPL Support"
+ depends on X86
+ default n
+ ---help---
+ This enables support for the Intel Running Average Power Limit (RAPL)
+ technology which allows power limits to be enforced and monitored on
+ modern Intel processors (Sandy Bridge and later).
+
+ In RAPL, the platform level settings are divided into domains for
+ fine grained control. These domains include processor package, DRAM
+ controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+ 1), etc.
+
+endif
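The control type / power zone split described in the POWERCAP help text maps directly onto the registration calls intel_rapl.c makes later in this patch (powercap_register_control_type() and powercap_register_zone()). Below is a hedged sketch of a hypothetical client with one top-level zone and no constraints; the assumption that get_energy_uj and release are the only mandatory zone callbacks, and that nr_constraints may be zero with a NULL constraint-ops pointer, is mine rather than documented API policy.

#include <linux/module.h>
#include <linux/err.h>
#include <linux/powercap.h>

struct demo_zone {
	struct powercap_zone zone;	/* embedded, as in struct rapl_domain */
	u64 energy_uj;
};

static struct powercap_control_type *demo_ct;
static struct demo_zone demo;

static int demo_get_energy(struct powercap_zone *pz, u64 *energy)
{
	struct demo_zone *dz = container_of(pz, struct demo_zone, zone);

	*energy = dz->energy_uj;	/* a real driver would read hardware here */
	return 0;
}

static int demo_release(struct powercap_zone *pz)
{
	return 0;	/* nothing to free, the demo zone is static */
}

static struct powercap_zone_ops demo_zone_ops = {
	.get_energy_uj = demo_get_energy,
	.release = demo_release,
};

static int __init demo_init(void)
{
	struct powercap_zone *pz;

	demo_ct = powercap_register_control_type(NULL, "demo", NULL);
	if (IS_ERR(demo_ct))
		return PTR_ERR(demo_ct);

	pz = powercap_register_zone(&demo.zone, demo_ct, "demo-zone", NULL,
				    &demo_zone_ops, 0, NULL);
	if (IS_ERR(pz)) {
		powercap_unregister_control_type(demo_ct);
		return PTR_ERR(pz);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	powercap_unregister_zone(demo_ct, &demo.zone);
	powercap_unregister_control_type(demo_ct);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");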
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644
index 000000000000..0a21ef31372b
--- /dev/null
+++ b/drivers/powercap/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP) += powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644
index 000000000000..2a786c504460
--- /dev/null
+++ b/drivers/powercap/intel_rapl.c
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK 0xffffffff
+
+#define POWER_LIMIT1_MASK 0x7FFF
+#define POWER_LIMIT1_ENABLE BIT(15)
+#define POWER_LIMIT1_CLAMP BIT(16)
+
+#define POWER_LIMIT2_MASK (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP BIT_ULL(48)
+#define POWER_PACKAGE_LOCK BIT_ULL(63)
+#define POWER_PP_LOCK BIT(31)
+
+#define TIME_WINDOW1_MASK (0x7FULL<<17)
+#define TIME_WINDOW2_MASK (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET 0
+#define POWER_UNIT_MASK 0x0F
+
+#define ENERGY_UNIT_OFFSET 0x08
+#define ENERGY_UNIT_MASK 0x1F00
+
+#define TIME_UNIT_OFFSET 0x10
+#define TIME_UNIT_MASK 0xF0000
+
+#define POWER_INFO_MAX_MASK (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK 0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK 0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE (1000000)
+#define ENERGY_UNIT_SCALE (1000000)
+#define TIME_UNIT_SCALE (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
+
+enum unit_type {
+ ARBITRARY_UNIT, /* no translation */
+ POWER_UNIT,
+ ENERGY_UNIT,
+ TIME_UNIT,
+};
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM,/* DRAM control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+ RAPL_DOMAIN_MSR_LIMIT,
+ RAPL_DOMAIN_MSR_STATUS,
+ RAPL_DOMAIN_MSR_PERF,
+ RAPL_DOMAIN_MSR_POLICY,
+ RAPL_DOMAIN_MSR_INFO,
+ RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc.*/
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+
+#define DOMAIN_STATE_INACTIVE BIT(0)
+#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED BIT(2)
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+ const char *name;
+ enum rapl_domain_type id;
+ int msrs[RAPL_DOMAIN_MSR_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+ container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+ unsigned int id; /* physical package/socket id */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit_divisor;
+ unsigned int energy_unit_divisor;
+ unsigned int time_unit_divisor;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ int nr_cpus; /* active cpus on the package, topology info is lost during
+ * cpu hotplug, so we have to track it ourselves.
+ */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* Per-domain data, used to describe individual knobs so that access functions
+ * can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+ const char *name;
+ u64 mask;
+ int shift;
+ enum rapl_domain_msr_id id;
+ enum unit_type unit;
+ u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \
+ .name = #p, \
+ .mask = m, \
+ .shift = s, \
+ .id = i, \
+ .unit = u, \
+ .flag = f \
+ }
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+ "package",
+ "core",
+ "uncore",
+ "dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (rp->id == id)
+ return rp;
+ }
+
+ return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if (topology_physical_package_id(i) == package_id)
+ return i;
+ }
+ /* all CPUs on this package are offline */
+
+ return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+ struct rapl_package *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+ kfree(p->domains);
+ list_del(&p->plist);
+ kfree(p);
+ }
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+ struct rapl_domain *rd;
+ u64 energy_now;
+
+ /* prevent CPU hotplug, make sure the RAPL domain does not go
+ * away while reading the counter.
+ */
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+
+ if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+ *energy_raw = energy_now;
+ put_online_cpus();
+
+ return 0;
+ }
+ put_online_cpus();
+
+ return -EIO;
+}
+
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+ *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+ return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ struct rapl_package *rp;
+
+ /* The package zone is the last zone of a package; we can free
+ * memory here since all children have been unregistered.
+ */
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ dev_warn(&power_zone->dev, "no package id %s\n",
+ rd->name);
+ return -ENODEV;
+ }
+ kfree(rd);
+ rp->domains = NULL;
+ }
+
+ return 0;
+
+}
+
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+ int i;
+
+ for (i = 0; i < NR_POWER_LIMITS; i++) {
+ if (rd->rpl[i].name == NULL)
+ break;
+ }
+
+ return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ int nr_powerlimit;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+ return -EACCES;
+ get_online_cpus();
+ nr_powerlimit = find_nr_power_limit(rd);
+ /* here we activate/deactivate the hardware for power limiting */
+ rapl_write_data_raw(rd, PL1_ENABLE, mode);
+ /* always enable clamp such that p-state can go below OS requested
+ * range. Power capping takes priority over guaranteed frequency.
+ */
+ rapl_write_data_raw(rd, PL1_CLAMP, mode);
+ /* some domains have pl2 */
+ if (nr_powerlimit > 1) {
+ rapl_write_data_raw(rd, PL2_ENABLE, mode);
+ rapl_write_data_raw(rd, PL2_CLAMP, mode);
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+ struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+ u64 val;
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ *mode = false;
+ return 0;
+ }
+ get_online_cpus();
+ if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+ put_online_cpus();
+ return -EIO;
+ }
+ *mode = val;
+ put_online_cpus();
+
+ return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+ /* RAPL_DOMAIN_PACKAGE */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP0 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_PP1 */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+ /* RAPL_DOMAIN_DRAM */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+ u64 power_limit)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ rp = find_package_by_id(rd->package_id);
+ if (!rp) {
+ ret = -ENODEV;
+ goto set_exit;
+ }
+
+ if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+ rd->name);
+ ret = -EACCES;
+ goto set_exit;
+ }
+
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ package_power_limit_irq_save(rd->package_id);
+set_exit:
+ put_online_cpus();
+ return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = POWER_LIMIT1;
+ break;
+ case PL2_ENABLE:
+ prim = POWER_LIMIT2;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+ u64 window)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW1, window);
+ break;
+ case PL2_ENABLE:
+ rapl_write_data_raw(rd, TIME_WINDOW2, window);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ put_online_cpus();
+ return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (!ret)
+ *data = val;
+ put_online_cpus();
+
+ return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+ struct rapl_power_limit *rpl;
+ struct rapl_domain *rd;
+
+ rd = power_zone_to_rapl_domain(power_zone);
+ rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+ return rpl->name;
+}
+
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+ u64 *data)
+{
+ struct rapl_domain *rd;
+ u64 val;
+ int prim;
+ int ret = 0;
+
+ get_online_cpus();
+ rd = power_zone_to_rapl_domain(power_zone);
+ switch (rd->rpl[id].prim_id) {
+ case PL1_ENABLE:
+ prim = THERMAL_SPEC_POWER;
+ break;
+ case PL2_ENABLE:
+ prim = MAX_POWER;
+ break;
+ default:
+ put_online_cpus();
+ return -EINVAL;
+ }
+ if (rapl_read_data_raw(rd, prim, true, &val))
+ ret = -EIO;
+ else
+ *data = val;
+
+ put_online_cpus();
+
+ return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+ .set_power_limit_uw = set_power_limit,
+ .get_power_limit_uw = get_current_power_limit,
+ .set_time_window_us = set_time_window,
+ .get_time_window_us = get_time_window,
+ .get_max_power_uw = get_max_power,
+ .get_name = get_constraint_name,
+};
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+ int i;
+ struct rapl_domain *rd = rp->domains;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ unsigned int mask = rp->domain_map & (1 << i);
+ switch (mask) {
+ case BIT(RAPL_DOMAIN_PACKAGE):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+ rd->id = RAPL_DOMAIN_PACKAGE;
+ rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+ rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+ rd->msrs[2] = MSR_PKG_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_PKG_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ rd->rpl[1].prim_id = PL2_ENABLE;
+ rd->rpl[1].name = pl2_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP0):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+ rd->id = RAPL_DOMAIN_PP0;
+ rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP0_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_PP1):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+ rd->id = RAPL_DOMAIN_PP1;
+ rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+ rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+ rd->msrs[2] = 0;
+ rd->msrs[3] = MSR_PP1_POLICY;
+ rd->msrs[4] = 0;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ case BIT(RAPL_DOMAIN_DRAM):
+ rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+ rd->id = RAPL_DOMAIN_DRAM;
+ rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+ rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+ rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+ rd->msrs[3] = 0;
+ rd->msrs[4] = MSR_DRAM_POWER_INFO;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ break;
+ }
+ if (mask) {
+ rd->package_id = rp->id;
+ rd++;
+ }
+ }
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ int to_raw)
+{
+ u64 divisor = 1;
+ int scale = 1; /* scale to user friendly data without floating point */
+ u64 f, y; /* fraction and exp. used for time unit */
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package);
+ if (!rp)
+ return value;
+
+ switch (type) {
+ case POWER_UNIT:
+ divisor = rp->power_unit_divisor;
+ scale = POWER_UNIT_SCALE;
+ break;
+ case ENERGY_UNIT:
+ scale = ENERGY_UNIT_SCALE;
+ divisor = rp->energy_unit_divisor;
+ break;
+ case TIME_UNIT:
+ divisor = rp->time_unit_divisor;
+ scale = TIME_UNIT_SCALE;
+ /* special processing based on time = 2^Y * (1 + F/4) / divisor, refer
+ * to Intel Software Developer's manual Vol. 3a, CH 14.7.4.
+ */
+ if (!to_raw) {
+ f = (value & 0x60) >> 5;
+ y = value & 0x1f;
+ value = (1 << y) * (4 + f) * scale / 4;
+ return div64_u64(value, divisor);
+ } else {
+ do_div(value, scale);
+ value *= divisor;
+ y = ilog2(value);
+ f = div64_u64(4 * (value - (1 << y)), 1 << y);
+ value = (y & 0x1f) | ((f & 0x3) << 5);
+ return value;
+ }
+ break;
+ case ARBITRARY_UNIT:
+ default:
+ return value;
+ };
+
+ if (to_raw)
+ return div64_u64(value * divisor, scale);
+ else
+ return div64_u64(value * scale, divisor);
+}
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+ /* name, mask, shift, msr index, unit divisor */
+ PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+ RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+ RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+ RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+ 0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+ RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+ PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+ RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+ RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+ PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+ RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+ /* non-hardware */
+ PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+ RAPL_PRIMITIVE_DERIVED),
+ {NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * if xlate flag is set, return translated data based on data units, i.e.
+ * time, energy, and power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use primitive info to allow writing consolidated access
+ * functions.
+ * For a given primitive, it is processed by MSR mask and shift. Unit conversion
+ * is pre-assigned based on RAPL unit MSRs read at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * | xxxxx (mask) |
+ * | |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
+static int rapl_read_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ bool xlate, u64 *data)
+{
+ u64 value, final;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+ return -EINVAL;
+
+ msr = rd->msrs[rp->id];
+ if (!msr)
+ return -EINVAL;
+ /* use physical package id to look up active cpus */
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+
+ /* special-case package domain, which uses a different bit */
+ if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+ rp->mask = POWER_PACKAGE_LOCK;
+ rp->shift = 63;
+ }
+ /* non-hardware data are collected by the polling thread */
+ if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+ *data = rd->rdd.primitives[prim];
+ return 0;
+ }
+
+ if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+ pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ final = value & rp->mask;
+ final = final >> rp->shift;
+ if (xlate)
+ *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+ else
+ *data = final;
+
+ return 0;
+}
+
+/* Similar use of primitive info in the read counterpart */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+ enum rapl_primitives prim,
+ unsigned long long value)
+{
+ u64 msr_val;
+ u32 msr;
+ struct rapl_primitive_info *rp = &rpi[prim];
+ int cpu;
+
+ cpu = find_active_cpu_on_package(rd->package_id);
+ if (cpu < 0)
+ return cpu;
+ msr = rd->msrs[rp->id];
+ if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+ value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+ msr_val &= ~rp->mask;
+ msr_val |= value << rp->shift;
+ if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+ dev_dbg(&rd->power_zone.dev,
+ "failed to write msr 0x%x on cpu %d\n", msr, cpu);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+ u64 msr_val;
+ u32 value;
+
+ if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+ pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+ MSR_RAPL_POWER_UNIT, cpu);
+ return -ENODEV;
+ }
+
+ /* Raw RAPL data stored in MSRs are in certain scales. We need to
+ * convert them into standard units based on the divisors reported in
+ * the RAPL unit MSRs.
+ * i.e.
+ * energy unit: 1/energy_unit_divisor Joules
+ * power unit: 1/power_unit_divisor Watts
+ * time unit: 1/time_unit_divisor Seconds
+ */
+ value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+ rp->energy_unit_divisor = 1 << value;
+
+
+ value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+ rp->power_unit_divisor = 1 << value;
+
+ value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+ rp->time_unit_divisor = 1 << value;
+
+ pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+ rp->id,
+ rp->energy_unit_divisor,
+ rp->time_unit_divisor,
+ rp->power_unit_divisor);
+
+ return 0;
+}
+
+/* REVISIT:
+ * When package power limit is set artificially low by RAPL, LVT
+ * thermal interrupt for package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+ u32 l, h = 0;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+ /* save the state of PLN irq mask bit before disabling it */
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+ rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+ rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+ }
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+ u32 l, h;
+ int cpu;
+ struct rapl_package *rp;
+
+ rp = find_package_by_id(package_id);
+ if (!rp)
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+ return;
+
+ cpu = find_active_cpu_on_package(package_id);
+ if (cpu < 0)
+ return;
+
+ /* irq enable state not saved, nothing to restore */
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+ return;
+ rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+ if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+ l |= PACKAGE_THERM_INT_PLN_ENABLE;
+ else
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
+ { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
+ { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+ /* TODO: Add more CPU IDs after testing */
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* read once for all raw primitive data for all packages, domains */
+static void rapl_update_domain_data(void)
+{
+ int dmn, prim;
+ u64 val;
+ struct rapl_package *rp;
+
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+ pr_debug("update package %d domain %s data\n", rp->id,
+ rp->domains[dmn].name);
+ /* exclude non-raw primitives */
+ for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+ if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+ rpi[prim].unit,
+ &val))
+ rp->domains[dmn].rdd.primitives[prim] =
+ val;
+ }
+ }
+
+}
+
+static int rapl_unregister_powercap(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ /* unregister all active rapl packages from the powercap layer,
+ * hotplug lock held
+ */
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ package_power_limit_irq_restore(rp->id);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("remove package, undo power limit on %d: %s\n",
+ rp->id, rd->name);
+ rapl_write_data_raw(rd, PL1_ENABLE, 0);
+ rapl_write_data_raw(rd, PL2_ENABLE, 0);
+ rapl_write_data_raw(rd, PL1_CLAMP, 0);
+ rapl_write_data_raw(rd, PL2_CLAMP, 0);
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do the package zone last */
+ if (rd_package)
+ powercap_unregister_zone(control_type,
+ &rd_package->power_zone);
+ }
+ powercap_unregister_control_type(control_type);
+
+ return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+ struct rapl_domain *rd;
+ int ret = 0;
+ char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
+ struct powercap_zone *power_zone = NULL;
+ int nr_pl;
+
+ /* first we register package domain as the parent zone*/
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ nr_pl = find_nr_power_limit(rd);
+ pr_debug("register socket %d package domain %s\n",
+ rp->id, rd->name);
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, sizeof(dev_name), "%s-%d",
+ rd->name, rp->id);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type,
+ dev_name, NULL,
+ &zone_ops[rd->id],
+ nr_pl,
+ &constraint_ops);
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register package, %d\n",
+ rp->id);
+ ret = PTR_ERR(power_zone);
+ goto exit_package;
+ }
+ /* track parent zone in per package/socket data */
+ rp->power_zone = power_zone;
+ /* done, only one package domain per socket */
+ break;
+ }
+ }
+ if (!power_zone) {
+ pr_err("no package domain found, unknown topology!\n");
+ ret = -ENODEV;
+ goto exit_package;
+ }
+ /* now register domains as children of the socket/package*/
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE)
+ continue;
+ /* number of power limits per domain varies */
+ nr_pl = find_nr_power_limit(rd);
+ power_zone = powercap_register_zone(&rd->power_zone,
+ control_type, rd->name,
+ rp->power_zone,
+ &zone_ops[rd->id], nr_pl,
+ &constraint_ops);
+
+ if (IS_ERR(power_zone)) {
+ pr_debug("failed to register power_zone, %d:%s:%s\n",
+ rp->id, rd->name, dev_name);
+ ret = PTR_ERR(power_zone);
+ goto err_cleanup;
+ }
+ }
+
+exit_package:
+ return ret;
+err_cleanup:
+ /* clean up previously initialized domains within the package if we
+ * failed after the first domain setup.
+ */
+ while (--rd >= rp->domains) {
+ pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+
+ return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+ struct rapl_domain *rd;
+ struct rapl_package *rp;
+ int ret = 0;
+
+ control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+ if (IS_ERR(control_type)) {
+ pr_debug("failed to register powercap control_type.\n");
+ return PTR_ERR(control_type);
+ }
+ /* read the initial data */
+ rapl_update_domain_data();
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (rapl_package_register_powercap(rp))
+ goto err_cleanup_package;
+ return ret;
+
+err_cleanup_package:
+ /* clean up previously initialized packages */
+ list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+ rd++) {
+ pr_debug("unregister zone/package %d, %s domain\n",
+ rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ }
+
+ return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+ unsigned msr;
+ u64 val1, val2 = 0;
+ int retry = 0;
+
+ switch (domain) {
+ case RAPL_DOMAIN_PACKAGE:
+ msr = MSR_PKG_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP0:
+ msr = MSR_PP0_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_PP1:
+ msr = MSR_PP1_ENERGY_STATUS;
+ break;
+ case RAPL_DOMAIN_DRAM:
+ msr = MSR_DRAM_ENERGY_STATUS;
+ break;
+ default:
+ pr_err("invalid domain id %d\n", domain);
+ return -EINVAL;
+ }
+ if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+ return -ENODEV;
+
+ /* energy counters roll slowly on some domains */
+ while (++retry < 10) {
+ usleep_range(10000, 15000);
+ rdmsrl_safe_on_cpu(cpu, msr, &val2);
+ if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+ return 0;
+ }
+ /* if energy counter does not change, report as bad domain */
+ pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+ rapl_domain_names[domain], val1, val2);
+
+ return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU, caller must
+ * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+ int i;
+ int ret = 0;
+ struct rapl_domain *rd;
+ u64 locked;
+
+ for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+ /* use physical package id to read counters */
+ if (!rapl_check_domain(cpu, i))
+ rp->domain_map |= 1 << i;
+ }
+ rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+ if (!rp->nr_domains) {
+ pr_err("no valid rapl domains found in package %d\n", rp->id);
+ ret = -ENODEV;
+ goto done;
+ }
+ pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+ rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+ GFP_KERNEL);
+ if (!rp->domains) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ rapl_init_domains(rp);
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ /* check if the domain is locked by BIOS */
+ if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+ pr_info("RAPL package %d domain %s locked by BIOS\n",
+ rp->id, rd->name);
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ }
+ }
+
+
+done:
+ return ret;
+}
+
+static bool is_package_new(int package)
+{
+ struct rapl_package *rp;
+
+ /* caller prevents cpu hotplug, there will be no new packages added
+ * or deleted while traversing the package list, no need for locking.
+ */
+ list_for_each_entry(rp, &rapl_packages, plist)
+ if (package == rp->id)
+ return false;
+
+ return true;
+}
+
+/* RAPL interface can be made of a two-level hierarchy: package level and domain
+ * level. We first detect the number of packages then domains of each package.
+ * We have to consider the possibility of CPU online/offline due to hotplug and
+ * other scenarios.
+ */
+static int rapl_detect_topology(void)
+{
+ int i;
+ int phy_package_id;
+ struct rapl_package *new_package, *rp;
+
+ for_each_online_cpu(i) {
+ phy_package_id = topology_physical_package_id(i);
+ if (is_package_new(phy_package_id)) {
+ new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+ if (!new_package) {
+ rapl_cleanup_data();
+ return -ENOMEM;
+ }
+ /* add the new package to the list */
+ new_package->id = phy_package_id;
+ new_package->nr_cpus = 1;
+
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(new_package, i) ||
+ rapl_check_unit(new_package, i)) {
+ kfree(new_package->domains);
+ kfree(new_package);
+ /* free up the packages already initialized */
+ rapl_cleanup_data();
+ return -ENODEV;
+ }
+ INIT_LIST_HEAD(&new_package->plist);
+ list_add(&new_package->plist, &rapl_packages);
+ } else {
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ }
+ }
+
+ return 0;
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+ struct rapl_domain *rd, *rd_package = NULL;
+
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ if (rd->id == RAPL_DOMAIN_PACKAGE) {
+ rd_package = rd;
+ continue;
+ }
+ pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+ powercap_unregister_zone(control_type, &rd->power_zone);
+ }
+ /* do parent zone last */
+ powercap_unregister_zone(control_type, &rd_package->power_zone);
+ list_del(&rp->plist);
+ kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+ int ret = 0;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ /* add the new package to the list */
+ rp->id = phy_package_id;
+ rp->nr_cpus = 1;
+ /* check if the package contains valid domains */
+ if (rapl_detect_domains(rp, cpu) ||
+ rapl_check_unit(rp, cpu)) {
+ ret = -ENODEV;
+ goto err_free_package;
+ }
+ if (!rapl_package_register_powercap(rp)) {
+ INIT_LIST_HEAD(&rp->plist);
+ list_add(&rp->plist, &rapl_packages);
+ return ret;
+ }
+
+err_free_package:
+ kfree(rp->domains);
+ kfree(rp);
+
+ return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU goes online as the first CPU of the physical package
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
+static int rapl_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ int phy_package_id;
+ struct rapl_package *rp;
+
+ phy_package_id = topology_physical_package_id(cpu);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (rp)
+ ++rp->nr_cpus;
+ else
+ rapl_add_package(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rp = find_package_by_id(phy_package_id);
+ if (!rp)
+ break;
+ if (--rp->nr_cpus == 0)
+ rapl_remove_package(rp);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+ .notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+ int ret = 0;
+
+ if (!x86_match_cpu(rapl_ids)) {
+ pr_err("driver does not support CPU family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ return -ENODEV;
+ }
+ /* prevent CPU hotplug during detection */
+ get_online_cpus();
+ ret = rapl_detect_topology();
+ if (ret)
+ goto done;
+
+ if (rapl_register_powercap()) {
+ rapl_cleanup_data();
+ ret = -ENODEV;
+ goto done;
+ }
+ register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+ get_online_cpus();
+ unregister_hotcpu_notifier(&rapl_cpu_notifier);
+ rapl_unregister_powercap();
+ rapl_cleanup_data();
+ put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
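As a sanity check on the time-window translation in rapl_unit_xlate() above (a hand computation, assuming a typical MSR_RAPL_POWER_UNIT time field of 0xA, i.e. time_unit_divisor = 2^10 = 1024): for a raw time-window field of 0x27, F = (0x27 & 0x60) >> 5 = 1 and Y = 0x27 & 0x1f = 7, so the scaled value is (1 << 7) * (4 + 1) * 1000000 / 4 = 160000000, and dividing by 1024 gives 156250 us, i.e. a window of roughly 156 ms. This matches the SDM encoding time = 2^Y * (1 + F/4) * time_unit that the conversion implements.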
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644
index 000000000000..8d0fe431dbdd
--- /dev/null
+++ b/drivers/powercap/powercap_sys.c
@@ -0,0 +1,685 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+ container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr) \
+static ssize_t _attr##_show(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -EINVAL; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ \
+ if (power_zone->ops->get_##_attr) { \
+ if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* The only meaningful input is 0 (reset), others are silently ignored */
+#define define_power_zone_store(_attr) \
+static ssize_t _attr##_store(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ u64 value; \
+ \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (value) \
+ return count; \
+ if (power_zone->ops->reset_##_attr) { \
+ if (!power_zone->ops->reset_##_attr(power_zone)) \
+ return count; \
+ } \
+ \
+ return -EINVAL; \
+}
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+ struct device_attribute *dev_attr,\
+ char *buf) \
+{ \
+ u64 value; \
+ ssize_t len = -ENODATA; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+ if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+ len = sprintf(buf, "%lld\n", value); \
+ } \
+ \
+ return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+ struct device_attribute *dev_attr, \
+ const char *buf, size_t count) \
+{ \
+ int err; \
+ u64 value; \
+ struct powercap_zone *power_zone = to_powercap_zone(dev); \
+ int id; \
+ struct powercap_zone_constraint *pconst;\
+ \
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+ return -EINVAL; \
+ if (id >= power_zone->const_id_cnt) \
+ return -EINVAL; \
+ pconst = &power_zone->constraints[id]; \
+ err = kstrtoull(buf, 10, &value); \
+ if (err) \
+ return -EINVAL; \
+ if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+ if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+ return count; \
+ } \
+ \
+ return -ENODATA; \
+}
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
+
+/* Power zone constraint attributes callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one-time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+ struct device_attribute power_limit_attr;
+ struct device_attribute time_window_attr;
+ struct device_attribute max_power_attr;
+ struct device_attribute min_power_attr;
+ struct device_attribute max_time_window_attr;
+ struct device_attribute min_time_window_attr;
+ struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ const char *name;
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ int id;
+ ssize_t len = -ENODATA;
+ struct powercap_zone_constraint *pconst;
+
+ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+ return -EINVAL;
+ if (id >= power_zone->const_id_cnt)
+ return -EINVAL;
+ pconst = &power_zone->constraints[id];
+
+ if (pconst && pconst->ops && pconst->ops->get_name) {
+ name = pconst->ops->get_name(power_zone, id);
+ if (name) {
+ snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+ "%s\n", name);
+ buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+ len = strlen(buf);
+ }
+ }
+
+ return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+ int mode,
+ struct device_attribute *dev_attr,
+ ssize_t (*show)(struct device *,
+ struct device_attribute *, char *),
+ ssize_t (*store)(struct device *,
+ struct device_attribute *,
+ const char *, size_t)
+ )
+{
+
+ dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+ id, name);
+ if (!dev_attr->attr.name)
+ return -ENOMEM;
+ dev_attr->attr.mode = mode;
+ dev_attr->show = show;
+ dev_attr->store = store;
+
+ return 0;
+}
+
+static void free_constraint_attributes(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ kfree(constraint_attrs[i].power_limit_attr.attr.name);
+ kfree(constraint_attrs[i].time_window_attr.attr.name);
+ kfree(constraint_attrs[i].name_attr.attr.name);
+ kfree(constraint_attrs[i].max_power_attr.attr.name);
+ kfree(constraint_attrs[i].min_power_attr.attr.name);
+ kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+ kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+ }
+}
+
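+/*
+ * Allocate the "constraint_%d_<attr>" attribute names once at init time;
+ * the same static attribute objects are then shared by every power zone.
+ */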
+static int seed_constraint_attributes(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+ ret = create_constraint_attribute(i, "power_limit_uw",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].power_limit_attr,
+ show_constraint_power_limit_uw,
+ store_constraint_power_limit_uw);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "time_window_us",
+ S_IWUSR | S_IRUGO,
+ &constraint_attrs[i].time_window_attr,
+ show_constraint_time_window_us,
+ store_constraint_time_window_us);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "name", S_IRUGO,
+ &constraint_attrs[i].name_attr,
+ show_constraint_name,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+ &constraint_attrs[i].max_power_attr,
+ show_constraint_max_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+ &constraint_attrs[i].min_power_attr,
+ show_constraint_min_power_uw,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "max_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].max_time_window_attr,
+ show_constraint_max_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+ ret = create_constraint_attribute(i, "min_time_window_us",
+ S_IRUGO,
+ &constraint_attrs[i].min_time_window_attr,
+ show_constraint_min_time_window_us,
+ NULL);
+ if (ret)
+ goto err_alloc;
+
+ }
+
+ return 0;
+
+err_alloc:
+ free_constraint_attributes();
+
+ return ret;
+}
+
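+/*
+ * Wire the per-constraint attributes into the zone's attribute array.
+ * power_limit_uw and time_window_us are mandatory; the remaining
+ * attributes are exposed only when the control type provides a getter.
+ */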
+static int create_constraints(struct powercap_zone *power_zone,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int i;
+ int ret = 0;
+ int count;
+ struct powercap_zone_constraint *pconst;
+
+ if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+ !const_ops->set_power_limit_uw ||
+ !const_ops->get_time_window_us ||
+ !const_ops->set_time_window_us)
+ return -EINVAL;
+
+ count = power_zone->zone_attr_count;
+ for (i = 0; i < nr_constraints; ++i) {
+ pconst = &power_zone->constraints[i];
+ pconst->ops = const_ops;
+ pconst->id = power_zone->const_id_cnt;
+ power_zone->const_id_cnt++;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].power_limit_attr.attr;
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].time_window_attr.attr;
+ if (pconst->ops->get_name)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].name_attr.attr;
+ if (pconst->ops->get_max_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_power_attr.attr;
+ if (pconst->ops->get_min_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_power_attr.attr;
+ if (pconst->ops->get_max_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].max_time_window_attr.attr;
+ if (pconst->ops->get_min_time_window_us)
+ power_zone->zone_dev_attrs[count++] =
+ &constraint_attrs[i].min_time_window_attr.attr;
+ }
+ power_zone->zone_attr_count = count;
+
+ return ret;
+}
+
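+/* Check whether the given control type is on the registered list */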
+static bool control_type_valid(void *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+ bool found = false;
+
+ mutex_lock(&powercap_cntrl_list_lock);
+
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return found;
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Populate the common zone attributes exposed via sysfs */
+static void create_power_zone_common_attributes(
+ struct powercap_zone *power_zone)
+{
+ int count = 0;
+
+ power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+ if (power_zone->ops->get_max_energy_range_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_energy_range_uj.attr;
+ if (power_zone->ops->get_energy_uj)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_energy_uj.attr;
+ if (power_zone->ops->get_power_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_power_uw.attr;
+ if (power_zone->ops->get_max_power_range_uw)
+ power_zone->zone_dev_attrs[count++] =
+ &dev_attr_max_power_range_uw.attr;
+ power_zone->zone_dev_attrs[count] = NULL;
+ power_zone->zone_attr_count = count;
+}
+
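+/*
+ * Device release callback shared by power zones and control types;
+ * zones have a parent device, control types do not.
+ */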
+static void powercap_release(struct device *dev)
+{
+ bool allocated;
+
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = power_zone->allocated;
+ /* Remove id from parent idr struct */
+ idr_remove(power_zone->parent_idr, power_zone->id);
+ /* Destroy idrs allocated for this zone */
+ idr_destroy(&power_zone->idr);
+ kfree(power_zone->name);
+ kfree(power_zone->zone_dev_attrs);
+ kfree(power_zone->constraints);
+ if (power_zone->ops->release)
+ power_zone->ops->release(power_zone);
+ if (allocated)
+ kfree(power_zone);
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+
+ /* Store flag as the release() may free memory */
+ allocated = control_type->allocated;
+ idr_destroy(&control_type->idr);
+ mutex_destroy(&control_type->lock);
+ if (control_type->ops && control_type->ops->release)
+ control_type->ops->release(control_type);
+ if (allocated)
+ kfree(control_type);
+ }
+}
+
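+/*
+ * "enabled" attribute, present on both control types and zones; reads
+ * default to enabled when the underlying ops provide no get_enable.
+ */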
+static ssize_t enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool mode = true;
+
+ /* Default is enabled */
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->get_enable)
+ if (power_zone->ops->get_enable(power_zone, &mode))
+ mode = false;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->get_enable)
+ if (control_type->ops->get_enable(control_type, &mode))
+ mode = false;
+ }
+
+ return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool mode;
+
+ if (strtobool(buf, &mode))
+ return -EINVAL;
+ if (dev->parent) {
+ struct powercap_zone *power_zone = to_powercap_zone(dev);
+ if (power_zone->ops->set_enable)
+ if (!power_zone->ops->set_enable(power_zone, mode))
+ return len;
+ } else {
+ struct powercap_control_type *control_type =
+ to_powercap_control_type(dev);
+ if (control_type->ops && control_type->ops->set_enable)
+ if (!control_type->ops->set_enable(control_type, mode))
+ return len;
+ }
+
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR_RW(enabled);
+
+static struct attribute *powercap_attrs[] = {
+ &dev_attr_enabled.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(powercap);
+
+static struct class powercap_class = {
+ .name = "powercap",
+ .dev_release = powercap_release,
+ .dev_groups = powercap_groups,
+};
+
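+/*
+ * Register a power zone: allocate an id from the parent's idr, build the
+ * sysfs attribute array (common attributes plus constraints) and register
+ * the zone device under the powercap class.
+ */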
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops)
+{
+ int result;
+ int nr_attrs;
+
+ if (!name || !control_type || !ops ||
+ nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+ (!ops->get_energy_uj && !ops->get_power_uw) ||
+ !control_type_valid(control_type))
+ return ERR_PTR(-EINVAL);
+
+ if (power_zone) {
+ if (!ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(power_zone, 0, sizeof(*power_zone));
+ } else {
+ power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+ if (!power_zone)
+ return ERR_PTR(-ENOMEM);
+ power_zone->allocated = true;
+ }
+ power_zone->ops = ops;
+ power_zone->control_type_inst = control_type;
+ if (!parent) {
+ power_zone->dev.parent = &control_type->dev;
+ power_zone->parent_idr = &control_type->idr;
+ } else {
+ power_zone->dev.parent = &parent->dev;
+ power_zone->parent_idr = &parent->idr;
+ }
+ power_zone->dev.class = &powercap_class;
+
+ mutex_lock(&control_type->lock);
+ /* Using idr to get the unique id */
+ result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+ if (result < 0)
+ goto err_idr_alloc;
+
+ power_zone->id = result;
+ idr_init(&power_zone->idr);
+ power_zone->name = kstrdup(name, GFP_KERNEL);
+ if (!power_zone->name) {
+ result = -ENOMEM;
+ goto err_name_alloc;
+ }
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
+ power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+ nr_constraints, GFP_KERNEL);
+ if (!power_zone->constraints) {
+ result = -ENOMEM;
+ goto err_const_alloc;
+ }
+
+ nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+ POWERCAP_ZONE_MAX_ATTRS + 1;
+ power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+ nr_attrs, GFP_KERNEL);
+ if (!power_zone->zone_dev_attrs) {
+ result = -ENOMEM;
+ goto err_attr_alloc;
+ }
+ create_power_zone_common_attributes(power_zone);
+ result = create_constraints(power_zone, nr_constraints, const_ops);
+ if (result)
+ goto err_dev_ret;
+
+ power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+ power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+ power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+ power_zone->dev_attr_groups[1] = NULL;
+ power_zone->dev.groups = power_zone->dev_attr_groups;
+ result = device_register(&power_zone->dev);
+ if (result)
+ goto err_dev_ret;
+
+ control_type->nr_zones++;
+ mutex_unlock(&control_type->lock);
+
+ return power_zone;
+
+err_dev_ret:
+ kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+ kfree(power_zone->constraints);
+err_const_alloc:
+ kfree(power_zone->name);
+err_name_alloc:
+ idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+ if (power_zone->allocated)
+ kfree(power_zone);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone)
+{
+ if (!power_zone || !control_type)
+ return -EINVAL;
+
+ mutex_lock(&control_type->lock);
+ control_type->nr_zones--;
+ mutex_unlock(&control_type->lock);
+
+ device_unregister(&power_zone->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
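+/*
+ * Register a control type: initialize its lock and idr, create its device
+ * under the powercap class and add it to the global list checked by
+ * control_type_valid().
+ */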
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops)
+{
+ int result;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+ if (control_type) {
+ if (!ops || !ops->release)
+ return ERR_PTR(-EINVAL);
+ memset(control_type, 0, sizeof(*control_type));
+ } else {
+ control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+ if (!control_type)
+ return ERR_PTR(-ENOMEM);
+ control_type->allocated = true;
+ }
+ mutex_init(&control_type->lock);
+ control_type->ops = ops;
+ INIT_LIST_HEAD(&control_type->node);
+ control_type->dev.class = &powercap_class;
+ dev_set_name(&control_type->dev, "%s", name);
+ result = device_register(&control_type->dev);
+ if (result) {
+ if (control_type->allocated)
+ kfree(control_type);
+ return ERR_PTR(result);
+ }
+ idr_init(&control_type->idr);
+
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_add_tail(&control_type->node, &powercap_cntrl_list);
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+ struct powercap_control_type *pos = NULL;
+
+ if (control_type->nr_zones) {
+ dev_err(&control_type->dev, "Zones of this type are still not freed\n");
+ return -EINVAL;
+ }
+ mutex_lock(&powercap_cntrl_list_lock);
+ list_for_each_entry(pos, &powercap_cntrl_list, node) {
+ if (pos == control_type) {
+ list_del(&control_type->node);
+ mutex_unlock(&powercap_cntrl_list_lock);
+ device_unregister(&control_type->dev);
+ return 0;
+ }
+ }
+ mutex_unlock(&powercap_cntrl_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
+
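+/* Seed the shared constraint attributes and register the powercap class */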
+static int __init powercap_init(void)
+{
+ int result = 0;
+
+ result = seed_constraint_attributes();
+ if (result)
+ return result;
+
+ result = class_register(&powercap_class);
+
+ return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8d05accf706c..927998aa5e71 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -245,15 +245,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ acpi_dev_pm_attach(&spi->dev, true);
+ ret = sdrv->probe(spi);
+ if (ret)
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->probe(to_spi_device(dev));
+ return ret;
}
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ ret = sdrv->remove(spi);
+ acpi_dev_pm_detach(&spi->dev, true);
- return sdrv->remove(to_spi_device(dev));
+ return ret;
}
static void spi_drv_shutdown(struct device *dev)
@@ -1145,8 +1157,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_OK;
}
+ adev->power.flags.ignore_parent = true;
strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index dbfc390330ac..5ef596765060 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -78,7 +78,6 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
- select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 94a403a9717a..5d05555fe841 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -21,6 +21,9 @@
#include <asm/backlight.h>
#endif
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
static const char *const backlight_types[] = {
[BACKLIGHT_RAW] = "raw",
[BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name,
mutex_unlock(&pmac_backlight_mutex);
#endif
+ mutex_lock(&backlight_dev_list_mutex);
+ list_add(&new_bd->entry, &backlight_dev_list);
+ mutex_unlock(&backlight_dev_list_mutex);
+
return new_bd;
}
EXPORT_SYMBOL(backlight_device_register);
+bool backlight_device_registered(enum backlight_type type)
+{
+ bool found = false;
+ struct backlight_device *bd;
+
+ mutex_lock(&backlight_dev_list_mutex);
+ list_for_each_entry(bd, &backlight_dev_list, entry) {
+ if (bd->props.type == type) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&backlight_dev_list_mutex);
+
+ return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
/**
* backlight_device_unregister - unregisters a backlight device object.
* @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd)
if (!bd)
return;
+ mutex_lock(&backlight_dev_list_mutex);
+ list_del(&bd->entry);
+ mutex_unlock(&backlight_dev_list_mutex);
+
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight == bd)
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void)
backlight_class->dev_groups = bl_device_groups;
backlight_class->pm = &backlight_class_dev_pm_ops;
+ INIT_LIST_HEAD(&backlight_dev_list);
+ mutex_init(&backlight_dev_list_mutex);
return 0;
}