Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig              |    2
-rw-r--r--  drivers/base/Makefile             |    2
-rw-r--r--  drivers/base/devtmpfs.c           |  338
-rw-r--r--  drivers/base/iommu.c              |  124
-rw-r--r--  drivers/base/memory.c             |    1
-rw-r--r--  drivers/base/platform.c           |   23
-rw-r--r--  drivers/base/power/Makefile       |    1
-rw-r--r--  drivers/base/power/clock_ops.c    |  252
-rw-r--r--  drivers/base/power/domain.c       | 1273
-rw-r--r--  drivers/base/power/generic_ops.c  |   98
-rw-r--r--  drivers/base/power/main.c         |   87
-rw-r--r--  drivers/base/power/opp.c          |   17
-rw-r--r--  drivers/base/power/runtime.c      |   89
-rw-r--r--  drivers/base/power/sysfs.c        |    6
-rw-r--r--  drivers/base/power/trace.c        |    2
-rw-r--r--  drivers/base/regmap/Kconfig       |   13
-rw-r--r--  drivers/base/regmap/Makefile      |    3
-rw-r--r--  drivers/base/regmap/regmap-i2c.c  |  115
-rw-r--r--  drivers/base/regmap/regmap-spi.c  |   72
-rw-r--r--  drivers/base/regmap/regmap.c      |  455
-rw-r--r--  drivers/base/syscore.c            |    8
21 files changed, 2526 insertions, 455 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fa64fa04feec..21cf46f45245 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -172,4 +172,6 @@ config SYS_HYPERVISOR
bool
default n
+source "drivers/base/regmap/Kconfig"
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4c5701c15f53..99a375ad2cc9 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -13,11 +13,11 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
obj-$(CONFIG_SMP) += topology.o
-obj-$(CONFIG_IOMMU_API) += iommu.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_REGMAP) += regmap/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 82bbb5967aa9..b89fffc1d777 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -21,12 +21,11 @@
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
-#include <linux/cred.h>
#include <linux/sched.h>
-#include <linux/init_task.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
-static struct vfsmount *dev_mnt;
+static struct task_struct *thread;
#if defined CONFIG_DEVTMPFS_MOUNT
static int mount_dev = 1;
@@ -34,7 +33,16 @@ static int mount_dev = 1;
static int mount_dev;
#endif
-static DEFINE_MUTEX(dirlock);
+static DEFINE_SPINLOCK(req_lock);
+
+static struct req {
+ struct req *next;
+ struct completion done;
+ int err;
+ const char *name;
+ mode_t mode; /* 0 => delete */
+ struct device *dev;
+} *requests;
static int __init mount_param(char *str)
{
@@ -68,131 +76,152 @@ static inline int is_blockdev(struct device *dev)
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
+int devtmpfs_create_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ struct req req;
+
+ if (!thread)
+ return 0;
+
+ req.mode = 0;
+ req.name = device_get_devnode(dev, &req.mode, &tmp);
+ if (!req.name)
+ return -ENOMEM;
+
+ if (req.mode == 0)
+ req.mode = 0600;
+ if (is_blockdev(dev))
+ req.mode |= S_IFBLK;
+ else
+ req.mode |= S_IFCHR;
+
+ req.dev = dev;
+
+ init_completion(&req.done);
+
+ spin_lock(&req_lock);
+ req.next = requests;
+ requests = &req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req.done);
+
+ kfree(tmp);
+
+ return req.err;
+}
+
+int devtmpfs_delete_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ struct req req;
+
+ if (!thread)
+ return 0;
+
+ req.name = device_get_devnode(dev, NULL, &tmp);
+ if (!req.name)
+ return -ENOMEM;
+
+ req.mode = 0;
+ req.dev = dev;
+
+ init_completion(&req.done);
+
+ spin_lock(&req_lock);
+ req.next = requests;
+ requests = &req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req.done);
+
+ kfree(tmp);
+ return req.err;
+}
+
static int dev_mkdir(const char *name, mode_t mode)
{
- struct nameidata nd;
struct dentry *dentry;
+ struct path path;
int err;
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- name, LOOKUP_PARENT, &nd);
- if (err)
- return err;
-
- dentry = lookup_create(&nd, 1);
- if (!IS_ERR(dentry)) {
- err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
- if (!err)
- /* mark as kernel-created inode */
- dentry->d_inode->i_private = &dev_mnt;
- dput(dentry);
- } else {
- err = PTR_ERR(dentry);
- }
-
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ if (!err)
+ /* mark as kernel-created inode */
+ dentry->d_inode->i_private = &thread;
+ dput(dentry);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ path_put(&path);
return err;
}
static int create_path(const char *nodepath)
{
+ char *path;
+ char *s;
int err;
- mutex_lock(&dirlock);
- err = dev_mkdir(nodepath, 0755);
- if (err == -ENOENT) {
- char *path;
- char *s;
-
- /* parent directories do not exist, create them */
- path = kstrdup(nodepath, GFP_KERNEL);
- if (!path) {
- err = -ENOMEM;
- goto out;
- }
- s = path;
- for (;;) {
- s = strchr(s, '/');
- if (!s)
- break;
- s[0] = '\0';
- err = dev_mkdir(path, 0755);
- if (err && err != -EEXIST)
- break;
- s[0] = '/';
- s++;
- }
- kfree(path);
+ /* parent directories do not exist, create them */
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ s = path;
+ for (;;) {
+ s = strchr(s, '/');
+ if (!s)
+ break;
+ s[0] = '\0';
+ err = dev_mkdir(path, 0755);
+ if (err && err != -EEXIST)
+ break;
+ s[0] = '/';
+ s++;
}
-out:
- mutex_unlock(&dirlock);
+ kfree(path);
return err;
}
-int devtmpfs_create_node(struct device *dev)
+static int handle_create(const char *nodename, mode_t mode, struct device *dev)
{
- const char *tmp = NULL;
- const char *nodename;
- const struct cred *curr_cred;
- mode_t mode = 0;
- struct nameidata nd;
struct dentry *dentry;
+ struct path path;
int err;
- if (!dev_mnt)
- return 0;
-
- nodename = device_get_devnode(dev, &mode, &tmp);
- if (!nodename)
- return -ENOMEM;
-
- if (mode == 0)
- mode = 0600;
- if (is_blockdev(dev))
- mode |= S_IFBLK;
- else
- mode |= S_IFCHR;
-
- curr_cred = override_creds(&init_cred);
-
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
- if (err == -ENOENT) {
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
}
- if (err)
- goto out;
-
- dentry = lookup_create(&nd, 0);
- if (!IS_ERR(dentry)) {
- err = vfs_mknod(nd.path.dentry->d_inode,
- dentry, mode, dev->devt);
- if (!err) {
- struct iattr newattrs;
-
- /* fixup possibly umasked mode */
- newattrs.ia_mode = mode;
- newattrs.ia_valid = ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
- mutex_unlock(&dentry->d_inode->i_mutex);
-
- /* mark as kernel-created inode */
- dentry->d_inode->i_private = &dev_mnt;
- }
- dput(dentry);
- } else {
- err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mknod(path.dentry->d_inode,
+ dentry, mode, dev->devt);
+ if (!err) {
+ struct iattr newattrs;
+
+ /* fixup possibly umasked mode */
+ newattrs.ia_mode = mode;
+ newattrs.ia_valid = ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ /* mark as kernel-created inode */
+ dentry->d_inode->i_private = &thread;
}
+ dput(dentry);
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
-out:
- kfree(tmp);
- revert_creds(curr_cred);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ path_put(&path);
return err;
}
@@ -202,8 +231,7 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- name, LOOKUP_PARENT, &nd);
+ err = kern_path_parent(name, &nd);
if (err)
return err;
@@ -211,7 +239,7 @@ static int dev_rmdir(const char *name)
dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
if (!IS_ERR(dentry)) {
if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &dev_mnt)
+ if (dentry->d_inode->i_private == &thread)
err = vfs_rmdir(nd.path.dentry->d_inode,
dentry);
else
@@ -238,7 +266,6 @@ static int delete_path(const char *nodepath)
if (!path)
return -ENOMEM;
- mutex_lock(&dirlock);
for (;;) {
char *base;
@@ -250,7 +277,6 @@ static int delete_path(const char *nodepath)
if (err)
break;
}
- mutex_unlock(&dirlock);
kfree(path);
return err;
@@ -259,7 +285,7 @@ static int delete_path(const char *nodepath)
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
/* did we create it */
- if (inode->i_private != &dev_mnt)
+ if (inode->i_private != &thread)
return 0;
/* does the dev_t match */
@@ -277,29 +303,17 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
return 1;
}
-int devtmpfs_delete_node(struct device *dev)
+static int handle_remove(const char *nodename, struct device *dev)
{
- const char *tmp = NULL;
- const char *nodename;
- const struct cred *curr_cred;
struct nameidata nd;
struct dentry *dentry;
struct kstat stat;
int deleted = 1;
int err;
- if (!dev_mnt)
- return 0;
-
- nodename = device_get_devnode(dev, NULL, &tmp);
- if (!nodename)
- return -ENOMEM;
-
- curr_cred = override_creds(&init_cred);
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
+ err = kern_path_parent(nodename, &nd);
if (err)
- goto out;
+ return err;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
@@ -337,9 +351,6 @@ int devtmpfs_delete_node(struct device *dev)
path_put(&nd.path);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
-out:
- kfree(tmp);
- revert_creds(curr_cred);
return err;
}
@@ -354,7 +365,7 @@ int devtmpfs_mount(const char *mntdir)
if (!mount_dev)
return 0;
- if (!dev_mnt)
+ if (!thread)
return 0;
err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
@@ -365,31 +376,80 @@ int devtmpfs_mount(const char *mntdir)
return err;
}
+static __initdata DECLARE_COMPLETION(setup_done);
+
+static int handle(const char *name, mode_t mode, struct device *dev)
+{
+ if (mode)
+ return handle_create(name, mode, dev);
+ else
+ return handle_remove(name, dev);
+}
+
+static int devtmpfsd(void *p)
+{
+ char options[] = "mode=0755";
+ int *err = p;
+ *err = sys_unshare(CLONE_NEWNS);
+ if (*err)
+ goto out;
+ *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+ if (*err)
+ goto out;
+ sys_chdir("/.."); /* will traverse into overmounted root */
+ sys_chroot(".");
+ complete(&setup_done);
+ while (1) {
+ spin_lock(&req_lock);
+ while (requests) {
+ struct req *req = requests;
+ requests = NULL;
+ spin_unlock(&req_lock);
+ while (req) {
+ struct req *next = req->next;
+ req->err = handle(req->name, req->mode, req->dev);
+ complete(&req->done);
+ req = next;
+ }
+ spin_lock(&req_lock);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&req_lock);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ return 0;
+out:
+ complete(&setup_done);
+ return *err;
+}
+
/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
int __init devtmpfs_init(void)
{
- int err;
- struct vfsmount *mnt;
- char options[] = "mode=0755";
-
- err = register_filesystem(&dev_fs_type);
+ int err = register_filesystem(&dev_fs_type);
if (err) {
printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
"type %i\n", err);
return err;
}
- mnt = kern_mount_data(&dev_fs_type, options);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
+ thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
+ if (!IS_ERR(thread)) {
+ wait_for_completion(&setup_done);
+ } else {
+ err = PTR_ERR(thread);
+ thread = NULL;
+ }
+
+ if (err) {
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
unregister_filesystem(&dev_fs_type);
return err;
}
- dev_mnt = mnt;
printk(KERN_INFO "devtmpfs: initialized\n");
return 0;
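
The devtmpfs rework above moves all node creation and removal into a dedicated kdevtmpfs kernel thread that owns its own mount namespace; drivers push a struct req onto a spinlock-protected singly linked list, wake the thread, and block on a per-request completion. A minimal user-space sketch of the same handoff pattern, using pthreads in place of the kernel primitives (all names here are hypothetical; this illustrates the pattern, it is not kernel code):

#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *next;
	const char *name;
	int mode;			/* 0 => delete, as in devtmpfs */
	int err;
	int done;
	pthread_cond_t done_cv;
};

static struct req *requests;
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t req_cv = PTHREAD_COND_INITIALIZER;

/* Producer side: mirrors devtmpfs_create_node()/devtmpfs_delete_node(). */
static int submit(struct req *req)
{
	pthread_cond_init(&req->done_cv, NULL);
	req->done = 0;

	pthread_mutex_lock(&req_lock);
	req->next = requests;			/* LIFO push under the lock */
	requests = req;
	pthread_cond_signal(&req_cv);		/* role of wake_up_process(thread) */
	while (!req->done)			/* role of wait_for_completion() */
		pthread_cond_wait(&req->done_cv, &req_lock);
	pthread_mutex_unlock(&req_lock);

	return req->err;
}

/* Worker side: mirrors the loop in devtmpfsd(). */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		struct req *req;

		pthread_mutex_lock(&req_lock);
		while (!requests)
			pthread_cond_wait(&req_cv, &req_lock);
		req = requests;			/* detach the whole batch */
		requests = NULL;
		pthread_mutex_unlock(&req_lock);

		while (req) {
			struct req *next = req->next;

			req->err = 0;		/* stand-in for handle(name, mode, dev) */
			pthread_mutex_lock(&req_lock);
			req->done = 1;
			pthread_cond_signal(&req->done_cv);
			pthread_mutex_unlock(&req_lock);
			req = next;
		}
	}
	return NULL;
}

As in devtmpfsd(), the worker detaches the whole pending list under the lock and then runs the handlers with the lock dropped, so producers can keep queueing while requests are serviced.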
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
deleted file mode 100644
index 6e6b6a11b3ce..000000000000
--- a/drivers/base/iommu.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/bug.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/iommu.h>
-
-static struct iommu_ops *iommu_ops;
-
-void register_iommu(struct iommu_ops *ops)
-{
- if (iommu_ops)
- BUG();
-
- iommu_ops = ops;
-}
-
-bool iommu_found(void)
-{
- return iommu_ops != NULL;
-}
-EXPORT_SYMBOL_GPL(iommu_found);
-
-struct iommu_domain *iommu_domain_alloc(void)
-{
- struct iommu_domain *domain;
- int ret;
-
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
- ret = iommu_ops->domain_init(domain);
- if (ret)
- goto out_free;
-
- return domain;
-
-out_free:
- kfree(domain);
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(iommu_domain_alloc);
-
-void iommu_domain_free(struct iommu_domain *domain)
-{
- iommu_ops->domain_destroy(domain);
- kfree(domain);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_free);
-
-int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
-{
- return iommu_ops->attach_dev(domain, dev);
-}
-EXPORT_SYMBOL_GPL(iommu_attach_device);
-
-void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
-{
- iommu_ops->detach_dev(domain, dev);
-}
-EXPORT_SYMBOL_GPL(iommu_detach_device);
-
-phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
- unsigned long iova)
-{
- return iommu_ops->iova_to_phys(domain, iova);
-}
-EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
-
-int iommu_domain_has_cap(struct iommu_domain *domain,
- unsigned long cap)
-{
- return iommu_ops->domain_has_cap(domain, cap);
-}
-EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
-
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
-{
- unsigned long invalid_mask;
- size_t size;
-
- size = 0x1000UL << gfp_order;
- invalid_mask = size - 1;
-
- BUG_ON((iova | paddr) & invalid_mask);
-
- return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map);
-
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
-{
- unsigned long invalid_mask;
- size_t size;
-
- size = 0x1000UL << gfp_order;
- invalid_mask = size - 1;
-
- BUG_ON(iova & invalid_mask);
-
- return iommu_ops->unmap(domain, iova, gfp_order);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap);
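
drivers/base/iommu.c is deleted outright here; the generic IOMMU API it carried was presumably relocated out of drivers/base (to the then-new drivers/iommu tree) rather than dropped. For orientation, a hedged sketch of how the removed wrappers were typically consumed, assuming some IOMMU driver has already called register_iommu(); example_map_one_page() is a hypothetical caller and error handling is trimmed:

#include <linux/iommu.h>

static int example_map_one_page(struct device *dev, unsigned long iova,
				phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_found())
		return -ENODEV;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* gfp_order 0 => one 4 KiB page; see the BUG_ON alignment checks above */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}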
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f718..45d7c8fc73bd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
static int sections_per_block;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af637b3..0cad9c7f6bb5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -32,6 +32,25 @@ struct device platform_bus = {
EXPORT_SYMBOL_GPL(platform_bus);
/**
+ * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
+ * @dev: platform device
+ *
+ * This is called before platform_device_add() such that any pdev_archdata may
+ * be set up before the platform_notifier is called. So if a user needs to
+ * manipulate any relevant information in the pdev_archdata they can do:
+ *
+ * platform_device_alloc()
+ * ... manipulate ...
+ * platform_device_add()
+ *
+ * And if they don't care they can just call platform_device_register() and
+ * everything will just work out.
+ */
+void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+}
+
+/**
* platform_get_resource - get a resource for a device
* @dev: platform device
* @type: resource type
@@ -173,6 +192,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
pa->pdev.id = id;
device_initialize(&pa->pdev.dev);
pa->pdev.dev.release = platform_device_release;
+ arch_setup_pdev_archdata(&pa->pdev);
}
return pa ? &pa->pdev : NULL;
@@ -334,6 +354,7 @@ EXPORT_SYMBOL_GPL(platform_device_del);
int platform_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
+ arch_setup_pdev_archdata(pdev);
return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
@@ -367,7 +388,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
struct device *parent,
const char *name, int id,
const struct resource *res, unsigned int num,
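
Because arch_setup_pdev_archdata() is defined __weak above, an architecture can override it to populate pdev_archdata (or other per-device defaults) before platform_device_add() runs. A minimal sketch of such an override; the DMA-mask fixup shown is illustrative only and not taken from any particular architecture:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	/* Give freshly allocated platform devices a usable default DMA
	 * mask before the device is added and notifiers run. */
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}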
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e7..2639ae79a372 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index c0dd09df7be8..a846b2f95cfb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
#include <linux/slab.h>
#include <linux/err.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
-struct pm_runtime_clk_data {
+struct pm_clk_data {
struct list_head clock_list;
struct mutex lock;
};
@@ -36,25 +36,25 @@ struct pm_clock_entry {
enum pce_status status;
};
-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+static struct pm_clk_data *__to_pcd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}
/**
- * pm_runtime_clk_add - Start using a device clock for runtime PM.
- * @dev: Device whose clock is going to be used for runtime PM.
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
*
* Add the clock represented by @con_id to the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-int pm_runtime_clk_add(struct device *dev, const char *con_id)
+int pm_clk_add(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
}
}
- mutex_lock(&prd->lock);
- list_add_tail(&ce->node, &prd->clock_list);
- mutex_unlock(&prd->lock);
+ mutex_lock(&pcd->lock);
+ list_add_tail(&ce->node, &pcd->clock_list);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
- * @ce: Runtime PM clock entry to destroy.
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
*
- * This routine must be called under the mutex protecting the runtime PM list
- * of clocks corresponding the the @ce's device.
+ * This routine must be called under the mutex protecting the PM list of clocks
+ * corresponding to the @ce's device.
*/
-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
- * @dev: Device whose clock should not be used for runtime PM any more.
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
* @con_id: Connection ID of the clock.
*
* Remove the clock represented by @con_id from the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+void pm_clk_remove(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (!con_id && !ce->con_id) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
} else if (!con_id || !ce->con_id) {
continue;
} else if (!strcmp(con_id, ce->con_id)) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
}
/**
- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
- * @dev: Device to initialize the list of runtime PM clocks for.
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
*
- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * Allocate a struct pm_clk_data object, initialize its lock member and
* make the @dev's power.subsys_data field point to it.
*/
-int pm_runtime_clk_init(struct device *dev)
+int pm_clk_init(struct device *dev)
{
- struct pm_runtime_clk_data *prd;
+ struct pm_clk_data *pcd;
- prd = kzalloc(sizeof(*prd), GFP_KERNEL);
- if (!prd) {
- dev_err(dev, "Not enough memory fo runtime PM data.\n");
+ pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+ if (!pcd) {
+ dev_err(dev, "Not enough memory for PM clock data.\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&prd->clock_list);
- mutex_init(&prd->lock);
- dev->power.subsys_data = prd;
+ INIT_LIST_HEAD(&pcd->clock_list);
+ mutex_init(&pcd->lock);
+ dev->power.subsys_data = pcd;
return 0;
}
/**
- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
- * @dev: Device to destroy the list of runtime PM clocks for.
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * from the struct pm_clk_data object pointed to by it before and free
* that object.
*/
-void pm_runtime_clk_destroy(struct device *dev)
+void pm_clk_destroy(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
- if (!prd)
+ if (!pcd)
return;
dev->power.subsys_data = NULL;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
- __pm_runtime_clk_remove(ce);
+ list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+ __pm_clk_remove(ce);
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
- kfree(prd);
+ kfree(pcd);
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
/**
- * pm_runtime_clk_acquire - Acquire a device clock.
+ * pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @con_id: Connection ID of the clock.
*/
-static void pm_runtime_clk_acquire(struct device *dev,
+static void pm_clk_acquire(struct device *dev,
struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
}
/**
- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
-int pm_runtime_clk_suspend(struct device *dev)
+int pm_clk_suspend(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
* @dev: Device to enable the clocks for.
*/
-int pm_runtime_clk_resume(struct device *dev)
+int pm_clk_resume(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
@@ -265,33 +269,33 @@ int pm_runtime_clk_resume(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pwr_domain member of that object is copied to the device's
- * pwr_domain field and its con_ids member is used to populate the device's list
- * of runtime PM clocks, depending on @action.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
*
- * If the device's pwr_domain field is already populated with a value different
+ * If the device's pm_domain field is already populated with a value different
* from the one stored in the struct pm_clk_notifier_block object, the function
* does nothing.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
- char *con_id;
+ char **con_id;
int error;
dev_dbg(dev, "%s() %ld\n", __func__, action);
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (dev->pwr_domain)
+ if (dev->pm_domain)
break;
- error = pm_runtime_clk_init(dev);
+ error = pm_clk_init(dev);
if (error)
break;
- dev->pwr_domain = clknb->pwr_domain;
+ dev->pm_domain = clknb->pm_domain;
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- pm_runtime_clk_add(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ pm_clk_add(dev, *con_id);
} else {
- pm_runtime_clk_add(dev, NULL);
+ pm_clk_add(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
- if (dev->pwr_domain != clknb->pwr_domain)
+ if (dev->pm_domain != clknb->pm_domain)
break;
- dev->pwr_domain = NULL;
- pm_runtime_clk_destroy(dev);
+ dev->pm_domain = NULL;
+ pm_clk_destroy(dev);
break;
}
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#else /* !CONFIG_PM_RUNTIME */
+#ifdef CONFIG_PM
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks are already disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+ clk_disable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks should remain disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry(ce, &pcd->clock_list, node)
+ clk_enable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
/**
* enable_clock - Enable a device clock.
* @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
@@ -375,30 +433,30 @@ static void disable_clock(struct device *dev, const char *con_id)
* Specifically, the con_ids member of that object is used to enable or disable
* the device's clocks, depending on @action.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
struct device *dev = data;
- char *con_id;
+ char **con_id;
dev_dbg(dev, "%s() %ld\n", __func__, action);
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- enable_clock(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ enable_clock(dev, *con_id);
} else {
enable_clock(dev, NULL);
}
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
- for (con_id = clknb->con_ids[0]; *con_id; con_id++)
- disable_clock(dev, con_id);
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ disable_clock(dev, *con_id);
} else {
disable_clock(dev, NULL);
}
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#endif /* !CONFIG_PM_RUNTIME */
/**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
* @bus: Bus type to add the notifier to.
* @clknb: Notifier to be added to the given bus type.
*
* The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * notifier_call member will be replaced with pm_clk_notify(). However,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
return;
- clknb->nb.notifier_call = pm_runtime_clk_notify;
+ clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
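
With the rename from pm_runtime_clk_* to pm_clk_*, a SoC can route runtime PM through a device's clock list by registering a bus notifier. A minimal sketch, assuming the pm_clk declarations live alongside the runtime PM headers as in this series; the "foo", "ick" and "fck" identifiers are hypothetical:

#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Gate the listed clocks across runtime PM transitions. */
static struct dev_pm_domain foo_pm_domain = {
	.ops = {
		.runtime_suspend = pm_clk_suspend,
		.runtime_resume = pm_clk_resume,
	},
};

static struct pm_clk_notifier_block foo_clk_nb = {
	.pm_domain = &foo_pm_domain,
	.con_ids = { "ick", "fck", NULL },
};

static int __init foo_pm_init(void)
{
	/* pm_clk_notify() then populates each new platform device's PM
	 * clock list and points its pm_domain at foo_pm_domain. */
	pm_clk_add_notifier(&platform_bus_type, &foo_clk_nb);
	return 0;
}
core_initcall(foo_pm_init);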
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000000..be8714aa9dd6
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+#ifdef CONFIG_PM
+
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev->pm_domain))
+ return ERR_PTR(-EINVAL);
+
+ return pd_to_genpd(dev->pm_domain);
+}
+
+static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+ if (!WARN_ON(genpd->sd_count == 0))
+ genpd->sd_count--;
+}
+
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&genpd->lock);
+ /*
+ * Wait for the domain to transition into either the active,
+ * or the power off state.
+ */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status == GPD_STATE_ACTIVE
+ || genpd->status == GPD_STATE_POWER_OFF)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+ mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+ if (genpd->resume_count == 0)
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ start:
+ if (parent) {
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&genpd->lock);
+ }
+
+ if (genpd->status == GPD_STATE_ACTIVE
+ || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+ goto out;
+
+ if (genpd->status != GPD_STATE_POWER_OFF) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (parent && parent->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&genpd->lock);
+ genpd_release_lock(parent);
+
+ ret = pm_genpd_poweron(parent);
+ if (ret)
+ return ret;
+
+ goto start;
+ }
+
+ if (genpd->power_on) {
+ int ret = genpd->power_on(genpd);
+ if (ret)
+ goto out;
+ }
+
+ genpd_set_active(genpd);
+ if (parent)
+ parent->sd_count++;
+
+ out:
+ mutex_unlock(&genpd->lock);
+ if (parent)
+ genpd_release_lock(parent);
+
+ return ret;
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * __pm_genpd_save_device - Save the pre-suspend state of a device.
+ * @dle: Device list entry of the device to save the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static int __pm_genpd_save_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (dle->need_restore)
+ return 0;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ ret = drv->pm->runtime_suspend(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ if (!ret)
+ dle->need_restore = true;
+
+ return ret;
+}
+
+/**
+ * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
+ * @dle: Device list entry of the device to restore the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+
+ if (!dle->need_restore)
+ return;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_resume) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ drv->pm->runtime_resume(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ dle->need_restore = false;
+}
+
+/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+ return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ if (!work_pending(&genpd->power_off_work))
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * pm_genpd_poweroff - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, run the runtime suspend callbacks provided by all of
+ * the @genpd's devices' drivers and remove power from @genpd.
+ */
+static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct generic_pm_domain *parent;
+ struct dev_list_entry *dle;
+ unsigned int not_suspended;
+ int ret = 0;
+
+ start:
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ * (3) One of the domain's devices is being resumed right now.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+ || genpd->resume_count > 0)
+ return 0;
+
+ if (genpd->sd_count > 0)
+ return -EBUSY;
+
+ not_suspended = 0;
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+ not_suspended++;
+
+ if (not_suspended > genpd->in_progress)
+ return -EBUSY;
+
+ if (genpd->poweroff_task) {
+ /*
+ * Another instance of pm_genpd_poweroff() is executing
+ * callbacks, so tell it to start over and return.
+ */
+ genpd->status = GPD_STATE_REPEAT;
+ return 0;
+ }
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ genpd->status = GPD_STATE_BUSY;
+ genpd->poweroff_task = current;
+
+ list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+ ret = __pm_genpd_save_device(dle, genpd);
+ if (ret) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (genpd_abort_poweroff(genpd))
+ goto out;
+
+ if (genpd->status == GPD_STATE_REPEAT) {
+ genpd->poweroff_task = NULL;
+ goto start;
+ }
+ }
+
+ parent = genpd->parent;
+ if (parent) {
+ mutex_unlock(&genpd->lock);
+
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+ if (genpd_abort_poweroff(genpd)) {
+ genpd_release_lock(parent);
+ goto out;
+ }
+ }
+
+ if (genpd->power_off) {
+ ret = genpd->power_off(genpd);
+ if (ret == -EBUSY) {
+ genpd_set_active(genpd);
+ if (parent)
+ genpd_release_lock(parent);
+
+ goto out;
+ }
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ if (parent->sd_count == 0)
+ genpd_queue_power_off_work(parent);
+
+ genpd_release_lock(parent);
+ }
+
+ out:
+ genpd->poweroff_task = NULL;
+ wake_up_all(&genpd->status_wait_queue);
+ return ret;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+ genpd_acquire_lock(genpd);
+ pm_genpd_poweroff(genpd);
+ genpd_release_lock(genpd);
+}
+
+/**
+ * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->stop_device) {
+ int ret = genpd->stop_device(dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&genpd->lock);
+ genpd->in_progress++;
+ pm_genpd_poweroff(genpd);
+ genpd->in_progress--;
+ mutex_unlock(&genpd->lock);
+
+ return 0;
+}
+
+/**
+ * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ struct dev_list_entry *dle;
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev == dev) {
+ __pm_genpd_restore_device(dle, genpd);
+ break;
+ }
+ }
+}
+
+/**
+ * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ DEFINE_WAIT(wait);
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ ret = pm_genpd_poweron(genpd);
+ if (ret)
+ return ret;
+
+ mutex_lock(&genpd->lock);
+ genpd->status = GPD_STATE_BUSY;
+ genpd->resume_count++;
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ /*
+ * If current is the powering off task, we have been called
+ * reentrantly from one of the device callbacks, so we should
+ * not wait.
+ */
+ if (!genpd->poweroff_task || genpd->poweroff_task == current)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+ __pm_genpd_runtime_resume(dev, genpd);
+ genpd->resume_count--;
+ genpd_set_active(genpd);
+ wake_up_all(&genpd->status_wait_queue);
+ mutex_unlock(&genpd->lock);
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return 0;
+}
+
+#else
+
+static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+static inline void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd) {}
+
+#define pm_genpd_runtime_suspend NULL
+#define pm_genpd_runtime_resume NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so. Also, in that case propagate to its parent.
+ *
+ * This function is only called in "noirq" stages of system power transitions,
+ * so it need not acquire locks (all of the "noirq" callbacks are executed
+ * sequentially, so it is guaranteed that it will never run twice in parallel).
+ */
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ return;
+
+ if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+ return;
+
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ pm_genpd_sync_poweroff(parent);
+ }
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int pm_genpd_prepare(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count++ == 0)
+ genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+ genpd_release_lock(genpd);
+
+ if (genpd->suspend_power_off) {
+ pm_runtime_put_noidle(dev);
+ return 0;
+ }
+
+ /*
+ * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+ * so pm_genpd_poweron() will return immediately, but if the device
+ * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * to make it operational.
+ */
+ pm_runtime_resume(dev);
+ __pm_runtime_disable(dev, false);
+
+ ret = pm_generic_prepare(dev);
+ if (ret) {
+ mutex_lock(&genpd->lock);
+
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+ pm_runtime_enable(dev);
+ }
+
+ pm_runtime_put_sync(dev);
+ return ret;
+}
+
+/**
+ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Suspend a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Resume a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Freeze a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_freeze(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_freeze_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Carry out an early thaw of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Thaw a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_thaw(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Power off a device under the assumption that its pm_domain field points to
+ * the domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late powering off of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_poweroff_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early restore of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_restore_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (genpd->suspend_power_off) {
+ /*
+ * The boot kernel might put the domain into the power on state,
+ * so make sure it really is powered off.
+ */
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+ return 0;
+ }
+
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Restore a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_restore(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+}
+
+/**
+ * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void pm_genpd_complete(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ bool run_complete;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return;
+
+ mutex_lock(&genpd->lock);
+
+ run_complete = !genpd->suspend_power_off;
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+
+ if (run_complete) {
+ pm_generic_complete(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+ }
+}
+
+#else
+
+#define pm_genpd_prepare NULL
+#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_noirq NULL
+#define pm_genpd_resume NULL
+#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_noirq NULL
+#define pm_genpd_thaw NULL
+#define pm_genpd_dev_poweroff_noirq NULL
+#define pm_genpd_dev_poweroff NULL
+#define pm_genpd_restore_noirq NULL
+#define pm_genpd_restore NULL
+#define pm_genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->status == GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev == dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dle = kzalloc(sizeof(*dle), GFP_KERNEL);
+ if (!dle) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dle->dev = dev;
+ dle->need_restore = false;
+ list_add_tail(&dle->node, &genpd->dev_list);
+ genpd->device_count++;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = &genpd->domain;
+ spin_unlock_irq(&dev->power.lock);
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @genpd: PM domain to remove the device from.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = -EINVAL;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev != dev)
+ continue;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ genpd->device_count--;
+ list_del(&dle->node);
+ kfree(dle);
+
+ ret = 0;
+ break;
+ }
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @new_subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (new_subdomain->status != GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ if (genpd->status == GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain == new_subdomain) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+ new_subdomain->parent = genpd;
+ if (new_subdomain->status != GPD_STATE_POWER_OFF)
+ genpd->sd_count++;
+
+ out:
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @target: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain != target)
+ continue;
+
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ list_del(&subdomain->sd_node);
+ subdomain->parent = NULL;
+ if (subdomain->status != GPD_STATE_POWER_OFF)
+ genpd_sd_counter_dec(genpd);
+
+ mutex_unlock(&subdomain->lock);
+
+ ret = 0;
+ break;
+ }
+
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial value of the domain's status field (true means powered off).
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ if (IS_ERR_OR_NULL(genpd))
+ return;
+
+ INIT_LIST_HEAD(&genpd->sd_node);
+ genpd->parent = NULL;
+ INIT_LIST_HEAD(&genpd->dev_list);
+ INIT_LIST_HEAD(&genpd->sd_list);
+ mutex_init(&genpd->lock);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ genpd->in_progress = 0;
+ genpd->sd_count = 0;
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
+ genpd->device_count = 0;
+ genpd->suspended_count = 0;
+ genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+ genpd->domain.ops.prepare = pm_genpd_prepare;
+ genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume = pm_genpd_resume;
+ genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw = pm_genpd_thaw;
+ genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.complete = pm_genpd_complete;
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+}
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+}
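For illustration, a minimal sketch of how a platform might hook into the
interface above (my_domain and the my_pd_* handlers are hypothetical names;
error handling is elided):

#include <linux/pm_domain.h>

/* Platform-specific handlers for powering the domain's hardware island. */
static int my_pd_power_off(struct generic_pm_domain *genpd)
{
	/* Gate clocks / cut power to the island here. */
	return 0;
}

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
	/* Restore power to the island here. */
	return 0;
}

static struct generic_pm_domain my_domain = {
	.power_off = my_pd_power_off,
	.power_on = my_pd_power_on,
};

static int my_platform_pm_init(struct device *dev)
{
	/* Register the domain; "false" means it starts powered on. */
	pm_genpd_init(&my_domain, NULL, false);

	/* Route the device's PM callbacks through the domain. */
	return pm_genpd_add_device(&my_domain, dev);
}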
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681c..9508df71274b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
* __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
*
* If the device has not been suspended at run time, execute the
* suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
* return its error code. Otherwise, return zero.
*/
-static int __pm_generic_call(struct device *dev, int event)
+static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
switch (event) {
case PM_EVENT_SUSPEND:
- callback = pm->suspend;
+ callback = noirq ? pm->suspend_noirq : pm->suspend;
break;
case PM_EVENT_FREEZE:
- callback = pm->freeze;
+ callback = noirq ? pm->freeze_noirq : pm->freeze;
break;
case PM_EVENT_HIBERNATE:
- callback = pm->poweroff;
+ callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
case PM_EVENT_THAW:
- callback = pm->thaw;
+ callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
default:
callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
}
/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
* pm_generic_suspend - Generic suspend callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE);
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW);
+ return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
* __pm_generic_resume - Generic resume/restore callback for subsystems.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
*
* Execute the resume/restore callback provided by the @dev's driver, if
* defined. If it returns 0 and this is not the "noirq" stage, change the
* device's runtime PM status to 'active'. Return the callback's error code.
*/
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
switch (event) {
case PM_EVENT_RESUME:
- callback = pm->resume;
+ callback = noirq ? pm->resume_noirq : pm->resume;
break;
case PM_EVENT_RESTORE:
- callback = pm->restore;
+ callback = noirq ? pm->restore_noirq : pm->restore;
break;
default:
callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;
ret = callback(dev);
- if (!ret && pm_runtime_enabled(dev)) {
+ if (!ret && !noirq && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
}
/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME);
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.prepare = pm_generic_prepare,
.suspend = pm_generic_suspend,
+ .suspend_noirq = pm_generic_suspend_noirq,
.resume = pm_generic_resume,
+ .resume_noirq = pm_generic_resume_noirq,
.freeze = pm_generic_freeze,
+ .freeze_noirq = pm_generic_freeze_noirq,
.thaw = pm_generic_thaw,
+ .thaw_noirq = pm_generic_thaw_noirq,
.poweroff = pm_generic_poweroff,
+ .poweroff_noirq = pm_generic_poweroff_noirq,
.restore = pm_generic_restore,
+ .restore_noirq = pm_generic_restore_noirq,
.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
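As a sketch of the intended use, a subsystem that already points its regular
callbacks at the generic helpers can now fill in the "noirq" stage the same
way (my_bus_pm_ops is a hypothetical example):

static const struct dev_pm_ops my_bus_pm_ops = {
	.suspend	= pm_generic_suspend,
	.suspend_noirq	= pm_generic_suspend_noirq,
	.resume		= pm_generic_resume,
	.resume_noirq	= pm_generic_resume_noirq,
	/* freeze/thaw and poweroff/restore pair up the same way. */
};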
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..a85459126bc6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@@ -424,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
@@ -504,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ bool put = false;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -511,11 +513,21 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
- if (dev->pwr_domain) {
+ if (!dev->power.is_suspended)
+ goto Unlock;
+
+ pm_runtime_enable(dev);
+ put = true;
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -548,10 +560,17 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
TRACE_RESUME(error);
+
+ if (put)
+ pm_runtime_put_sync(dev);
+
return error;
}
@@ -630,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pwr_domain->ops.complete)
- dev->pwr_domain->ops.complete(dev);
+ if (dev->pm_domain->ops.complete)
+ dev->pm_domain->ops.complete(dev);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
if (dev->type->pm->complete)
@@ -670,7 +689,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@@ -733,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error;
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
if (error)
return error;
} else if (dev->type && dev->type->pm) {
@@ -832,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
int error = 0;
dpm_wait_for_children(dev, async);
- device_lock(dev);
if (async_error)
- goto End;
+ return 0;
+
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
async_error = -EBUSY;
- goto End;
+ return 0;
}
- if (dev->pwr_domain) {
+ device_lock(dev);
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -877,11 +902,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = !error;
+
device_unlock(dev);
complete_all(&dev->power.completion);
- if (error)
+ if (error) {
+ pm_runtime_put_sync(dev);
async_error = error;
+ } else if (dev->power.is_suspended) {
+ __pm_runtime_disable(dev, false);
+ }
return error;
}
@@ -968,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pwr_domain->ops.prepare)
- error = dev->pwr_domain->ops.prepare(dev);
- suspend_report_result(dev->pwr_domain->ops.prepare, error);
+ if (dev->pm_domain->ops.prepare)
+ error = dev->pm_domain->ops.prepare(dev);
+ suspend_report_result(dev->pm_domain->ops.prepare, error);
if (error)
goto End;
} else if (dev->type && dev->type->pm) {
@@ -1021,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- pm_runtime_put_sync(dev);
- error = pm_wakeup_pending() ?
- -EBUSY : device_prepare(dev, state);
+ error = device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -1042,7 +1067,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9e..5cc12322ef32 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
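A sketch of the intended pairing with opp_init_cpufreq_table() in a cpufreq
driver (names hypothetical):

static struct cpufreq_frequency_table *freq_table;

static int my_cpufreq_setup(struct device *cpu_dev)
{
	/* Build the table from the OPPs registered for this device. */
	return opp_init_cpufreq_table(cpu_dev, &freq_table);
}

static void my_cpufreq_teardown(struct device *cpu_dev)
{
	/* Free the table; the pointer is NULLed so teardown is idempotent. */
	opp_free_cpufreq_table(cpu_dev, &freq_table);
}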
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c55..8dc247c974af 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
if (dev->power.runtime_error)
retval = -EINVAL;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
else if (!pm_children_suspended(dev))
retval = -EBUSY;
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
* run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_idle;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_idle;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
@@ -262,15 +263,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
spin_lock_irq(&dev->power.lock);
}
dev->power.runtime_error = retval;
- return retval;
+ return retval != -EACCES ? retval : -EIO;
}
/**
- * rpm_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another suspend has been started earlier, either return immediately or wait
* for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
* pending idle notification. If the RPM_ASYNC flag is set then queue a
@@ -374,8 +375,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_suspend;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
@@ -388,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = 0;
+ dev->power.deferred_resume = false;
if (retval == -EAGAIN || retval == -EBUSY)
dev->power.runtime_error = 0;
else
@@ -429,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
/**
- * rpm_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
* earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_error)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
+ retval = -EACCES;
if (retval)
goto out;
@@ -550,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's run-time PM is disabled or it
+ * We can resume if the parent's runtime PM is disabled or it
* is set to ignore children.
*/
if (!parent->power.disable_depth
@@ -573,8 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_resume;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_resume;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
@@ -614,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
/**
- * pm_runtime_work - Universal run-time PM work function.
+ * pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate run-time PM function.
+ * is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
@@ -717,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
- * __pm_runtime_idle - Entry point for run-time idle operations.
+ * __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
@@ -746,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
@@ -775,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
- * __pm_runtime_resume - Entry point for run-time resume operations.
+ * __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
@@ -801,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * __pm_runtime_set_status - Set run-time PM status of a device.
+ * __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
- * @status: New run-time PM status of the device.
+ * @status: New runtime PM status of the device.
*
- * If run-time PM of the device is disabled or its power.runtime_error field is
+ * If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
@@ -851,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
/*
* It is invalid to put an active child under a parent that is
- * not active, has run-time PM enabled and the
+ * not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth
@@ -885,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
- * run-time PM operations involving the device in progress to complete.
+ * runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
@@ -933,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
* Prevent the device from being suspended by incrementing its usage counter and
* if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all run-time PM operations involving the device in
+ * from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
@@ -963,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
- * __pm_runtime_disable - Disable run-time PM of a device.
+ * __pm_runtime_disable - Disable runtime PM of a device.
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
* Increment power.disable_depth for the device and if was zero previously,
- * cancel all pending run-time PM requests for the device and wait for all
+ * cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
- * suspended after its run-time PM has been disabled.
+ * suspended after its runtime PM has been disabled.
*
* If @check_resume is set and there's a resume request pending when
* __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its run-time PM.
+ * function will wake up the device before disabling its runtime PM.
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
@@ -987,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/*
* Wake up the device if there's a resume request pending, because that
- * means there probably is some I/O to process and disabling run-time PM
+ * means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending
@@ -1012,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
- * pm_runtime_enable - Enable run-time PM of a device.
+ * pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
- * pm_runtime_forbid - Block run-time PM of a device.
+ * pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
- * pm_runtime_allow - Unblock run-time PM of a device.
+ * pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no run-time PM
- * callbacks of its own. The run-time sysfs attributes will be removed.
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
@@ -1156,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
- * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
- * changes the other way, allow run-time suspends.
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
@@ -1177,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
/**
- * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
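Since disabled runtime PM is now reported as -EACCES instead of -EAGAIN,
callers can tell "disabled" apart from "temporarily busy"; a sketch
(my_try_suspend is hypothetical):

static void my_try_suspend(struct device *dev)
{
	int ret = pm_runtime_suspend(dev);

	if (ret == -EACCES)
		dev_dbg(dev, "runtime PM is disabled for this device\n");
	else if (ret == -EAGAIN || ret == -EBUSY)
		dev_dbg(dev, "device (or a child) is still in use\n");
}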
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b8979611..942d6a7c9ae1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
+ device_lock(dev);
if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
pm_runtime_allow(dev);
else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
pm_runtime_forbid(dev);
else
- return -EINVAL;
+ n = -EINVAL;
+ device_unlock(dev);
return n;
}
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
+ device_lock(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ device_unlock(dev);
return n;
}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62fe..af10abecb99b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
new file mode 100644
index 000000000000..fabbf6cc5367
--- /dev/null
+++ b/drivers/base/regmap/Kconfig
@@ -0,0 +1,13 @@
+# Generic register map support. There are no user-serviceable options here;
+# this is an API intended to be used by other kernel subsystems. These
+# subsystems should select the appropriate symbols.
+
+config REGMAP
+ default y if (REGMAP_I2C || REGMAP_SPI)
+ bool
+
+config REGMAP_I2C
+ tristate
+
+config REGMAP_SPI
+ tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
new file mode 100644
index 000000000000..f476f4571295
--- /dev/null
+++ b/drivers/base/regmap/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_REGMAP) += regmap.o
+obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
new file mode 100644
index 000000000000..c2231ff06cbc
--- /dev/null
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -0,0 +1,115 @@
+/*
+ * Register map access API - I2C support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(i2c, data, count);
+ if (ret == count)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_gather_write(struct device *dev,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* If the I2C controller can't do a gather, tell the core and it
+ * will substitute in a linear write for us.
+ */
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ return -ENOTSUPP;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = val_size;
+ xfer[1].buf = (void *)val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_read(struct device *dev,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = val_size;
+ xfer[1].buf = val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static struct regmap_bus regmap_i2c = {
+ .type = &i2c_bus_type,
+ .write = regmap_i2c_write,
+ .gather_write = regmap_i2c_gather_write,
+ .read = regmap_i2c_read,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ return regmap_init(&i2c->dev, &regmap_i2c, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_i2c);
+
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
new file mode 100644
index 000000000000..4deba0621bc7
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi.c
@@ -0,0 +1,72 @@
+/*
+ * Register map access API - SPI support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/init.h>
+
+static int regmap_spi_write(struct device *dev, const void *data, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int regmap_spi_gather_write(struct device *dev,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
+ { .tx_buf = val, .len = val_len, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int regmap_spi_read(struct device *dev,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus regmap_spi = {
+ .type = &spi_bus_type,
+ .write = regmap_spi_write,
+ .gather_write = regmap_spi_gather_write,
+ .read = regmap_spi_read,
+ .owner = THIS_MODULE,
+ .read_flag_mask = 0x80,
+};
+
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ return regmap_init(&spi->dev, &regmap_spi, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spi);
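The SPI variant is used the same way; since this bus sets read_flag_mask to
0x80, reads automatically get the top bit of the register address set. A
sketch for a hypothetical device with 7-bit registers and 9-bit values (one
of the formats regmap.c can pack):

static const struct regmap_config my_spi_config = {
	.reg_bits = 7,
	.val_bits = 9,
};

static struct regmap *my_spi_setup(struct spi_device *spi)
{
	return regmap_init_spi(spi, &my_spi_config);
}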
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
new file mode 100644
index 000000000000..cf3565cae93d
--- /dev/null
+++ b/drivers/base/regmap/regmap.c
@@ -0,0 +1,455 @@
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+
+#include <linux/regmap.h>
+
+struct regmap;
+
+struct regmap_format {
+ size_t buf_size;
+ size_t reg_bytes;
+ size_t val_bytes;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg);
+ void (*format_val)(void *buf, unsigned int val);
+ unsigned int (*parse_val)(void *buf);
+};
+
+struct regmap {
+ struct mutex lock;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+};
+
+static void regmap_format_4_12_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 12) | val);
+}
+
+static void regmap_format_7_9_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 9) | val);
+}
+
+static void regmap_format_8(void *buf, unsigned int val)
+{
+ u8 *b = buf;
+
+ b[0] = val;
+}
+
+static void regmap_format_16(void *buf, unsigned int val)
+{
+ __be16 *b = buf;
+
+ b[0] = cpu_to_be16(val);
+}
+
+static unsigned int regmap_parse_8(void *buf)
+{
+ u8 *b = buf;
+
+ return b[0];
+}
+
+static unsigned int regmap_parse_16(void *buf)
+{
+ __be16 *b = buf;
+
+ b[0] = be16_to_cpu(b[0]);
+
+ return b[0];
+}
+
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions.
+ */
+struct regmap *regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+ int ret = -EINVAL;
+
+ if (!bus || !config)
+ return ERR_PTR(-EINVAL);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_init(&map->lock);
+ map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
+ map->format.reg_bytes = config->reg_bits / 8;
+ map->format.val_bytes = config->val_bits / 8;
+ map->dev = dev;
+ map->bus = bus;
+
+ switch (config->reg_bits) {
+ case 4:
+ switch (config->val_bits) {
+ case 12:
+ map->format.format_write = regmap_format_4_12_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
+ case 7:
+ switch (config->val_bits) {
+ case 9:
+ map->format.format_write = regmap_format_7_9_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
+ case 8:
+ map->format.format_reg = regmap_format_8;
+ break;
+
+ case 16:
+ map->format.format_reg = regmap_format_16;
+ break;
+
+ default:
+ goto err_map;
+ }
+
+ switch (config->val_bits) {
+ case 8:
+ map->format.format_val = regmap_format_8;
+ map->format.parse_val = regmap_parse_8;
+ break;
+ case 16:
+ map->format.format_val = regmap_format_16;
+ map->format.parse_val = regmap_parse_16;
+ break;
+ }
+
+ if (!map->format.format_write &&
+ !(map->format.format_reg && map->format.format_val))
+ goto err_map;
+
+ map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_bus;
+ }
+
+ return map;
+
+err_bus:
+ module_put(map->bus->owner);
+err_map:
+ kfree(map);
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(regmap_init);
+
+/**
+ * regmap_exit(): Free a previously allocated register map
+ *
+ * @map: Register map to free
+ */
+void regmap_exit(struct regmap *map)
+{
+ kfree(map->work_buf);
+ module_put(map->bus->owner);
+ kfree(map);
+}
+EXPORT_SYMBOL_GPL(regmap_exit);
+
+static int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ void *buf;
+ int ret = -ENOTSUPP;
+ size_t len;
+
+ map->format.format_reg(map->work_buf, reg);
+
+ /* Try to do a gather write if we can */
+ if (map->bus->gather_write)
+ ret = map->bus->gather_write(map->dev, map->work_buf,
+ map->format.reg_bytes,
+ val, val_len);
+
+ /* Otherwise fall back on linearising by hand. */
+ if (ret == -ENOTSUPP) {
+ len = map->format.reg_bytes + val_len;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, map->work_buf, map->format.reg_bytes);
+ memcpy(buf + map->format.reg_bytes, val, val_len);
+ ret = map->bus->write(map->dev, buf, len);
+
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+static int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ BUG_ON(!map->format.format_write && !map->format.format_val);
+
+ if (map->format.format_write) {
+ map->format.format_write(map, reg, val);
+
+ return map->bus->write(map->dev, map->work_buf,
+ map->format.buf_size);
+ } else {
+ map->format.format_val(map->work_buf + map->format.reg_bytes,
+ val);
+ return _regmap_raw_write(map, reg,
+ map->work_buf + map->format.reg_bytes,
+ map->format.val_bytes);
+ }
+}
+
+/**
+ * regmap_write(): Write a value to a single register
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_write(map, reg, val);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write);
+
+/**
+ * regmap_raw_write(): Write raw values to one or more registers
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_raw_write(map, reg, val, val_len);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write);
+
+static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ unsigned int val_len)
+{
+ u8 *buf = map->work_buf;
+ int ret;
+
+ map->format.format_reg(map->work_buf, reg);
+
+ /*
+ * Some buses flag reads by setting the high bits in the
+ * register address; since it's always the high bits for all
+ * current formats we can do this here rather than in
+ * formatting. This may break if we get interesting formats.
+ */
+ if (map->bus->read_flag_mask)
+ buf[0] |= map->bus->read_flag_mask;
+
+ ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
+ val, val_len);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int _regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ if (ret == 0)
+ *val = map->format.parse_val(map->work_buf);
+
+ return ret;
+}
+
+/**
+ * regmap_read(): Read a value from a single register
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_read(map, reg, val);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read);
+
+/**
+ * regmap_raw_read(): Read raw data from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value
+ * @val_len: Size of data to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_len)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_raw_read(map, reg, val, val_len);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_read);
+
+/**
+ * regmap_bulk_read(): Read multiple registers from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret, i;
+ size_t val_bytes = map->format.val_bytes;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_val(val + i);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_read);
+
+/**
+ * regmap_update_bits(): Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+ unsigned int tmp;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_read(map, reg, &tmp);
+ if (ret != 0)
+ goto out;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ ret = _regmap_write(map, reg, tmp);
+
+out:
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits);
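Putting the API together, a sketch of single-register I/O by a client
(register addresses, masks and values are made up):

static int my_chip_init(struct regmap *map)
{
	unsigned int id;
	int ret;

	ret = regmap_read(map, 0x00, &id);	/* read a (made-up) ID reg */
	if (ret != 0)
		return ret;

	ret = regmap_write(map, 0x01, 0x80);	/* e.g. take out of reset */
	if (ret != 0)
		return ret;

	/* Set bits 5:4 of register 0x02 to 0b10 without touching the rest. */
	return regmap_update_bits(map, 0x02, 0x30, 0x20);
}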
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3cb7d1..e8d11b6630ee 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ pr_debug("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ ret = check_wakeup_irqs();
+ if (ret)
+ return ret;
+
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");