author     Linus Torvalds <torvalds@linux-foundation.org>   2012-01-08 13:10:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-01-08 13:10:57 -0800
commit     eb59c505f8a5906ad2e053d14fab50eb8574fd6f (patch)
tree       c6e875adc12b481b916e847e8f80b8881a0fb02c /drivers/base
parent     1619ed8f60959829d070d8f39cd2f8ca0e7135ce (diff)
parent     c233523b3d392e530033a7587d7970dc62a02361 (diff)
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
PM / Hibernate: Implement compat_ioctl for /dev/snapshot
PM / Freezer: fix return value of freezable_schedule_timeout_killable()
PM / shmobile: Allow the A4R domain to be turned off at run time
PM / input / touchscreen: Make st1232 use device PM QoS constraints
PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
PM / shmobile: Remove the stay_on flag from SH7372's PM domains
PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
PM: Drop generic_subsys_pm_ops
PM / Sleep: Remove forward-only callbacks from AMBA bus type
PM / Sleep: Remove forward-only callbacks from platform bus type
PM: Run the driver callback directly if the subsystem one is not there
PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
PM / Sleep: Merge internal functions in generic_ops.c
PM / Sleep: Simplify generic system suspend callbacks
PM / Hibernate: Remove deprecated hibernation snapshot ioctls
PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
ARM: S3C64XX: Implement basic power domain support
PM / shmobile: Use common always on power domain governor
...
Fix up trivial conflict in fs/xfs/xfs_buf.c due to removal of unused
XBT_FORCE_SLEEP bit
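
Several of the commits above ("PM: Run the driver callback directly if the subsystem one is not there", "PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers", and the removal of the forward-only platform bus callbacks) change how the PM core picks which callback to run: it checks the PM domain, device type, class and bus in that order, and if none of them provides a callback for the current phase it falls back to the driver's own dev_pm_ops. A minimal sketch of what that allows a platform driver to do; the foo_* names and callback bodies are hypothetical, only the dev_pm_ops plumbing is the real API:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical hardware quiesce/restore helpers. */
static int foo_suspend(struct device *dev)
{
	/* stop DMA, mask interrupts, etc. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}

static int foo_suspend_noirq(struct device *dev)
{
	/*
	 * With platform_pm_suspend_noirq() removed from platform.c, this
	 * is now run directly by the PM core's dpm_run_callback() path.
	 */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");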
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/firmware_class.c           4
-rw-r--r--  drivers/base/platform.c               115
-rw-r--r--  drivers/base/power/Makefile             2
-rw-r--r--  drivers/base/power/domain.c           539
-rw-r--r--  drivers/base/power/domain_governor.c  156
-rw-r--r--  drivers/base/power/generic_ops.c       91
-rw-r--r--  drivers/base/power/main.c             375
-rw-r--r--  drivers/base/power/qos.c               49
-rw-r--r--  drivers/base/power/runtime.c          157
9 files changed, 912 insertions, 576 deletions
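
Most of the insertions land in drivers/base/power/domain.c and the new domain_governor.c: the old genpd->start_device()/stop_device() hooks give way to per-device callbacks in struct gpd_dev_ops plus latency figures in struct gpd_timing_data, which the GENPD_DEV_TIMED_CALLBACK() macro measures and the default_stop_ok()/default_power_down_ok() governor consults. A hedged sketch of how platform code might attach a device to a domain with such data; the foo_* names, the latency numbers and the callback bodies are invented for illustration, only __pm_genpd_add_device() and pm_genpd_add_callbacks() come from the patch itself:

#include <linux/device.h>
#include <linux/pm_domain.h>

/* Hypothetical per-device latencies, in nanoseconds (placeholder values). */
static struct gpd_timing_data foo_td = {
	.stop_latency_ns		= 20000,
	.start_latency_ns		= 50000,
	.save_state_latency_ns		= 100000,
	.restore_state_latency_ns	= 200000,
	.break_even_ns			= 300000,
};

/* Hypothetical device-specific state save/restore callbacks. */
static int foo_save_state(struct device *dev)
{
	/* stash device registers before the domain is powered off */
	return 0;
}

static int foo_restore_state(struct device *dev)
{
	/* reprogram the device after the domain comes back up */
	return 0;
}

static struct gpd_dev_ops foo_gpd_ops = {
	.save_state	= foo_save_state,
	.restore_state	= foo_restore_state,
};

static int foo_attach_to_domain(struct generic_pm_domain *genpd,
				struct device *dev)
{
	int ret;

	/* Add the device together with its timing data ... */
	ret = __pm_genpd_add_device(genpd, dev, &foo_td);
	if (ret)
		return ret;

	/* ... and override the default save/restore callbacks. */
	return pm_genpd_add_callbacks(dev, &foo_gpd_ops, NULL);
}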
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 3719c94be19c..26ab358dac62 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p, return 0; } + read_lock_usermodehelper(); + if (WARN_ON(usermodehelper_is_disabled())) { dev_err(device, "firmware: %s will not be loaded\n", name); retval = -EBUSY; @@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p, fw_destroy_instance(fw_priv); out: + read_unlock_usermodehelper(); + if (retval) { release_firmware(firmware); *firmware_p = NULL; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index a7c06374062e..f0c605e99ade 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev) return ret; } -int platform_pm_prepare(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (drv && drv->pm && drv->pm->prepare) - ret = drv->pm->prepare(dev); - - return ret; -} - -void platform_pm_complete(struct device *dev) -{ - struct device_driver *drv = dev->driver; - - if (drv && drv->pm && drv->pm->complete) - drv->pm->complete(dev); -} - #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND @@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev) return ret; } -int platform_pm_suspend_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend_noirq) - ret = drv->pm->suspend_noirq(dev); - } - - return ret; -} - int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; @@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev) return ret; } -int platform_pm_resume_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume_noirq) - ret = drv->pm->resume_noirq(dev); - } - - return ret; -} - #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS @@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev) return ret; } -int platform_pm_freeze_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze_noirq) - ret = drv->pm->freeze_noirq(dev); - } - - return ret; -} - int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; @@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev) return ret; } -int platform_pm_thaw_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw_noirq) - ret = drv->pm->thaw_noirq(dev); - } - - return ret; -} - int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; @@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev) return ret; } -int platform_pm_poweroff_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff_noirq) - ret = drv->pm->poweroff_noirq(dev); - } - - return ret; -} - int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; @@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev) return ret; } -int platform_pm_restore_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - 
if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore_noirq) - ret = drv->pm->restore_noirq(dev); - } - - return ret; -} - #endif /* CONFIG_HIBERNATE_CALLBACKS */ static const struct dev_pm_ops platform_dev_pm_ops = { diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 81676dd17900..2e58ebb1f6c0 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o -obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 6790cf7eba5a..92e6a9048065 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -15,13 +15,44 @@ #include <linux/err.h> #include <linux/sched.h> #include <linux/suspend.h> +#include <linux/export.h> + +#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ +({ \ + type (*__routine)(struct device *__d); \ + type __ret = (type)0; \ + \ + __routine = genpd->dev_ops.callback; \ + if (__routine) { \ + __ret = __routine(dev); \ + } else { \ + __routine = dev_gpd_data(dev)->ops.callback; \ + if (__routine) \ + __ret = __routine(dev); \ + } \ + __ret; \ +}) + +#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ +({ \ + ktime_t __start = ktime_get(); \ + type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ + s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ + struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \ + if (__elapsed > __gpd_data->td.field) { \ + __gpd_data->td.field = __elapsed; \ + dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + __elapsed); \ + } \ + __retval; \ +}) static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); #ifdef CONFIG_PM -static struct generic_pm_domain *dev_to_genpd(struct device *dev) +struct generic_pm_domain *dev_to_genpd(struct device *dev) { if (IS_ERR_OR_NULL(dev->pm_domain)) return ERR_PTR(-EINVAL); @@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) return pd_to_genpd(dev->pm_domain); } +static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, + stop_latency_ns, "stop"); +} + +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, + start_latency_ns, "start"); +} + +static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, + save_state_latency_ns, "state save"); +} + +static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, + restore_state_latency_ns, + "state restore"); +} + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd) } if (genpd->power_on) { + ktime_t time_start = ktime_get(); + s64 elapsed_ns; + ret = genpd->power_on(genpd); if (ret) goto err; + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_on_latency_ns) { + genpd->power_on_latency_ns = elapsed_ns; + if (genpd->name) + 
pr_warning("%s: Power-on latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } } genpd_set_active(genpd); @@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - struct device_driver *drv = dev->driver; int ret = 0; if (gpd_data->need_restore) @@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, mutex_unlock(&genpd->lock); - if (drv && drv->pm && drv->pm->runtime_suspend) { - if (genpd->start_device) - genpd->start_device(dev); - - ret = drv->pm->runtime_suspend(dev); - - if (genpd->stop_device) - genpd->stop_device(dev); - } + genpd_start_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); + genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); @@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - struct device_driver *drv = dev->driver; if (!gpd_data->need_restore) return; mutex_unlock(&genpd->lock); - if (drv && drv->pm && drv->pm->runtime_resume) { - if (genpd->start_device) - genpd->start_device(dev); - - drv->pm->runtime_resume(dev); - - if (genpd->stop_device) - genpd->stop_device(dev); - } + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); @@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } if (genpd->power_off) { + ktime_t time_start; + s64 elapsed_ns; + if (atomic_read(&genpd->sd_count) > 0) { ret = -EBUSY; goto out; } + time_start = ktime_get(); + /* * If sd_count > 0 at this point, one of the subdomains hasn't * managed to call pm_genpd_poweron() for the master yet after @@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_set_active(genpd); goto out; } + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_off_latency_ns) { + genpd->power_off_latency_ns = elapsed_ns; + if (genpd->name) + pr_warning("%s: Power-off latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } } genpd->status = GPD_STATE_POWER_OFF; + genpd->power_off_time = ktime_get(); + + /* Update PM QoS information for devices in the domain. */ + list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td = &to_gpd_data(pdd)->td; + + pm_runtime_update_max_time_suspended(pdd->dev, + td->start_latency_ns + + td->restore_state_latency_ns + + genpd->power_on_latency_ns); + } list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; + bool (*stop_ok)(struct device *__dev); + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (genpd->stop_device) { - int ret = genpd->stop_device(dev); - if (ret) - return ret; - } + stop_ok = genpd->gov ? 
genpd->gov->stop_ok : NULL; + if (stop_ok && !stop_ok(dev)) + return -EBUSY; + + ret = genpd_stop_dev(genpd, dev); + if (ret) + return ret; + + pm_runtime_update_max_time_suspended(dev, + dev_gpd_data(dev)->td.start_latency_ns); /* * If power.irq_safe is set, this routine will be run with interrupts @@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev) mutex_unlock(&genpd->lock); out: - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); return 0; } @@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP +static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); +} + +static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); +} + +static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); +} + +static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); +} + +static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume, dev); +} + +static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); +} + +static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); +} + +static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); +} + +static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); +} + /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. @@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) if (!device_can_wakeup(dev)) return false; - active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); + active_wakeup = genpd_dev_active_wakeup(genpd, dev); return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; } @@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, * so pm_genpd_poweron() will return immediately, but if the device - * is suspended (e.g. it's been stopped by .stop_device()), we need + * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. */ pm_runtime_resume(dev); @@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); + return genpd->suspend_power_off ? 
0 : genpd_suspend_dev(genpd, dev); } /** @@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = pm_generic_suspend_noirq(dev); + ret = genpd_suspend_late(genpd, dev); if (ret) return ret; - if (dev->power.wakeup_path - && genpd->active_wakeup && genpd->active_wakeup(dev)) + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) return 0; - if (genpd->stop_device) - genpd->stop_device(dev); + genpd_stop_dev(genpd, dev); /* * Since all of the "noirq" callbacks are executed sequentially, it is @@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev) */ pm_genpd_poweron(genpd); genpd->suspended_count--; - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); - return pm_generic_resume_noirq(dev); + return genpd_resume_early(genpd, dev); } /** @@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); + return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); } /** @@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); + return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); } /** @@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = pm_generic_freeze_noirq(dev); + ret = genpd_freeze_late(genpd, dev); if (ret) return ret; - if (genpd->stop_device) - genpd->stop_device(dev); + genpd_stop_dev(genpd, dev); return 0; } @@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); - return pm_generic_thaw_noirq(dev); + return genpd_thaw_early(genpd, dev); } /** @@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); -} - -/** - * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. - * @dev: Device to suspend. - * - * Power off a device under the assumption that its pm_domain field points to - * the domain member of an object of type struct generic_pm_domain representing - * a PM domain consisting of I/O devices. - */ -static int pm_genpd_dev_poweroff(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); -} - -/** - * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. - * @dev: Device to suspend. - * - * Carry out a late powering off of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
- */ -static int pm_genpd_dev_poweroff_noirq(struct device *dev) -{ - struct generic_pm_domain *genpd; - int ret; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - if (genpd->suspend_power_off) - return 0; - - ret = pm_generic_poweroff_noirq(dev); - if (ret) - return ret; - - if (dev->power.wakeup_path - && genpd->active_wakeup && genpd->active_wakeup(dev)) - return 0; - - if (genpd->stop_device) - genpd->stop_device(dev); - - /* - * Since all of the "noirq" callbacks are executed sequentially, it is - * guaranteed that this function will never run twice in parallel for - * the same PM domain, so it is not necessary to use locking here. - */ - genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); - - return 0; + return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); } /** @@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_poweron(genpd); genpd->suspended_count--; - if (genpd->start_device) - genpd->start_device(dev); - - return pm_generic_restore_noirq(dev); -} - -/** - * pm_genpd_restore - Restore a device belonging to an I/O power domain. - * @dev: Device to resume. - * - * Restore a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a power domain consisting of I/O devices. - */ -static int pm_genpd_restore(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; + genpd_start_dev(genpd, dev); - return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); + return genpd_resume_early(genpd, dev); } /** @@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev) #define pm_genpd_freeze_noirq NULL #define pm_genpd_thaw_noirq NULL #define pm_genpd_thaw NULL -#define pm_genpd_dev_poweroff_noirq NULL -#define pm_genpd_dev_poweroff NULL #define pm_genpd_restore_noirq NULL -#define pm_genpd_restore NULL #define pm_genpd_complete NULL #endif /* CONFIG_PM_SLEEP */ /** - * pm_genpd_add_device - Add a device to an I/O PM domain. + * __pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. */ -int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) +int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct gpd_timing_data *td) { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; @@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) gpd_data->base.dev = dev; gpd_data->need_restore = false; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); + if (td) + gpd_data->td = *td; out: genpd_release_lock(genpd); @@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, } /** + * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. + * @dev: Device to add the callbacks to. + * @ops: Set of callbacks to add. + * @td: Timing data to add to the device along with the callbacks (optional). 
+ */ +int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, + struct gpd_timing_data *td) +{ + struct pm_domain_data *pdd; + int ret = 0; + + if (!(dev && dev->power.subsys_data && ops)) + return -EINVAL; + + pm_runtime_disable(dev); + device_pm_lock(); + + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + + gpd_data->ops = *ops; + if (td) + gpd_data->td = *td; + } else { + ret = -EINVAL; + } + + device_pm_unlock(); + pm_runtime_enable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); + +/** + * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. + * @dev: Device to remove the callbacks from. + * @clear_td: If set, clear the device's timing data too. + */ +int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) +{ + struct pm_domain_data *pdd; + int ret = 0; + + if (!(dev && dev->power.subsys_data)) + return -EINVAL; + + pm_runtime_disable(dev); + device_pm_lock(); + + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + + gpd_data->ops = (struct gpd_dev_ops){ 0 }; + if (clear_td) + gpd_data->td = (struct gpd_timing_data){ 0 }; + } else { + ret = -EINVAL; + } + + device_pm_unlock(); + pm_runtime_enable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); + +/* Default device callbacks for generic PM domains. */ + +/** + * pm_genpd_default_save_state - Default "save device state" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_save_state(struct device *dev) +{ + int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; + + cb = dev_gpd_data(dev)->ops.save_state; + if (cb) + return cb(dev); + + if (drv && drv->pm && drv->pm->runtime_suspend) + return drv->pm->runtime_suspend(dev); + + return 0; +} + +/** + * pm_genpd_default_restore_state - Default PM domians "restore device state". + * @dev: Device to handle. + */ +static int pm_genpd_default_restore_state(struct device *dev) +{ + int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; + + cb = dev_gpd_data(dev)->ops.restore_state; + if (cb) + return cb(dev); + + if (drv && drv->pm && drv->pm->runtime_resume) + return drv->pm->runtime_resume(dev); + + return 0; +} + +/** + * pm_genpd_default_suspend - Default "device suspend" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_suspend(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; + + return cb ? cb(dev) : pm_generic_suspend(dev); +} + +/** + * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_suspend_late(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; + + return cb ? cb(dev) : pm_generic_suspend_noirq(dev); +} + +/** + * pm_genpd_default_resume_early - Default "early device resume" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_resume_early(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; + + return cb ? cb(dev) : pm_generic_resume_noirq(dev); +} + +/** + * pm_genpd_default_resume - Default "device resume" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_resume(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; + + return cb ? 
cb(dev) : pm_generic_resume(dev); +} + +/** + * pm_genpd_default_freeze - Default "device freeze" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_freeze(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; + + return cb ? cb(dev) : pm_generic_freeze(dev); +} + +/** + * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_freeze_late(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; + + return cb ? cb(dev) : pm_generic_freeze_noirq(dev); +} + +/** + * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_thaw_early(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; + + return cb ? cb(dev) : pm_generic_thaw_noirq(dev); +} + +/** + * pm_genpd_default_thaw - Default "device thaw" for PM domians. + * @dev: Device to handle. + */ +static int pm_genpd_default_thaw(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; + + return cb ? cb(dev) : pm_generic_thaw(dev); +} + +/** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. * @gov: PM domain governor to associate with the domain (may be NULL). @@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->resume_count = 0; genpd->device_count = 0; genpd->suspended_count = 0; + genpd->max_off_time_ns = -1; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; @@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; genpd->domain.ops.thaw = pm_genpd_thaw; - genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; - genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; + genpd->domain.ops.poweroff = pm_genpd_suspend; + genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; - genpd->domain.ops.restore = pm_genpd_restore; + genpd->domain.ops.restore = pm_genpd_resume; genpd->domain.ops.complete = pm_genpd_complete; + genpd->dev_ops.save_state = pm_genpd_default_save_state; + genpd->dev_ops.restore_state = pm_genpd_default_restore_state; + genpd->dev_ops.suspend = pm_genpd_default_suspend; + genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; + genpd->dev_ops.resume_early = pm_genpd_default_resume_early; + genpd->dev_ops.resume = pm_genpd_default_resume; + genpd->dev_ops.freeze = pm_genpd_default_freeze; + genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; + genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; + genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c new file mode 100644 index 000000000000..51527ee92d10 --- /dev/null +++ b/drivers/base/power/domain_governor.c @@ -0,0 +1,156 @@ +/* + * drivers/base/power/domain_governor.c - Governors for device PM domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. 
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/hrtimer.h> + +/** + * default_stop_ok - Default PM domain governor routine for stopping devices. + * @dev: Device to check. + */ +bool default_stop_ok(struct device *dev) +{ + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + + dev_dbg(dev, "%s()\n", __func__); + + if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0) + return true; + + return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns + && td->break_even_ns < dev->power.max_time_suspended_ns; +} + +/** + * default_power_down_ok - Default generic PM domain power off governor routine. + * @pd: PM domain to check. + * + * This routine must be executed under the PM domain's lock. + */ +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct gpd_link *link; + struct pm_domain_data *pdd; + s64 min_dev_off_time_ns; + s64 off_on_time_ns; + ktime_t time_now = ktime_get(); + + off_on_time_ns = genpd->power_off_latency_ns + + genpd->power_on_latency_ns; + /* + * It doesn't make sense to remove power from the domain if saving + * the state of all devices in it and the power off/power on operations + * take too much time. + * + * All devices in this domain have been stopped already at this point. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + if (pdd->dev->driver) + off_on_time_ns += + to_gpd_data(pdd)->td.save_state_latency_ns; + } + + /* + * Check if subdomains can be off for enough time. + * + * All subdomains have been powered off already at this point. + */ + list_for_each_entry(link, &genpd->master_links, master_node) { + struct generic_pm_domain *sd = link->slave; + s64 sd_max_off_ns = sd->max_off_time_ns; + + if (sd_max_off_ns < 0) + continue; + + sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now, + sd->power_off_time)); + /* + * Check if the subdomain is allowed to be off long enough for + * the current domain to turn off and on (that's how much time + * it will have to wait worst case). + */ + if (sd_max_off_ns <= off_on_time_ns) + return false; + } + + /* + * Check if the devices in the domain can be off enough time. + */ + min_dev_off_time_ns = -1; + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td; + struct device *dev = pdd->dev; + s64 dev_off_time_ns; + + if (!dev->driver || dev->power.max_time_suspended_ns < 0) + continue; + + td = &to_gpd_data(pdd)->td; + dev_off_time_ns = dev->power.max_time_suspended_ns - + (td->start_latency_ns + td->restore_state_latency_ns + + ktime_to_ns(ktime_sub(time_now, + dev->power.suspend_time))); + if (dev_off_time_ns <= off_on_time_ns) + return false; + + if (min_dev_off_time_ns > dev_off_time_ns + || min_dev_off_time_ns < 0) + min_dev_off_time_ns = dev_off_time_ns; + } + + if (min_dev_off_time_ns < 0) { + /* + * There are no latency constraints, so the domain can spend + * arbitrary time in the "off" state. + */ + genpd->max_off_time_ns = -1; + return true; + } + + /* + * The difference between the computed minimum delta and the time needed + * to turn the domain on is the maximum theoretical time this domain can + * spend in the "off" state. 
+ */ + min_dev_off_time_ns -= genpd->power_on_latency_ns; + + /* + * If the difference between the computed minimum delta and the time + * needed to turn the domain off and back on on is smaller than the + * domain's power break even time, removing power from the domain is not + * worth it. + */ + if (genpd->break_even_ns > + min_dev_off_time_ns - genpd->power_off_latency_ns) + return false; + + genpd->max_off_time_ns = min_dev_off_time_ns; + return true; +} + +struct dev_power_governor simple_qos_governor = { + .stop_ok = default_stop_ok, + .power_down_ok = default_power_down_ok, +}; + +static bool always_on_power_down_ok(struct dev_pm_domain *domain) +{ + return false; +} + +/** + * pm_genpd_gov_always_on - A governor implementing an always-on policy + */ +struct dev_power_governor pm_domain_always_on_gov = { + .power_down_ok = always_on_power_down_ok, + .stop_ok = default_stop_ok, +}; diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 265a0ee3b49e..10bdd793f0bd 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev) * @event: PM transition of the system under way. * @bool: Whether or not this is the "noirq" stage. * - * If the device has not been suspended at run time, execute the - * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and - * return its error code. Otherwise, return zero. + * Execute the PM callback corresponding to @event provided by the driver of + * @dev, if defined, and return its error code. Return 0 if the callback is + * not present. */ static int __pm_generic_call(struct device *dev, int event, bool noirq) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int (*callback)(struct device *); - if (!pm || pm_runtime_suspended(dev)) + if (!pm) return 0; switch (event) { @@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq) case PM_EVENT_HIBERNATE: callback = noirq ? pm->poweroff_noirq : pm->poweroff; break; + case PM_EVENT_RESUME: + callback = noirq ? pm->resume_noirq : pm->resume; + break; case PM_EVENT_THAW: callback = noirq ? pm->thaw_noirq : pm->thaw; break; + case PM_EVENT_RESTORE: + callback = noirq ? pm->restore_noirq : pm->restore; + break; default: callback = NULL; break; @@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_thaw); /** - * __pm_generic_resume - Generic resume/restore callback for subsystems. - * @dev: Device to handle. - * @event: PM transition of the system under way. - * @bool: Whether or not this is the "noirq" stage. - * - * Execute the resume/resotre callback provided by the @dev's driver, if - * defined. If it returns 0, change the device's runtime PM status to 'active'. - * Return the callback's error code. - */ -static int __pm_generic_resume(struct device *dev, int event, bool noirq) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - int (*callback)(struct device *); - int ret; - - if (!pm) - return 0; - - switch (event) { - case PM_EVENT_RESUME: - callback = noirq ? pm->resume_noirq : pm->resume; - break; - case PM_EVENT_RESTORE: - callback = noirq ? 
pm->restore_noirq : pm->restore; - break; - default: - callback = NULL; - break; - } - - if (!callback) - return 0; - - ret = callback(dev); - if (!ret && !noirq && pm_runtime_enabled(dev)) { - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } - - return ret; -} - -/** * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. * @dev: Device to resume. */ int pm_generic_resume_noirq(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESUME, true); + return __pm_generic_call(dev, PM_EVENT_RESUME, true); } EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); @@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); */ int pm_generic_resume(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESUME, false); + return __pm_generic_call(dev, PM_EVENT_RESUME, false); } EXPORT_SYMBOL_GPL(pm_generic_resume); @@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume); */ int pm_generic_restore_noirq(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESTORE, true); + return __pm_generic_call(dev, PM_EVENT_RESTORE, true); } EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); @@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); */ int pm_generic_restore(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); + return __pm_generic_call(dev, PM_EVENT_RESTORE, false); } EXPORT_SYMBOL_GPL(pm_generic_restore); @@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev) pm_runtime_idle(dev); } #endif /* CONFIG_PM_SLEEP */ - -struct dev_pm_ops generic_subsys_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .prepare = pm_generic_prepare, - .suspend = pm_generic_suspend, - .suspend_noirq = pm_generic_suspend_noirq, - .resume = pm_generic_resume, - .resume_noirq = pm_generic_resume_noirq, - .freeze = pm_generic_freeze, - .freeze_noirq = pm_generic_freeze_noirq, - .thaw = pm_generic_thaw, - .thaw_noirq = pm_generic_thaw_noirq, - .poweroff = pm_generic_poweroff, - .poweroff_noirq = pm_generic_poweroff_noirq, - .restore = pm_generic_restore, - .restore_noirq = pm_generic_restore_noirq, - .complete = pm_generic_complete, -#endif -#ifdef CONFIG_PM_RUNTIME - .runtime_suspend = pm_generic_runtime_suspend, - .runtime_resume = pm_generic_runtime_resume, - .runtime_idle = pm_generic_runtime_idle, -#endif -}; -EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c3d2dfcf438d..e2cc3d2e0ecc 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -32,6 +32,8 @@ #include "../base.h" #include "power.h" +typedef int (*pm_callback_t)(struct device *); + /* * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and @@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev) ktime_t calltime = ktime_set(0, 0); if (initcall_debug) { - pr_info("calling %s+ @ %i\n", - dev_name(dev), task_pid_nr(current)); + pr_info("calling %s+ @ %i, parent: %s\n", + dev_name(dev), task_pid_nr(current), + dev->parent ? dev_name(dev->parent) : "none"); calltime = ktime_get(); } @@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async) } /** - * pm_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_op - Return the PM operation appropriate for given PM event. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. 
*/ -static int pm_op(struct device *dev, - const struct dev_pm_ops *ops, - pm_message_t state) +static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) { - int error = 0; - ktime_t calltime; - - calltime = initcall_debug_start(dev); - switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: - if (ops->suspend) { - error = ops->suspend(dev); - suspend_report_result(ops->suspend, error); - } - break; + return ops->suspend; case PM_EVENT_RESUME: - if (ops->resume) { - error = ops->resume(dev); - suspend_report_result(ops->resume, error); - } - break; + return ops->resume; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: - if (ops->freeze) { - error = ops->freeze(dev); - suspend_report_result(ops->freeze, error); - } - break; + return ops->freeze; case PM_EVENT_HIBERNATE: - if (ops->poweroff) { - error = ops->poweroff(dev); - suspend_report_result(ops->poweroff, error); - } - break; + return ops->poweroff; case PM_EVENT_THAW: case PM_EVENT_RECOVER: - if (ops->thaw) { - error = ops->thaw(dev); - suspend_report_result(ops->thaw, error); - } + return ops->thaw; break; case PM_EVENT_RESTORE: - if (ops->restore) { - error = ops->restore(dev); - suspend_report_result(ops->restore, error); - } - break; + return ops->restore; #endif /* CONFIG_HIBERNATE_CALLBACKS */ - default: - error = -EINVAL; } - initcall_debug_report(dev, calltime, error); - - return error; + return NULL; } /** - * pm_noirq_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_noirq_op - Return the PM operation appropriate for given PM event. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. * * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int pm_noirq_op(struct device *dev, - const struct dev_pm_ops *ops, - pm_message_t state) +static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) { - int error = 0; - ktime_t calltime = ktime_set(0, 0), delta, rettime; - - if (initcall_debug) { - pr_info("calling %s+ @ %i, parent: %s\n", - dev_name(dev), task_pid_nr(current), - dev->parent ? 
dev_name(dev->parent) : "none"); - calltime = ktime_get(); - } - switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: - if (ops->suspend_noirq) { - error = ops->suspend_noirq(dev); - suspend_report_result(ops->suspend_noirq, error); - } - break; + return ops->suspend_noirq; case PM_EVENT_RESUME: - if (ops->resume_noirq) { - error = ops->resume_noirq(dev); - suspend_report_result(ops->resume_noirq, error); - } - break; + return ops->resume_noirq; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: - if (ops->freeze_noirq) { - error = ops->freeze_noirq(dev); - suspend_report_result(ops->freeze_noirq, error); - } - break; + return ops->freeze_noirq; case PM_EVENT_HIBERNATE: - if (ops->poweroff_noirq) { - error = ops->poweroff_noirq(dev); - suspend_report_result(ops->poweroff_noirq, error); - } - break; + return ops->poweroff_noirq; case PM_EVENT_THAW: case PM_EVENT_RECOVER: - if (ops->thaw_noirq) { - error = ops->thaw_noirq(dev); - suspend_report_result(ops->thaw_noirq, error); - } - break; + return ops->thaw_noirq; case PM_EVENT_RESTORE: - if (ops->restore_noirq) { - error = ops->restore_noirq(dev); - suspend_report_result(ops->restore_noirq, error); - } - break; + return ops->restore_noirq; #endif /* CONFIG_HIBERNATE_CALLBACKS */ - default: - error = -EINVAL; - } - - if (initcall_debug) { - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - printk("initcall %s_i+ returned %d after %Ld usecs\n", - dev_name(dev), error, - (unsigned long long)ktime_to_ns(delta) >> 10); } - return error; + return NULL; } static char *pm_verb(int event) @@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } +static int dpm_run_callback(pm_callback_t cb, struct device *dev, + pm_message_t state, char *info) +{ + ktime_t calltime; + int error; + + if (!cb) + return 0; + + calltime = initcall_debug_start(dev); + + pm_dev_dbg(dev, state, info); + error = cb(dev); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + /*------------------------- Resume routines -------------------------*/ /** @@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) */ static int device_resume_noirq(struct device *dev, pm_message_t state) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "EARLY power domain "); - error = pm_noirq_op(dev, &dev->pm_domain->ops, state); + info = "EARLY power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "EARLY type "); - error = pm_noirq_op(dev, dev->type->pm, state); + info = "EARLY type "; + callback = pm_noirq_op(dev->type->pm, state); } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "EARLY class "); - error = pm_noirq_op(dev, dev->class->pm, state); + info = "EARLY class "; + callback = pm_noirq_op(dev->class->pm, state); } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "EARLY "); - error = pm_noirq_op(dev, dev->bus->pm, state); + info = "EARLY bus "; + callback = pm_noirq_op(dev->bus->pm, state); } + if (!callback && dev->driver && dev->driver->pm) { + info = "EARLY driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + TRACE_RESUME(error); return 
error; } @@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state) EXPORT_SYMBOL_GPL(dpm_resume_noirq); /** - * legacy_resume - Execute a legacy (bus or class) resume callback for device. - * @dev: Device to resume. - * @cb: Resume callback to execute. - */ -static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) -{ - int error; - ktime_t calltime; - - calltime = initcall_debug_start(dev); - - error = cb(dev); - suspend_report_result(cb, error); - - initcall_debug_report(dev, calltime, error); - - return error; -} - -/** * device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. @@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) */ static int device_resume(struct device *dev, pm_message_t state, bool async) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; bool put = false; @@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) put = true; if (dev->pm_domain) { - pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pm_domain->ops, state); - goto End; + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Driver; } if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "type "); - error = pm_op(dev, dev->type->pm, state); - goto End; + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Driver; } if (dev->class) { if (dev->class->pm) { - pm_dev_dbg(dev, state, "class "); - error = pm_op(dev, dev->class->pm, state); - goto End; + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; } else if (dev->class->resume) { - pm_dev_dbg(dev, state, "legacy class "); - error = legacy_resume(dev, dev->class->resume); + info = "legacy class "; + callback = dev->class->resume; goto End; } } if (dev->bus) { if (dev->bus->pm) { - pm_dev_dbg(dev, state, ""); - error = pm_op(dev, dev->bus->pm, state); + info = "bus "; + callback = pm_op(dev->bus->pm, state); } else if (dev->bus->resume) { - pm_dev_dbg(dev, state, "legacy "); - error = legacy_resume(dev, dev->bus->resume); + info = "legacy bus "; + callback = dev->bus->resume; + goto End; } } + Driver: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + End: + error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false; Unlock: @@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state) */ static void device_complete(struct device *dev, pm_message_t state) { + void (*callback)(struct device *) = NULL; + char *info = NULL; + device_lock(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "completing power domain "); - if (dev->pm_domain->ops.complete) - dev->pm_domain->ops.complete(dev); + info = "completing power domain "; + callback = dev->pm_domain->ops.complete; } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "completing type "); - if (dev->type->pm->complete) - dev->type->pm->complete(dev); + info = "completing type "; + callback = dev->type->pm->complete; } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "completing class "); - if (dev->class->pm->complete) - dev->class->pm->complete(dev); + info = "completing class "; + callback = dev->class->pm->complete; } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "completing "); - if (dev->bus->pm->complete) - dev->bus->pm->complete(dev); + info = "completing bus "; + 
callback = dev->bus->pm->complete; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "completing driver "; + callback = dev->driver->pm->complete; + } + + if (callback) { + pm_dev_dbg(dev, state, info); + callback(dev); } device_unlock(dev); @@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state) */ static int device_suspend_noirq(struct device *dev, pm_message_t state) { - int error; + pm_callback_t callback = NULL; + char *info = NULL; if (dev->pm_domain) { - pm_dev_dbg(dev, state, "LATE power domain "); - error = pm_noirq_op(dev, &dev->pm_domain->ops, state); - if (error) - return error; + info = "LATE power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "LATE type "); - error = pm_noirq_op(dev, dev->type->pm, state); - if (error) - return error; + info = "LATE type "; + callback = pm_noirq_op(dev->type->pm, state); } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "LATE class "); - error = pm_noirq_op(dev, dev->class->pm, state); - if (error) - return error; + info = "LATE class "; + callback = pm_noirq_op(dev->class->pm, state); } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "LATE "); - error = pm_noirq_op(dev, dev->bus->pm, state); - if (error) - return error; + info = "LATE bus "; + callback = pm_noirq_op(dev->bus->pm, state); } - return 0; + if (!callback && dev->driver && dev->driver->pm) { + info = "LATE driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + return dpm_run_callback(callback, dev, state, info); } /** @@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state, */ static int __device_suspend(struct device *dev, pm_message_t state, bool async) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; dpm_wait_for_children(dev, async); @@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) device_lock(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pm_domain->ops, state); - goto End; + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Run; } if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "type "); - error = pm_op(dev, dev->type->pm, state); - goto End; + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Run; } if (dev->class) { if (dev->class->pm) { - pm_dev_dbg(dev, state, "class "); - error = pm_op(dev, dev->class->pm, state); - goto End; + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); @@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->bus) { if (dev->bus->pm) { - pm_dev_dbg(dev, state, ""); - error = pm_op(dev, dev->bus->pm, state); + info = "bus "; + callback = pm_op(dev->bus->pm, state); } else if (dev->bus->suspend) { - pm_dev_dbg(dev, state, "legacy "); + pm_dev_dbg(dev, state, "legacy bus "); error = legacy_suspend(dev, state, dev->bus->suspend); + goto End; } } + Run: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + End: if (!error) { dev->power.is_suspended = true; @@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state) */ static int 
device_prepare(struct device *dev, pm_message_t state) { + int (*callback)(struct device *) = NULL; + char *info = NULL; int error = 0; device_lock(dev); @@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state) dev->power.wakeup_path = device_may_wakeup(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "preparing power domain "); - if (dev->pm_domain->ops.prepare) - error = dev->pm_domain->ops.prepare(dev); - suspend_report_result(dev->pm_domain->ops.prepare, error); - if (error) - goto End; + info = "preparing power domain "; + callback = dev->pm_domain->ops.prepare; } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "preparing type "); - if (dev->type->pm->prepare) - error = dev->type->pm->prepare(dev); - suspend_report_result(dev->type->pm->prepare, error); - if (error) - goto End; + info = "preparing type "; + callback = dev->type->pm->prepare; } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "preparing class "); - if (dev->class->pm->prepare) - error = dev->class->pm->prepare(dev); - suspend_report_result(dev->class->pm->prepare, error); - if (error) - goto End; + info = "preparing class "; + callback = dev->class->pm->prepare; } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "preparing "); - if (dev->bus->pm->prepare) - error = dev->bus->pm->prepare(dev); - suspend_report_result(dev->bus->pm->prepare, error); + info = "preparing bus "; + callback = dev->bus->pm->prepare; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "preparing driver "; + callback = dev->driver->pm->prepare; + } + + if (callback) { + error = callback(dev); + suspend_report_result(callback, error); } - End: device_unlock(dev); return error; diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 86de6c50fc41..c5d358837461 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); /** - * dev_pm_qos_read_value - Get PM QoS constraint for a given device. + * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. + * @dev: Device to get the PM QoS constraint value for. + * + * This routine must be called with dev->power.lock held. + */ +s32 __dev_pm_qos_read_value(struct device *dev) +{ + struct pm_qos_constraints *c = dev->power.constraints; + + return c ? pm_qos_read_value(c) : 0; +} + +/** + * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). * @dev: Device to get the PM QoS constraint value for. */ s32 dev_pm_qos_read_value(struct device *dev) { - struct pm_qos_constraints *c; unsigned long flags; - s32 ret = 0; + s32 ret; spin_lock_irqsave(&dev->power.lock, flags); - - c = dev->power.constraints; - if (c) - ret = pm_qos_read_value(c); - + ret = __dev_pm_qos_read_value(dev); spin_unlock_irqrestore(&dev->power.lock, flags); return ret; @@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); + +/** + * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. + * @dev: Device whose ancestor to add the request for. + * @req: Pointer to the preallocated handle. + * @value: Constraint latency value. 
+ */ +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value) +{ + struct device *ancestor = dev->parent; + int error = -ENODEV; + + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + + if (ancestor) + error = dev_pm_qos_add_request(ancestor, req, value); + + if (error) + req->dev = NULL; + + return error; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 8c78443bca8f..541f821d4ea6 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_idle; + if (callback) __rpm_callback(callback, dev); @@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) return retval != -EACCES ? retval : -EIO; } +struct rpm_qos_data { + ktime_t time_now; + s64 constraint_ns; +}; + +/** + * rpm_update_qos_constraint - Update a given PM QoS constraint data. + * @dev: Device whose timing data to use. + * @data: PM QoS constraint data to update. + * + * Use the suspend timing data of @dev to update PM QoS constraint data pointed + * to by @data. + */ +static int rpm_update_qos_constraint(struct device *dev, void *data) +{ + struct rpm_qos_data *qos = data; + unsigned long flags; + s64 delta_ns; + int ret = 0; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (dev->power.max_time_suspended_ns < 0) + goto out; + + delta_ns = dev->power.max_time_suspended_ns - + ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time)); + if (delta_ns <= 0) { + ret = -EBUSY; + goto out; + } + + if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0) + qos->constraint_ns = delta_ns; + + out: + spin_unlock_irqrestore(&dev->power.lock, flags); + + return ret; +} + /** * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. @@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) { int (*callback)(struct device *); struct device *parent = NULL; + struct rpm_qos_data qos; int retval; trace_rpm_suspend(dev, rpmflags); @@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + qos.constraint_ns = __dev_pm_qos_read_value(dev); + if (qos.constraint_ns < 0) { + /* Negative constraint means "never suspend". */ + retval = -EPERM; + goto out; + } + qos.constraint_ns *= NSEC_PER_USEC; + qos.time_now = ktime_get(); + __update_runtime_status(dev, RPM_SUSPENDING); + if (!dev->power.ignore_children) { + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = device_for_each_child(dev, &qos, + rpm_update_qos_constraint); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); + + if (retval) + goto fail; + } + + dev->power.suspend_time = qos.time_now; + dev->power.max_time_suspended_ns = qos.constraint_ns ? 
: -1; + if (dev->pm_domain) callback = dev->pm_domain->ops.runtime_suspend; else if (dev->type && dev->type->pm) @@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_suspend; + retval = rpm_callback(callback, dev); - if (retval) { - __update_runtime_status(dev, RPM_ACTIVE); - dev->power.deferred_resume = false; - if (retval == -EAGAIN || retval == -EBUSY) { - dev->power.runtime_error = 0; + if (retval) + goto fail; - /* - * If the callback routine failed an autosuspend, and - * if the last_busy time has been updated so that there - * is a new autosuspend expiration time, automatically - * reschedule another autosuspend. - */ - if ((rpmflags & RPM_AUTO) && - pm_runtime_autosuspend_expiration(dev) != 0) - goto repeat; - } else { - pm_runtime_cancel_pending(dev); - } - wake_up_all(&dev->power.wait_queue); - goto out; - } no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags) trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; + + fail: + __update_runtime_status(dev, RPM_ACTIVE); + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + dev->power.deferred_resume = false; + if (retval == -EAGAIN || retval == -EBUSY) { + dev->power.runtime_error = 0; + + /* + * If the callback routine failed an autosuspend, and + * if the last_busy time has been updated so that there + * is a new autosuspend expiration time, automatically + * reschedule another autosuspend. + */ + if ((rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + } else { + pm_runtime_cancel_pending(dev); + } + wake_up_all(&dev->power.wait_queue); + goto out; } /** @@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags) if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + __update_runtime_status(dev, RPM_RESUMING); if (dev->pm_domain) @@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_resume; + retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); @@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev) setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, (unsigned long)dev); + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + init_waitqueue_head(&dev->power.wait_queue); } @@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put_sync(dev->parent); } + +/** + * pm_runtime_update_max_time_suspended - Update device's suspend time data. + * @dev: Device to handle. + * @delta_ns: Value to subtract from the device's max_time_suspended_ns field. + * + * Update the device's power.max_time_suspended_ns field by subtracting + * @delta_ns from it. The resulting value of power.max_time_suspended_ns is + * never negative. 
+ */ +void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) { + if (dev->power.max_time_suspended_ns > delta_ns) + dev->power.max_time_suspended_ns -= delta_ns; + else + dev->power.max_time_suspended_ns = 0; + } + + spin_unlock_irqrestore(&dev->power.lock, flags); +} |
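
Finally, the dev_pm_qos_add_ancestor_request() helper added to qos.c above (introduced for the st1232 touchscreen commit in this series) lets a driver place a latency constraint on the first ancestor device that does not have power.ignore_children set, rather than on itself. A usage sketch with a hypothetical foo driver; the 100 value is a placeholder in microseconds (rpm_suspend() converts the constraint with NSEC_PER_USEC):

#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

/* Hypothetical driver data holding the preallocated request handle. */
struct foo_data {
	struct dev_pm_qos_request latency_req;
};

static int foo_probe(struct device *dev)
{
	struct foo_data *data;
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * Constrain the nearest ancestor that actually controls this
	 * device's resume latency (e.g. its I2C controller) to 100 us.
	 */
	ret = dev_pm_qos_add_ancestor_request(dev, &data->latency_req, 100);
	if (ret < 0)
		dev_warn(dev, "no ancestor accepts a PM QoS request\n");

	dev_set_drvdata(dev, data);
	return 0;
}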