Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acconfig.h            |   4
-rw-r--r--  include/acpi/acoutput.h            |   8
-rw-r--r--  include/acpi/acpi_bus.h            |  29
-rw-r--r--  include/acpi/acpixf.h              |   3
-rw-r--r--  include/acpi/processor.h           |   5
-rw-r--r--  include/linux/acpi.h               |   3
-rw-r--r--  include/linux/cpufreq.h            |  54
-rw-r--r--  include/linux/cpuidle.h            |   6
-rw-r--r--  include/linux/debug_locks.h        |   4
-rw-r--r--  include/linux/devfreq.h            |   2
-rw-r--r--  include/linux/device.h             |  22
-rw-r--r--  include/linux/freezer.h            | 171
-rw-r--r--  include/linux/memory_hotplug.h     |  14
-rw-r--r--  include/linux/pm_runtime.h         |   2
-rw-r--r--  include/linux/power/smartreflex.h  |  10
-rw-r--r--  include/linux/suspend.h            |   1
-rw-r--r--  include/trace/events/power.h       | 173
-rw-r--r--  include/xen/acpi.h                 |  16
18 files changed, 422 insertions, 105 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 14ceff788c40..1c16f821434f 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -219,8 +219,8 @@
*
*****************************************************************************/
-#define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */
-#define ACPI_DB_LINE_BUFFER_SIZE 512
+#define ACPI_DEBUGGER_MAX_ARGS ACPI_METHOD_NUM_ARGS + 4 /* Max command line arguments */
+#define ACPI_DB_LINE_BUFFER_SIZE 512
#define ACPI_DEBUGGER_COMMAND_PROMPT '-'
#define ACPI_DEBUGGER_EXECUTE_PROMPT '%'
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 4f52ea795c7a..4607b027a657 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -428,27 +428,21 @@
* This is the non-debug case -- make everything go away,
* leaving no executable debug code!
*/
-#define ACPI_FUNCTION_NAME(a)
#define ACPI_DEBUG_PRINT(pl)
#define ACPI_DEBUG_PRINT_RAW(pl)
#define ACPI_DEBUG_EXEC(a)
#define ACPI_DEBUG_ONLY_MEMBERS(a)
+#define ACPI_FUNCTION_NAME(a)
#define ACPI_FUNCTION_TRACE(a)
#define ACPI_FUNCTION_TRACE_PTR(a, b)
#define ACPI_FUNCTION_TRACE_U32(a, b)
#define ACPI_FUNCTION_TRACE_STR(a, b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
#define ACPI_FUNCTION_ENTRY()
#define ACPI_DUMP_STACK_ENTRY(a)
#define ACPI_DUMP_OPERANDS(a, b, c)
#define ACPI_DUMP_ENTRY(a, b)
-#define ACPI_DUMP_TABLES(a, b)
#define ACPI_DUMP_PATHNAME(a, b, c, d)
#define ACPI_DUMP_BUFFER(a, b)
-#define ACPI_DEBUG_PRINT(pl)
-#define ACPI_DEBUG_PRINT_RAW(pl)
#define ACPI_IS_DEBUG_ENABLED(level, component) 0
/* Return macros must have a return statement at the minimum */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c13c919ab99e..ca081ace2a1d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -63,13 +63,6 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
#define ACPI_BUS_FILE_ROOT "acpi"
extern struct proc_dir_entry *acpi_root_dir;
-enum acpi_bus_removal_type {
- ACPI_BUS_REMOVAL_NORMAL = 0,
- ACPI_BUS_REMOVAL_EJECT,
- ACPI_BUS_REMOVAL_SUPRISE,
- ACPI_BUS_REMOVAL_TYPE_COUNT
-};
-
enum acpi_bus_device_type {
ACPI_BUS_TYPE_DEVICE = 0,
ACPI_BUS_TYPE_POWER,
@@ -163,12 +156,10 @@ struct acpi_device_flags {
u32 dynamic_status:1;
u32 removable:1;
u32 ejectable:1;
- u32 suprise_removal_ok:1;
u32 power_manageable:1;
- u32 performance_manageable:1;
u32 eject_pending:1;
u32 match_driver:1;
- u32 reserved:24;
+ u32 reserved:26;
};
/* File System */
@@ -286,6 +277,7 @@ struct acpi_device_physical_node {
u8 node_id;
struct list_head node;
struct device *dev;
+ bool put_online:1;
};
/* set maximum of physical nodes to 32 for expansibility */
@@ -310,7 +302,6 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct device dev;
- enum acpi_bus_removal_type removal_type; /* indicate for different removal type */
u8 physical_node_count;
struct list_head physical_node_list;
struct mutex physical_node_lock;
@@ -468,8 +459,6 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler, void *context);
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler);
-int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
- u32 target_state, int d_max_in, int *d_min_p);
int acpi_pm_device_sleep_state(struct device *, int *, int);
void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
@@ -485,23 +474,13 @@ static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
{
return AE_SUPPORT;
}
-static inline int __acpi_device_power_state(int m, int *p)
+static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
{
if (p)
*p = ACPI_STATE_D0;
+
return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0;
}
-static inline int acpi_device_power_state(struct device *dev,
- struct acpi_device *adev,
- u32 target_state, int d_max_in,
- int *d_min_p)
-{
- return __acpi_device_power_state(d_max_in, d_min_p);
-}
-static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
-{
- return __acpi_device_power_state(m, p);
-}
static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
struct device *depdev) {}
static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
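A minimal sketch of how a caller might use acpi_pm_device_sleep_state() after the acpi_bus.h cleanup above; foo_choose_d_state() is a hypothetical helper, not part of this patch, and the semantics described in the comments are the commonly documented ones rather than anything added here.

#include <acpi/acpi_bus.h>

static int foo_choose_d_state(struct device *dev)
{
	int d_min;

	/*
	 * Ask ACPI for the deepest D-state the device may enter (capped at
	 * D3); the shallowest state still allowed is returned via d_min.
	 */
	return acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3);
}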
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 454881e6450a..1b09300810e6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20130328
+#define ACPI_CA_VERSION 0x20130517
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -80,6 +80,7 @@ extern bool acpi_gbl_enable_aml_debug_object;
extern u8 acpi_gbl_copy_dsdt_locally;
extern u8 acpi_gbl_truncate_io_addresses;
extern u8 acpi_gbl_disable_auto_repair;
+extern u8 acpi_gbl_disable_ssdt_table_load;
/*
* Hardware-reduced prototypes. All interfaces that use these macros will
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index ea69367fdd3b..66096d06925e 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -6,6 +6,10 @@
#include <linux/thermal.h>
#include <asm/acpi.h>
+#define ACPI_PROCESSOR_CLASS "processor"
+#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
+#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
+
#define ACPI_PROCESSOR_BUSY_METRIC 10
#define ACPI_PROCESSOR_MAX_POWER 8
@@ -207,6 +211,7 @@ struct acpi_processor {
struct acpi_processor_throttling throttling;
struct acpi_processor_limit limit;
struct thermal_cooling_device *cdev;
+ struct device *dev; /* Processor device. */
};
struct acpi_processor_errata {
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 17b5b5967641..353ba256f368 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -352,8 +352,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
- (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
- defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) && \
+ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
defined(CONFIG_ACPI_CONTAINER)
#define ACPI_HOTPLUG_OST
#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 037d36ae63e5..4d7390bc1727 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1,8 +1,8 @@
/*
- * linux/include/linux/cpufreq.h
+ * linux/include/linux/cpufreq.h
*
- * Copyright (C) 2001 Russell King
- * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,7 +26,6 @@
/* Print length for names. Extra 1 space for accomodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
-
/*********************************************************************
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
@@ -71,6 +70,10 @@ struct cpufreq_governor;
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
+int cpufreq_get_global_kobject(void);
+void cpufreq_put_global_kobject(void);
+int cpufreq_sysfs_create_file(const struct attribute *attr);
+void cpufreq_sysfs_remove_file(const struct attribute *attr);
#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo {
@@ -107,6 +110,7 @@ struct cpufreq_policy {
unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
void *governor_data;
+ bool governor_enabled; /* governor start/stop flag */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
@@ -115,6 +119,7 @@ struct cpufreq_policy {
struct kobject kobj;
struct completion kobj_unregister;
+ bool transition_ongoing; /* Tracks transition status */
};
#define CPUFREQ_ADJUST (0)
@@ -148,17 +153,18 @@ struct cpufreq_freqs {
u8 flags; /* flags of cpufreq_driver, see below. */
};
-
/**
- * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe)
+ * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
+ * safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
- * new = old * mult / div
+ * new = old * mult / div
*/
-static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult)
+static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
+ u_int mult)
{
#if BITS_PER_LONG == 32
@@ -211,14 +217,12 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
-
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
unsigned int cpu);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
-
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
*********************************************************************/
@@ -229,7 +233,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
struct freq_attr;
struct cpufreq_driver {
- struct module *owner;
+ struct module *owner;
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
@@ -277,11 +281,11 @@ struct cpufreq_driver {
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
-
void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state);
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max)
{
if (policy->min < min)
policy->min = min;
@@ -337,12 +341,16 @@ const char *cpufreq_get_current_driver(void);
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
*********************************************************************/
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#ifdef CONFIG_CPU_FREQ
-/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
+/*
+ * query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
+ */
unsigned int cpufreq_get(unsigned int cpu);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
@@ -351,7 +359,9 @@ static inline unsigned int cpufreq_get(unsigned int cpu)
}
#endif
-/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
+/*
+ * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
+ */
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
@@ -366,16 +376,14 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
#endif
-
/*********************************************************************
* CPUFREQ DEFAULT GOVERNOR *
*********************************************************************/
-
/*
- Performance governor is fallback governor if any other gov failed to
- auto load due latency restrictions
-*/
+ * Performance governor is fallback governor if any other gov failed to auto
+ * load due latency restrictions
+ */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
extern struct cpufreq_governor cpufreq_gov_performance;
#endif
@@ -395,7 +403,6 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
#endif
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
@@ -404,7 +411,7 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
#define CPUFREQ_TABLE_END ~1
struct cpufreq_frequency_table {
- unsigned int index; /* any */
+ unsigned int driver_data; /* driver specific data, not used by core */
unsigned int frequency; /* kHz - doesn't need to be in ascending
* order */
};
@@ -432,4 +439,7 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
+
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
+
#endif /* _LINUX_CPUFREQ_H */
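The cpufreq.h hunk above renames the frequency-table field from .index to .driver_data. A minimal sketch of a driver-side table against the renamed field; the foo_* names and frequencies are hypothetical, not part of this patch.

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .driver_data = 0, .frequency = 600000 },		/* kHz */
	{ .driver_data = 1, .frequency = 1200000 },
	{ .driver_data = 0, .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Validate policy limits against the table and expose it via sysfs. */
	int ret = cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);

	if (ret)
		return ret;

	cpufreq_frequency_table_get_attr(foo_freq_table, policy->cpu);
	return 0;
}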
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f0406230a0a..0bc4b74668e9 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -111,6 +111,9 @@ struct cpuidle_driver {
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;
+
+ /* the driver handles the cpus in cpumask */
+ struct cpumask *cpumask;
};
#ifdef CONFIG_CPU_IDLE
@@ -135,9 +138,6 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
-extern int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu);
-extern void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu);
-
#else
static inline void disable_cpuidle(void) { }
static inline int cpuidle_idle_call(void) { return -ENODEV; }
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 21ca773f77bf..822c1354f3a6 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
#else
static inline void debug_show_all_locks(void)
{
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
}
static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
{
}
#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index fe8c4476f7e4..5f1ab92107e6 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -181,6 +181,8 @@ extern struct devfreq *devfreq_add_device(struct device *dev,
const char *governor_name,
void *data);
extern int devfreq_remove_device(struct devfreq *devfreq);
+
+/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d4835a8f8b8..bcf8c0d4cd98 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -71,6 +71,10 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* the specific driver's probe to initial the matched device.
* @remove: Called when a device removed from this bus.
* @shutdown: Called at shut-down time to quiesce the device.
+ *
+ * @online: Called to put the device back online (after offlining it).
+ * @offline: Called to put the device offline for hot-removal. May fail.
+ *
* @suspend: Called when a device on this bus wants to go to sleep mode.
* @resume: Called to bring a device on this bus out of sleep mode.
* @pm: Power management operations of this bus, callback the specific
@@ -105,6 +109,9 @@ struct bus_type {
int (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
+ int (*online)(struct device *dev);
+ int (*offline)(struct device *dev);
+
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
@@ -652,6 +659,9 @@ struct acpi_dev_node {
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
*
+ * @offline_disabled: If set, the device is permanently online.
+ * @offline: Set after successful invocation of bus type's .offline().
+ *
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
* that the device model core needs to model the system. Most subsystems,
@@ -723,6 +733,9 @@ struct device {
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
+
+ bool offline_disabled:1;
+ bool offline:1;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -859,6 +872,15 @@ extern const char *device_get_devnode(struct device *dev,
extern void *dev_get_drvdata(const struct device *dev);
extern int dev_set_drvdata(struct device *dev, void *data);
+static inline bool device_supports_offline(struct device *dev)
+{
+ return dev->bus && dev->bus->offline && dev->bus->online;
+}
+
+extern void lock_device_hotplug(void);
+extern void unlock_device_hotplug(void);
+extern int device_offline(struct device *dev);
+extern int device_online(struct device *dev);
/*
* Root device objects for grouping under /sys/devices
*/
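The new bus-level .online/.offline callbacks and the device_offline()/device_online() helpers added above are meant to be used under the device-hotplug lock. A minimal sketch of a caller, assuming a valid struct device pointer; try_offline() is a hypothetical helper, not part of this patch.

#include <linux/device.h>

static int try_offline(struct device *dev)
{
	int ret;

	/* Offlining only makes sense if the bus provides both callbacks. */
	if (!device_supports_offline(dev))
		return -EOPNOTSUPP;

	lock_device_hotplug();		/* serialize against concurrent hot add/remove */
	ret = device_offline(dev);	/* may fail, e.g. if the bus refuses */
	unlock_device_hotplug();

	return ret;
}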
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index e70df40d84f6..7fd81b8c4897 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -3,6 +3,7 @@
#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED
+#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
@@ -46,7 +47,11 @@ extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
-static inline bool try_to_freeze(void)
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning it means the caller may deadlock
+ */
+static inline bool try_to_freeze_unsafe(void)
{
might_sleep();
if (likely(!freezing(current)))
@@ -54,6 +59,13 @@ static inline bool try_to_freeze(void)
return __refrigerator(false);
}
+static inline bool try_to_freeze(void)
+{
+ if (!(current->flags & PF_NOFREEZE))
+ debug_check_no_locks_held();
+ return try_to_freeze_unsafe();
+}
+
extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
@@ -115,6 +127,14 @@ static inline void freezer_count(void)
try_to_freeze();
}
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+ current->flags &= ~PF_FREEZER_SKIP;
+ smp_mb();
+ try_to_freeze_unsafe();
+}
+
/**
* freezer_should_skip - whether to skip a task when determining frozen
* state is reached
@@ -139,28 +159,86 @@ static inline bool freezer_should_skip(struct task_struct *p)
}
/*
- * These macros are intended to be used whenever you want allow a sleeping
+ * These functions are intended to be used whenever you want allow a sleeping
* task to be frozen. Note that neither return any clear indication of
* whether a freeze event happened while in this function.
*/
/* Like schedule(), but should not block the freezer. */
-#define freezable_schedule() \
-({ \
- freezer_do_not_count(); \
- schedule(); \
- freezer_count(); \
-})
+static inline void freezable_schedule(void)
+{
+ freezer_do_not_count();
+ schedule();
+ freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+ freezer_do_not_count();
+ schedule();
+ freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout(timeout);
+ freezer_count();
+ return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_interruptible(timeout);
+ freezer_count();
+ return __retval;
+}
/* Like schedule_timeout_killable(), but should not block the freezer. */
-#define freezable_schedule_timeout_killable(timeout) \
-({ \
- long __retval; \
- freezer_do_not_count(); \
- __retval = schedule_timeout_killable(timeout); \
- freezer_count(); \
- __retval; \
-})
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_killable(timeout);
+ freezer_count();
+ return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_killable(timeout);
+ freezer_count_unsafe();
+ return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+ unsigned long delta, const enum hrtimer_mode mode)
+{
+ int __retval;
+ freezer_do_not_count();
+ __retval = schedule_hrtimeout_range(expires, delta, mode);
+ freezer_count();
+ return __retval;
+}
/*
* Freezer-friendly wrappers around wait_event_interruptible(),
@@ -177,33 +255,45 @@ static inline bool freezer_should_skip(struct task_struct *p)
__retval; \
})
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition) \
+({ \
+ int __retval; \
+ freezer_do_not_count(); \
+ __retval = wait_event_killable(wq, (condition)); \
+ freezer_count_unsafe(); \
+ __retval; \
+})
+
#define wait_event_freezable(wq, condition) \
({ \
int __retval; \
- for (;;) { \
- __retval = wait_event_interruptible(wq, \
- (condition) || freezing(current)); \
- if (__retval || (condition)) \
- break; \
- try_to_freeze(); \
- } \
+ freezer_do_not_count(); \
+ __retval = wait_event_interruptible(wq, (condition)); \
+ freezer_count(); \
__retval; \
})
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
long __retval = timeout; \
- for (;;) { \
- __retval = wait_event_interruptible_timeout(wq, \
- (condition) || freezing(current), \
- __retval); \
- if (__retval <= 0 || (condition)) \
- break; \
- try_to_freeze(); \
- } \
+ freezer_do_not_count(); \
+ __retval = wait_event_interruptible_timeout(wq, (condition), \
+ __retval); \
+ freezer_count(); \
__retval; \
})
+#define wait_event_freezable_exclusive(wq, condition) \
+({ \
+ int __retval; \
+ freezer_do_not_count(); \
+ __retval = wait_event_interruptible_exclusive(wq, condition); \
+ freezer_count(); \
+ __retval; \
+})
+
+
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -225,18 +315,37 @@ static inline void set_freezable(void) {}
#define freezable_schedule() schedule()
+#define freezable_schedule_unsafe() schedule()
+
+#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout) \
+ schedule_timeout_interruptible(timeout)
+
#define freezable_schedule_timeout_killable(timeout) \
schedule_timeout_killable(timeout)
+#define freezable_schedule_timeout_killable_unsafe(timeout) \
+ schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
+ schedule_hrtimeout_range(expires, delta, mode)
+
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
#define wait_event_freezable_timeout(wq, condition, timeout) \
wait_event_interruptible_timeout(wq, condition, timeout)
+#define wait_event_freezable_exclusive(wq, condition) \
+ wait_event_interruptible_exclusive(wq, condition)
+
#define wait_event_freezekillable(wq, condition) \
wait_event_killable(wq, condition)
+#define wait_event_freezekillable_unsafe(wq, condition) \
+ wait_event_killable(wq, condition)
+
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
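The freezer.h changes above convert the freezable_schedule*() macros into inline functions and add timeout variants. A minimal sketch of a freezer-aware kernel thread using one of the new helpers; foo_thread() is hypothetical and not part of this patch.

#include <linux/freezer.h>
#include <linux/kthread.h>

static int foo_thread(void *data)
{
	set_freezable();	/* opt in to being frozen during suspend/hibernation */

	while (!kthread_should_stop()) {
		/*
		 * Sleep for roughly a second without blocking the freezer;
		 * per the comments above, no locks may be held here.
		 */
		freezable_schedule_timeout_interruptible(HZ);

		/* ... periodic work would go here ... */
	}

	return 0;
}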
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 3e622c610925..dd38e62b84d2 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -234,6 +234,8 @@ static inline void unlock_memory_hotplug(void) {}
extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+extern void remove_memory(int nid, u64 start, u64 size);
#else
static inline int is_mem_section_removable(unsigned long pfn,
@@ -243,15 +245,23 @@ static inline int is_mem_section_removable(unsigned long pfn,
}
static inline void try_offline_node(int nid) {}
+
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+ return -EINVAL;
+}
+
+static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
+ void *arg, int (*func)(struct memory_block *, void *));
extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern int offline_memory_block(struct memory_block *mem);
extern bool is_memblock_offlined(struct memory_block *mem);
-extern int remove_memory(int nid, u64 start, u64 size);
+extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
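walk_memory_range(), exported above, iterates over the memory blocks in a pfn range and invokes a callback on each. A minimal sketch of a caller, assuming the pfn range is valid; count_block() and count_blocks() are hypothetical helpers, not part of this patch.

#include <linux/memory.h>
#include <linux/memory_hotplug.h>

static int count_block(struct memory_block *mem, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0;		/* returning nonzero would stop the walk */
}

static unsigned int count_blocks(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int nr = 0;

	walk_memory_range(start_pfn, end_pfn, &nr, count_block);
	return nr;
}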
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7d7e09efff9b..6fa7cea25da9 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -37,7 +37,6 @@ extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
-extern int pm_generic_runtime_idle(struct device *dev);
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
@@ -143,7 +142,6 @@ static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
-static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index c0f44c2b006d..d8b187c3925d 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -299,11 +299,11 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
/* Smartreflex driver hooks to be called from Smartreflex class driver */
-int sr_enable(struct voltagedomain *voltdm, unsigned long volt);
-void sr_disable(struct voltagedomain *voltdm);
-int sr_configure_errgen(struct voltagedomain *voltdm);
-int sr_disable_errgen(struct voltagedomain *voltdm);
-int sr_configure_minmax(struct voltagedomain *voltdm);
+int sr_enable(struct omap_sr *sr, unsigned long volt);
+void sr_disable(struct omap_sr *sr);
+int sr_configure_errgen(struct omap_sr *sr);
+int sr_disable_errgen(struct omap_sr *sr);
+int sr_configure_minmax(struct omap_sr *sr);
/* API to register the smartreflex class driver with the smartreflex driver */
int sr_register_class(struct omap_sr_class_data *class_data);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d4e3f16d5e89..f73cabf59012 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -363,6 +363,7 @@ extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
+extern void pm_print_active_wakeup_sources(void);
static inline void lock_system_sleep(void)
{
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 427acab5d69a..8e42410bd159 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -5,6 +5,7 @@
#define _TRACE_POWER_H
#include <linux/ktime.h>
+#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(cpu,
@@ -177,6 +178,178 @@ DEFINE_EVENT(power_domain, power_domain_target,
TP_ARGS(name, state, cpu_id)
);
+
+/*
+ * The pm qos events are used for pm qos update
+ */
+DECLARE_EVENT_CLASS(pm_qos_request,
+
+ TP_PROTO(int pm_qos_class, s32 value),
+
+ TP_ARGS(pm_qos_class, value),
+
+ TP_STRUCT__entry(
+ __field( int, pm_qos_class )
+ __field( s32, value )
+ ),
+
+ TP_fast_assign(
+ __entry->pm_qos_class = pm_qos_class;
+ __entry->value = value;
+ ),
+
+ TP_printk("pm_qos_class=%s value=%d",
+ __print_symbolic(__entry->pm_qos_class,
+ { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
+ { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
+ { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
+ __entry->value)
+);
+
+DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
+
+ TP_PROTO(int pm_qos_class, s32 value),
+
+ TP_ARGS(pm_qos_class, value)
+);
+
+DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
+
+ TP_PROTO(int pm_qos_class, s32 value),
+
+ TP_ARGS(pm_qos_class, value)
+);
+
+DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
+
+ TP_PROTO(int pm_qos_class, s32 value),
+
+ TP_ARGS(pm_qos_class, value)
+);
+
+TRACE_EVENT(pm_qos_update_request_timeout,
+
+ TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
+
+ TP_ARGS(pm_qos_class, value, timeout_us),
+
+ TP_STRUCT__entry(
+ __field( int, pm_qos_class )
+ __field( s32, value )
+ __field( unsigned long, timeout_us )
+ ),
+
+ TP_fast_assign(
+ __entry->pm_qos_class = pm_qos_class;
+ __entry->value = value;
+ __entry->timeout_us = timeout_us;
+ ),
+
+ TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
+ __print_symbolic(__entry->pm_qos_class,
+ { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
+ { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
+ { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
+ __entry->value, __entry->timeout_us)
+);
+
+DECLARE_EVENT_CLASS(pm_qos_update,
+
+ TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
+
+ TP_ARGS(action, prev_value, curr_value),
+
+ TP_STRUCT__entry(
+ __field( enum pm_qos_req_action, action )
+ __field( int, prev_value )
+ __field( int, curr_value )
+ ),
+
+ TP_fast_assign(
+ __entry->action = action;
+ __entry->prev_value = prev_value;
+ __entry->curr_value = curr_value;
+ ),
+
+ TP_printk("action=%s prev_value=%d curr_value=%d",
+ __print_symbolic(__entry->action,
+ { PM_QOS_ADD_REQ, "ADD_REQ" },
+ { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
+ { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
+ __entry->prev_value, __entry->curr_value)
+);
+
+DEFINE_EVENT(pm_qos_update, pm_qos_update_target,
+
+ TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
+
+ TP_ARGS(action, prev_value, curr_value)
+);
+
+DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags,
+
+ TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
+
+ TP_ARGS(action, prev_value, curr_value),
+
+ TP_printk("action=%s prev_value=0x%x curr_value=0x%x",
+ __print_symbolic(__entry->action,
+ { PM_QOS_ADD_REQ, "ADD_REQ" },
+ { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
+ { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
+ __entry->prev_value, __entry->curr_value)
+);
+
+DECLARE_EVENT_CLASS(dev_pm_qos_request,
+
+ TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
+ s32 new_value),
+
+ TP_ARGS(name, type, new_value),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( enum dev_pm_qos_req_type, type )
+ __field( s32, new_value )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->type = type;
+ __entry->new_value = new_value;
+ ),
+
+ TP_printk("device=%s type=%s new_value=%d",
+ __get_str(name),
+ __print_symbolic(__entry->type,
+ { DEV_PM_QOS_LATENCY, "DEV_PM_QOS_LATENCY" },
+ { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
+ __entry->new_value)
+);
+
+DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request,
+
+ TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
+ s32 new_value),
+
+ TP_ARGS(name, type, new_value)
+);
+
+DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request,
+
+ TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
+ s32 new_value),
+
+ TP_ARGS(name, type, new_value)
+);
+
+DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,
+
+ TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
+ s32 new_value),
+
+ TP_ARGS(name, type, new_value)
+);
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
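The PM QoS trace events declared above are emitted from the PM QoS core; a minimal sketch of what the generated tracepoint calls look like at a hypothetical call site. The values are made up for illustration and the function is not part of this patch.

#include <linux/pm_qos.h>
#include <trace/events/power.h>

static void foo_trace_example(void)
{
	/* One trace_* call is generated per DEFINE_EVENT(pm_qos_request, ...). */
	trace_pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 100);
	trace_pm_qos_update_request(PM_QOS_CPU_DMA_LATENCY, 50);
	trace_pm_qos_remove_request(PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
}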
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 68d73d09b770..46aa3d1c1654 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -78,11 +78,25 @@ static inline int xen_acpi_get_pxm(acpi_handle h)
int xen_acpi_notify_hypervisor_state(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd);
+static inline int xen_acpi_suspend_lowlevel(void)
+{
+ /*
+ * Xen will save and restore CPU context, so
+ * we can skip that and just go straight to
+ * the suspend.
+ */
+ acpi_enter_sleep_state(ACPI_STATE_S3);
+ return 0;
+}
+
static inline void xen_acpi_sleep_register(void)
{
- if (xen_initial_domain())
+ if (xen_initial_domain()) {
acpi_os_set_prepare_sleep(
&xen_acpi_notify_hypervisor_state);
+
+ acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
+ }
}
#else
static inline void xen_acpi_sleep_register(void)