-rw-r--r--  arch/x86/kernel/process.c  36
-rw-r--r--  arch/x86/mm/pat.c           2
2 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 67e9b4a1e89d..ba370dc8685b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -99,15 +99,6 @@ static void mwait_idle(void)
 	} else
 		local_irq_enable();
 }
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
 /*
  * On SMP it's slightly faster (but much more power-consuming!)
  * to poll the ->work.need_resched flag instead of waiting for the
@@ -119,6 +110,33 @@ static void poll_idle(void)
 	cpu_relax();
 }
 
+/*
+ * mwait selection logic:
+ *
+ * Whether MWAIT is usable for idle depends on the CPU. For AMD CPUs
+ * that support MWAIT, using it is the wrong choice: family 0x10 and
+ * 0x11 CPUs enter C1 on HLT, and power savings then depend on a
+ * clock divisor and the current P-state of the core. If all cores
+ * of a processor are in halt state (C1) the processor can enter the
+ * C1E (C1 enhanced) state. If MWAIT is used, this will never happen.
+ *
+ * idle=mwait overrides this decision and forces the usage of MWAIT.
+ */
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+	if (force_mwait)
+		return 1;
+
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch (c->x86) {
+		case 0x10:
+		case 0x11:
+			return 0;
+		}
+	}
+	return 1;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	static int selected;
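
The removed mwait_usable() keyed off CPUID leaf 5: EDX bits 7:4 advertise the number of MWAIT C1 sub-states, which is what (cpuid_edx(5) >> 4) & 0xf extracts. The replacement keys off vendor and family instead (in the kernel, c->x86 already folds the extended family in). As a rough illustration of both checks, here is a user-space sketch, not kernel code, assuming a GCC-compatible compiler that provides <cpuid.h>:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13] = { 0 };
	unsigned int family, max_leaf;

	/* Leaf 0: highest supported standard leaf and the vendor string. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	max_leaf = eax;
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);

	/* Leaf 1: family in EAX[11:8]; extended family is added for 0xf,
	 * which is how AMD family 0x10/0x11 parts report themselves. */
	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	family = (eax >> 8) & 0xf;
	if (family == 0xf)
		family += (eax >> 20) & 0xff;

	printf("vendor: %s, family: 0x%x\n", vendor, family);

	/* Old kernel check: any C1 sub-states advertised in leaf 5 EDX[7:4]? */
	if (max_leaf >= 5) {
		__get_cpuid(5, &eax, &ebx, &ecx, &edx);
		printf("MWAIT C1 sub-states: %u\n", (edx >> 4) & 0xf);
	}

	/* New kernel check: AMD family 0x10/0x11 should not idle via MWAIT. */
	if (!strcmp(vendor, "AuthenticAMD") &&
	    (family == 0x10 || family == 0x11))
		printf("mwait_usable() would return 0 here\n");

	return 0;
}

Compile with something like gcc -O2 -o cpuid_mwait cpuid_mwait.c (the file name is arbitrary); under a hypervisor the leaf 5 values may be filtered, so run on bare metal for meaningful output.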
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bcb1a8e4b2db..de3a99812450 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -28,7 +28,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_wc_enabled = 1;
 
-void __init pat_disable(char *reason)
+void __cpuinit pat_disable(char *reason)
 {
 	pat_wc_enabled = 0;
 	printk(KERN_INFO "%s\n", reason);
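
The pat.c hunk changes only a section annotation. __init functions live in .init.text and are freed once booting completes; in kernels of this era __cpuinit was (roughly) defined as empty when CONFIG_HOTPLUG_CPU=y and as __init otherwise, so the change keeps pat_disable() resident on hotplug-capable kernels, presumably because it is reachable from CPU-identification code that can also run when a CPU is brought online after boot. A minimal sketch of the failure mode the annotation avoids; the function names here are hypothetical:

#include <linux/init.h>

/* Lives in .init.text: discarded once booting is done. */
static void __init boot_only_setup(void)
{
	/* ... one-time boot configuration ... */
}

/* May run long after boot, when a CPU is hot-plugged. */
static void __cpuinit cpu_bringup(void)
{
	/*
	 * Calling into discarded init code from here would be a bug:
	 * a hot-plugged CPU would jump into memory that has already
	 * been freed.
	 */
	boot_only_setup();	/* modpost: section mismatch warning */
}

modpost catches such references at build time and reports them as section mismatch warnings, which is the usual reason an __init annotation gets relaxed to __cpuinit.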