author	Glauber Costa <gcosta@redhat.com>	2008-02-11 17:16:04 -0200
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 17:40:47 +0200
commit	96597fd2be7070631ad0776cd8bced21415fd5e3 (patch)
tree	f592ab1b01a1b10d032a44f0c2137693ec73df04 /arch/x86
parent	2785c8d052278228cc3806233c09295088f83d42 (diff)
x86: introduce vsmp paravirt helpers
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalemp.com>
Acked-by: Shai Fultheim <shai@scalemp.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
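For context: the helpers added below shadow the interrupt flag (IF, EFLAGS bit 9) in the alignment-check flag (AC, EFLAGS bit 18), so on vSMPowered systems "interrupts disabled" is signalled by setting AC even while the hardware IF bit stays on. A minimal stand-alone sketch of that mapping (user-space, hypothetical demo_* names, not part of this patch):

#include <assert.h>

#define DEMO_EFLAGS_IF (1UL << 9)   /* interrupt enable flag */
#define DEMO_EFLAGS_AC (1UL << 18)  /* alignment check flag, reused as a ~IF shadow */

/* Mirrors the vsmp_save_fl() logic: report IF as clear whenever AC is set. */
static unsigned long demo_save_fl(unsigned long hw_flags)
{
	if (!(hw_flags & DEMO_EFLAGS_IF) || (hw_flags & DEMO_EFLAGS_AC))
		hw_flags &= ~DEMO_EFLAGS_IF;
	return hw_flags;
}

int main(void)
{
	/* AC set means "interrupts disabled", even though hardware IF is on. */
	assert(!(demo_save_fl(DEMO_EFLAGS_IF | DEMO_EFLAGS_AC) & DEMO_EFLAGS_IF));
	/* AC clear, IF set: interrupts are genuinely enabled. */
	assert(demo_save_fl(DEMO_EFLAGS_IF) & DEMO_EFLAGS_IF);
	return 0;
}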
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/Kconfig	3
-rw-r--r--	arch/x86/kernel/vsmp_64.c	56
2 files changed, 58 insertions(+), 1 deletion(-)
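This patch only introduces the helpers and the patching hook; installing them in the paravirt tables happens separately. A minimal sketch (assuming the pv_irq_ops/pv_init_ops layout of this kernel and a hypothetical set_vsmp_pv_ops() helper, not part of this diff) of how such routines are typically wired up:

static void __init set_vsmp_pv_ops(void)
{
	/* Route the irq-flag operations through the vSMP-aware helpers. */
	pv_irq_ops.irq_disable = vsmp_irq_disable;
	pv_irq_ops.irq_enable  = vsmp_irq_enable;
	pv_irq_ops.save_fl     = vsmp_save_fl;
	pv_irq_ops.restore_fl  = vsmp_restore_fl;
	/* Let vsmp_patch() decide how these call sites get patched. */
	pv_init_ops.patch      = vsmp_patch;
}

The vsmp_patch() hook added below keeps these four operations patched via paravirt_patch_default() (i.e. real calls to the vsmp_* helpers) rather than native_patch(), which would inline the raw cli/sti/pushf/popf sequences and bypass the AC shadowing.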
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c70fed0f9a0..ed7f72b39113 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -328,7 +328,8 @@ config X86_RDC321X
config X86_VSMP
bool "Support for ScaleMP vSMP"
depends on X86_64 && PCI
-	help
+	select PARAVIRT
+	help
Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
supposed to run on these EM64T-based machines. Only choose this option
if you have one of these machines.
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index fdf9fba6ba9c..b93ed66c754f 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -8,6 +8,8 @@
*
* Ravikiran Thirumalai <kiran@scalemp.com>,
* Shai Fultheim <shai@scalemp.com>
+ * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
+ * Ravikiran Thirumalai <kiran@scalemp.com>
*/
#include <linux/init.h>
@@ -15,6 +17,60 @@
#include <linux/pci_regs.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
+#include <asm/paravirt.h>
+
+/*
+ * Interrupt control on vSMPowered systems:
+ * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
+ * and vice versa.
+ */
+
+static unsigned long vsmp_save_fl(void)
+{
+	unsigned long flags = native_save_fl();
+
+	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
+		flags &= ~X86_EFLAGS_IF;
+	return flags;
+}
+
+static void vsmp_restore_fl(unsigned long flags)
+{
+	if (flags & X86_EFLAGS_IF)
+		flags &= ~X86_EFLAGS_AC;
+	else
+		flags |= X86_EFLAGS_AC;
+	native_restore_fl(flags);
+}
+
+static void vsmp_irq_disable(void)
+{
+	unsigned long flags = native_save_fl();
+
+	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static void vsmp_irq_enable(void)
+{
+	unsigned long flags = native_save_fl();
+
+	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
+static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+				  unsigned long addr, unsigned len)
+{
+	switch (type) {
+	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
+	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
+	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
+	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
+	default:
+		return native_patch(type, clobbers, ibuf, addr, len);
+	}
+
+}
void __init vsmp_init(void)
{