author     Alex Nixon <alex.nixon@citrix.com>    2008-08-22 11:52:15 +0100
committer  Ingo Molnar <mingo@elte.hu>           2008-08-25 11:25:14 +0200
commit     d68d82afd4c88e25763b23cd9cd4974573a3706f (patch)
tree       42a3fb93a5cef70db7ad01fda1ed0dc68dbe6110 /arch/x86/xen
parent     8227dce7dc2cfdcc28ee0eadfb482a7ee77fba03 (diff)
xen: implement CPU hotplugging
Note the changes from 2.6.18-xen CPU hotplugging:

A vcpu_down request from the remote admin via Xenbus both hot-unplugs the CPU and disables it, removing it from the cpu_present map and deleting its entry in /sys.

A vcpu_up request from the remote admin only re-enables the CPU; it does not immediately bring the CPU up. A udev event is emitted, which the user can catch to automatically re-up CPUs when they become available, or to implement a more complex policy.

Signed-off-by: Alex Nixon <alex.nixon@citrix.com>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
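The re-up path described above is user-space policy: after catching the udev event for a re-enabled vcpu, the user writes to the CPU's standard sysfs hotplug attribute, which makes the kernel call cpu_up() and hence xen_cpu_up(). The following is a minimal user-space sketch, not part of this patch; it assumes the usual /sys/devices/system/cpu/cpuN/online interface in the guest, and the helper name and hard-coded CPU number are illustrative only.

/*
 * Illustrative sketch (assumption: standard sysfs CPU hotplug attribute).
 * A udev-triggered policy helper could bring a re-enabled vcpu back up.
 */
#include <stdio.h>
#include <errno.h>

static int bring_cpu_online(unsigned int cpu)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);

	f = fopen(path, "w");
	if (!f)
		return -errno;

	/* Writing "1" asks the kernel to run cpu_up() for this CPU. */
	if (fputs("1", f) == EOF) {
		fclose(f);
		return -EIO;
	}

	return fclose(f) ? -errno : 0;
}

int main(void)
{
	/* Hypothetical example: re-up vcpu 1 after it was re-enabled. */
	return bring_cpu_online(1) ? 1 : 0;
}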
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/smp.c       | 60
-rw-r--r--  arch/x86/xen/spinlock.c  |  5
-rw-r--r--  arch/x86/xen/time.c      |  8
-rw-r--r--  arch/x86/xen/xen-ops.h   |  6
4 files changed, 67 insertions, 12 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index baca7f2fbd8a..be5cbb2b7c60 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -11,8 +11,6 @@
  * useful topology information for the kernel to make use of. As a
  * result, all CPUs are treated as if they're single-core and
  * single-threaded.
- *
- * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
 #include <linux/err.h>
@@ -61,11 +59,12 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static __cpuinit void cpu_bringup_and_idle(void)
+static __cpuinit void cpu_bringup(void)
 {
 	int cpu = smp_processor_id();
 
 	cpu_init();
+	touch_softlockup_watchdog();
 	preempt_disable();
 
 	xen_enable_sysenter();
@@ -86,6 +85,11 @@ static __cpuinit void cpu_bringup_and_idle(void)
 	local_irq_enable();
 
 	wmb(); /* make sure everything is out */
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+	cpu_bringup();
 	cpu_idle();
 }
 
@@ -209,8 +213,6 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 		cpu_set(cpu, cpu_present_map);
 	}
-
-	//init_xenbus_allowed_cpumask();
 }
 
 static __cpuinit int
@@ -278,12 +280,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	struct task_struct *idle = idle_task(cpu);
 	int rc;
 
-#if 0
-	rc = cpu_up_check(cpu);
-	if (rc)
-		return rc;
-#endif
-
 #ifdef CONFIG_X86_64
 	/* Allocate node local memory for AP pdas */
 	WARN_ON(cpu == 0);
@@ -336,6 +332,42 @@ static void xen_smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+int xen_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	if (cpu == 0)
+		return -EBUSY;
+
+	cpu_disable_common();
+
+	load_cr3(swapper_pg_dir);
+	return 0;
+}
+
+void xen_cpu_die(unsigned int cpu)
+{
+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+		current->state = TASK_UNINTERRUPTIBLE;
+		schedule_timeout(HZ/10);
+	}
+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+	xen_uninit_lock_cpu(cpu);
+	xen_teardown_timer(cpu);
+
+	if (num_online_cpus() == 1)
+		alternatives_smp_switch(0);
+}
+
+void xen_play_dead(void)
+{
+	play_dead_common();
+	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+	cpu_bringup();
+}
+
 static void stop_self(void *v)
 {
 	int cpu = smp_processor_id();
@@ -419,9 +451,13 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
-	.cpu_up = xen_cpu_up,
 	.smp_cpus_done = xen_smp_cpus_done,
 
+	.cpu_up = xen_cpu_up,
+	.cpu_die = xen_cpu_die,
+	.cpu_disable = xen_cpu_disable,
+	.play_dead = xen_play_dead,
+
 	.smp_send_stop = xen_smp_send_stop,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d072823bc06d..dd71e3a021cd 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -357,6 +357,11 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
 }
 
+void xen_uninit_lock_cpu(int cpu)
+{
+	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+}
+
 void __init xen_init_spinlocks(void)
 {
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 20182d9072c4..004ba86326ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -450,6 +450,14 @@ void xen_setup_timer(int cpu)
 	setup_runstate_info(cpu);
 }
 
+void xen_teardown_timer(int cpu)
+{
+	struct clock_event_device *evt;
+	BUG_ON(cpu == 0);
+	evt = &per_cpu(xen_clock_events, cpu);
+	unbind_from_irqhandler(evt->irq, NULL);
+}
+
 void xen_setup_cpu_clockevents(void)
 {
 	BUG_ON(preemptible());
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e8bfdaa20d3..8dbd97fd7f18 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -34,6 +34,7 @@ void __init xen_build_dynamic_phys_to_machine(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
+void xen_teardown_timer(int cpu);
 cycle_t xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 unsigned long xen_tsc_khz(void);
@@ -50,11 +51,16 @@ void xen_mark_init_mm_pinned(void);
 
 void __init xen_setup_vcpu_info_placement(void);
 
+void xen_play_dead(void);
+void xen_cpu_die(unsigned int cpu);
+int xen_cpu_disable(void);
+
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
 void __init xen_init_spinlocks(void);
 __cpuinit void xen_init_lock_cpu(int cpu);
+void xen_uninit_lock_cpu(int cpu);
 
 extern cpumask_t xen_cpu_initialized_map;
 #else