Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c | 77
1 file changed, 37 insertions(+), 40 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 57db122a4f62..addbbe8028c2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,6 +58,8 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DECLARE_COMPLETION(cpu_running);
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
ret = boot_secondary(cpu, idle);
if (ret == 0) {
- unsigned long timeout;
-
/*
* CPU was successfully started, wait for it
* to come online or time out.
*/
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout)) {
- if (cpu_online(cpu))
- break;
-
- udelay(10);
- barrier();
- }
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
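[The handshake this hunk introduces is the standard completion pattern: the
boot CPU sleeps with a timeout instead of busy-polling cpu_online(), and the
secondary signals once it has marked itself online. A minimal sketch, using
the names from the patch; the two wrapper functions are illustrative only:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static DECLARE_COMPLETION(cpu_running);

    /* Boot CPU side: sleep until the secondary signals, or give up
     * after one second.  Returns 0 if the CPU came up in time. */
    static int wait_for_secondary(unsigned int cpu)
    {
            wait_for_completion_timeout(&cpu_running,
                                        msecs_to_jiffies(1000));
            return cpu_online(cpu) ? 0 : -EIO;
    }

    /* Secondary CPU side: mark ourselves online, then wake the waiter. */
    static void announce_online(unsigned int cpu)
    {
            set_cpu_online(cpu, true);
            complete(&cpu_running);
    }

Unlike the old jiffies loop, the waiter consumes no CPU while blocked, and it
wakes as soon as complete() runs rather than at the next 10us poll.]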
@@ -233,20 +227,6 @@ void __ref cpu_die(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpu_logical_map[NR_CPUS];
-
-void __init smp_setup_processor_id(void)
-{
- int i;
- u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
-
- cpu_logical_map(0) = cpu;
- for (i = 1; i < NR_CPUS; ++i)
- cpu_logical_map(i) = i == cpu ? 0 : i;
-
- printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
-}
-
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
@@ -260,6 +240,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
+static void percpu_timer_setup(void);
+
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -300,22 +282,16 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
- * before we continue.
+ * before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+ complete(&cpu_running);
/*
* Setup the percpu timer for this CPU.
*/
percpu_timer_setup();
- while (!cpu_active(cpu))
- cpu_relax();
-
- /*
- * cpu_active bit is set, so it's safe to enable interrupts
- * now.
- */
local_irq_enable();
local_fiq_enable();
@@ -373,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* re-initialize the map in platform_smp_prepare_cpus() if
* present != possible (e.g. physical hotplug).
*/
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
/*
* Initialise the SCU if there is more than one CPU
@@ -443,9 +419,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
static void ipi_timer(void)
{
struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
- irq_enter();
evt->event_handler(evt);
- irq_exit();
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -475,7 +449,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
clockevents_register_device(evt);
}
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+ if (lt_ops)
+ return -EBUSY;
+
+ lt_ops = ops;
+ return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
@@ -483,7 +470,7 @@ void __cpuinit percpu_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
evt->broadcast = smp_timer_broadcast;
- if (local_timer_setup(evt))
+ if (!lt_ops || lt_ops->setup(evt))
broadcast_timer_setup(evt);
}
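[The new lt_ops hook inverts the old hard-wired local_timer_setup() call: a
platform's per-CPU timer driver registers a struct local_timer_ops at init
time, and percpu_timer_setup() falls back to the broadcast timer when no
driver has registered or its setup() fails. A sketch of how a driver would
plug in, assuming the two-callback ops layout implied by the patch; the my_*
names are illustrative:

    #include <linux/init.h>
    #include <linux/clockchips.h>
    #include <asm/localtimer.h>

    static int __cpuinit my_timer_setup(struct clock_event_device *evt)
    {
            /* program the per-CPU timer hardware and register evt;
             * returning non-zero makes the core fall back to broadcast */
            return 0;
    }

    static void my_timer_stop(struct clock_event_device *evt)
    {
            /* quiesce this CPU's timer, e.g. on CPU hot-unplug */
    }

    static struct local_timer_ops my_timer_ops __cpuinitdata = {
            .setup  = my_timer_setup,
            .stop   = my_timer_stop,
    };

    static int __init my_timer_init(void)
    {
            /* returns -EBUSY if another driver registered first */
            return local_timer_register(&my_timer_ops);
    }
]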
@@ -498,7 +485,8 @@ static void percpu_timer_stop(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- local_timer_stop(evt);
+ if (lt_ops)
+ lt_ops->stop(evt);
}
#endif
@@ -548,7 +536,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
switch (ipinr) {
case IPI_TIMER:
+ irq_enter();
ipi_timer();
+ irq_exit();
break;
case IPI_RESCHEDULE:
@@ -556,15 +546,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
+ irq_enter();
generic_smp_call_function_interrupt();
+ irq_exit();
break;
case IPI_CALL_FUNC_SINGLE:
+ irq_enter();
generic_smp_call_function_single_interrupt();
+ irq_exit();
break;
case IPI_CPU_STOP:
+ irq_enter();
ipi_cpu_stop(cpu);
+ irq_exit();
break;
default:
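[Moving irq_enter()/irq_exit() out of ipi_timer() and into each handle_IPI()
case brackets every IPI that runs handler code, not just the timer one.
irq_enter() tells the rest of the kernel (RCU, interrupt-time accounting, the
softirq machinery) that we are in hardirq context, and irq_exit() runs any
softirqs the handler raised. A minimal sketch of the invariant, where
do_ipi_work() is a placeholder for any of the handlers above:

    #include <linux/hardirq.h>

    static void do_ipi_work(void);  /* placeholder for an IPI handler body */

    static void handle_one_ipi(void)
    {
            irq_enter();    /* in_irq() becomes true; RCU sees an interrupt */
            do_ipi_work();  /* handler may safely raise softirqs */
            irq_exit();     /* pending softirqs run here if outermost */
    }
]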
@@ -585,8 +581,9 @@ void smp_send_stop(void)
unsigned long timeout;
if (num_online_cpus() > 1) {
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
+ struct cpumask mask;
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
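[This last hunk replaces the deprecated by-value cpumask_t idiom
(cpu_online_map, cpu_clear()) with the struct cpumask API, which makes the
copy explicit and keeps working when masks become variably sized
(CONFIG_CPUMASK_OFFSTACK). The "everyone but me" pattern it builds looks like
this; smp_cross_call() and IPI_CPU_STOP are the file-local hook and IPI type
from the patch, declared here only so the sketch is self-contained:

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    extern void smp_cross_call(const struct cpumask *, unsigned int);
    #define IPI_CPU_STOP 4  /* illustrative; real value is the smp.c enum */

    static void stop_other_cpus(void)
    {
            struct cpumask mask;

            cpumask_copy(&mask, cpu_online_mask);          /* snapshot online CPUs */
            cpumask_clear_cpu(smp_processor_id(), &mask);  /* drop ourselves */
            if (!cpumask_empty(&mask))
                    smp_cross_call(&mask, IPI_CPU_STOP);
    }
]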