Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------------
1 file changed, 56 insertions(+), 36 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8175a6968c6b..f03d52d0b88d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -745,12 +745,21 @@ struct call_data_struct {
int wait;
};
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute @func or have already executed it.
+ *
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
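
For context, a minimal caller sketch; the handler drain_remote_queues() and the call site are illustrative, only the smp_call_function() contract documented above comes from this file:

/* Illustrative only -- not part of this patch. */
static void drain_remote_queues(void *unused)
{
	/* Must be fast and non-blocking: runs in cross-call context. */
}

static void example_caller(void)
{
	/* Process context, interrupts enabled; wait=1 blocks until
	 * every other cpu has finished running the handler. */
	smp_call_function(drain_remote_queues, NULL, 0, 1);
}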
@@ -759,7 +768,6 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
{
struct call_data_struct data;
int cpus;
- long timeout;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
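
The failure mode this WARN_ON() guards against is worth spelling out:

/* If cpu A and cpu B each call this with interrupts disabled and
 * target each other, both spin on data.finished while neither can
 * take the cross-call interrupt that would service the peer's
 * request -- a hard deadlock. Hence the warning above. */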
@@ -777,31 +785,18 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
goto out_unlock;
call_data = &data;
+ mb();
smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
- /*
- * Wait for other cpus to complete function or at
- * least snap the call data.
- */
- timeout = 1000000;
- while (atomic_read(&data.finished) != cpus) {
- if (--timeout <= 0)
- goto out_timeout;
- barrier();
- udelay(1);
- }
+ /* Wait for response */
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
out_unlock:
spin_unlock(&call_lock);
return 0;
-
-out_timeout:
- spin_unlock(&call_lock);
- printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
- cpus, atomic_read(&data.finished));
- return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
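
For reference, the receiving side of this handshake follows the pattern below (a sketch of smp_call_function_client() as it exists elsewhere in this file, untouched by this hunk); whether finished is bumped before or after running the function is what gives the wait flag its meaning:

/* Sketch of the cross-call receiver; not part of this diff. */
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator continue only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator continue after snapping call_data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}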
@@ -1269,7 +1264,6 @@ void __init smp_tick_init(void)
boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;
- cpu_set(boot_cpu_id, cpu_online_map);
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
@@ -1285,7 +1279,7 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
spin_lock_irqsave(&prof_setup_lock, flags);
- for_each_cpu(i)
+ for_each_possible_cpu(i)
prof_multiplier(i) = multiplier;
current_tick_offset = (timer_tick_offset / multiplier);
spin_unlock_irqrestore(&prof_setup_lock, flags);
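
The iterator rename is mechanical; the distinction it makes explicit (illustrative summary, not code from this patch):

/*
 *	for_each_possible_cpu(i)  - every cpu that may ever come up
 *	for_each_online_cpu(i)    - only cpus currently online
 *
 * Profiling state must cover hotpluggable cpus, hence "possible".
 */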
@@ -1293,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+static void __init smp_tune_scheduling(void)
+{
+ int instance, node;
+ unsigned int def, smallest = ~0U;
+
+ def = ((tlb_type == hypervisor) ?
+ (3 * 1024 * 1024) :
+ (4 * 1024 * 1024));
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, &node, NULL)) {
+ unsigned int val;
+
+ val = prom_getintdefault(node, "ecache-size", def);
+ if (val < smallest)
+ smallest = val;
+
+ instance++;
+ }
+
+ /* Any value less than 256K is nonsense. */
+ if (smallest < (256U * 1024U))
+ smallest = 256 * 1024;
+
+ max_cache_size = smallest;
+
+ if (smallest < 1U * 1024U * 1024U)
+ printk(KERN_INFO "Using max_cache_size of %uKB\n",
+ smallest / 1024U);
+ else
+ printk(KERN_INFO "Using max_cache_size of %uMB\n",
+ smallest / 1024U / 1024U);
+}
+
/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
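
A worked example of the sizing logic in smp_tune_scheduling(), for a hypothetical machine:

/* Hypothetical: two cpu nodes report ecache-size of 8MB and 512KB,
 * and a third node on a non-hypervisor box lacks the property, so it
 * contributes the 4MB default. The loop leaves smallest == 512KB,
 * which survives the 256KB clamp:
 *
 *	max_cache_size = 512 * 1024;
 *
 * and, being under 1MB, logs "Using max_cache_size of 512KB". */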
@@ -1313,12 +1341,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
if (tlb_type == hypervisor) {
int j;
/* XXX get this mapping from machine description */
- for_each_cpu(j) {
+ for_each_possible_cpu(j) {
if ((j >> 2) == (i >> 2))
cpu_set(j, cpu_sibling_map[i]);
}
@@ -1328,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
smp_store_cpu_info(boot_cpu_id);
+ smp_tune_scheduling();
}
/* Set this up early so that things like the scheduler can init
@@ -1350,18 +1379,6 @@ void __init smp_setup_cpu_possible_map(void)
void __devinit smp_prepare_boot_cpu(void)
{
- int cpu = hard_smp_processor_id();
-
- if (cpu >= NR_CPUS) {
- prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
- prom_halt();
- }
-
- current_thread_info()->cpu = cpu;
- __local_per_cpu_offset = __per_cpu_offset(cpu);
-
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
@@ -1438,4 +1455,7 @@ void __init setup_per_cpu_areas(void)
for (i = 0; i < NR_CPUS; i++, ptr += size)
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+ /* Setup %g5 for the boot cpu. */
+ __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}
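
The offset stored here is what the per-cpu accessors add to a variable's address in the .data.percpu template; a sketch of the sparc64 accessors of this era, assuming the convention that %g5 caches __local_per_cpu_offset:

/* Sketch, not part of this patch: how the offsets are consumed. */
#define per_cpu(var, cpu) \
	(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
	(*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))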