Diffstat (limited to 'arch/x86')
30 files changed, 140 insertions, 406 deletions
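The diff that follows converts arch/x86 from its private smp_call_function machinery to the kernel's generic SMP helpers: the unused nonatomic/retry argument is dropped from smp_call_function(), smp_call_function_single() and on_each_cpu(), a CALL_FUNCTION_SINGLE_VECTOR IPI is added, and the per-arch call_data_struct bookkeeping is replaced by calls into generic_smp_call_function_interrupt(). Below is a minimal sketch of the calling convention after this change, assuming the (func, info, wait) prototypes used throughout the hunks; dummy_func() and example_callers() are hypothetical names for illustration only.

/* Sketch only: post-conversion calling convention (hypothetical callers). */
#include <linux/smp.h>

static void dummy_func(void *info)
{
	/* invoked on each target CPU from the IPI handler */
}

static void example_callers(void)
{
	/* was: smp_call_function(dummy_func, NULL, 1, 1);  -- (nonatomic, wait) */
	smp_call_function(dummy_func, NULL, 1);

	/* was: on_each_cpu(dummy_func, NULL, 1, 1); */
	on_each_cpu(dummy_func, NULL, 1);

	/* was: smp_call_function_single(0, dummy_func, NULL, 1, 1); */
	smp_call_function_single(0, dummy_func, NULL, 1);
}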
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2642b4bf41b9..96e0c2ebc388 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -170,6 +170,7 @@ config GENERIC_PENDING_IRQ config X86_SMP bool depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) + select USE_GENERIC_SMP_HELPERS default y config X86_32_SMP diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 3e58b676d23b..a437d027f20b 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c @@ -1340,6 +1340,10 @@ void __init smp_intr_init(void) /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); + + /* IPI for single call function */ + set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); } #endif diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 987410745182..c4a7ec31394c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -364,7 +364,7 @@ static void mcheck_check_cpu(void *info) static void mcheck_timer(struct work_struct *work) { - on_each_cpu(mcheck_check_cpu, NULL, 1, 1); + on_each_cpu(mcheck_check_cpu, NULL, 1); /* * Alert userspace if needed. If we logged an MCE, reduce the @@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, * Collect entries that were still getting written before the * synchronize. */ - on_each_cpu(collect_tscs, cpu_tsc, 1, 1); + on_each_cpu(collect_tscs, cpu_tsc, 1); for (i = next; i < MCE_LOG_LEN; i++) { if (mcelog.entry[i].finished && mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { @@ -746,7 +746,7 @@ static void mce_restart(void) if (next_interval) cancel_delayed_work(&mcheck_work); /* Timer race is harmless here */ - on_each_cpu(mce_init, NULL, 1, 1); + on_each_cpu(mce_init, NULL, 1); next_interval = check_interval * HZ; if (next_interval) schedule_delayed_work(&mcheck_work, diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index 00ccb6c14ec2..cc1fccdd31e0 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c @@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); static void mce_work_fn(struct work_struct *work) { - on_each_cpu(mce_checkregs, NULL, 1, 1); + on_each_cpu(mce_checkregs, NULL, 1); schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); } diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 105afe12beb0..6f23969c8faf 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base, atomic_set(&data.gate,0); /* Start the ball rolling on other CPUs */ - if (smp_call_function(ipi_handler, &data, 1, 0) != 0) + if (smp_call_function(ipi_handler, &data, 0) != 0) panic("mtrr: timed out waiting for other CPUs\n"); local_irq_save(flags); @@ -1682,7 +1682,7 @@ void mtrr_ap_init(void) */ void mtrr_save_state(void) { - smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); + smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); } static int __init mtrr_init_finialize(void) diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 2e9bef6e3aa3..6d4bdc02388a 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c @@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void) if (atomic_read(&nmi_active) <= 0) return; - 
on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); + on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); if (wd_ops) wd_ops->unreserve(); @@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void) return; } - on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); + on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); touch_nmi_watchdog(); } diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 71f1c2654bec..2de5fa2bbf77 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -96,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, for (; count; count -= 16) { cmd.eax = pos; cmd.ecx = pos >> 32; - smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); + smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); if (copy_to_user(tmp, &cmd, 16)) return -EFAULT; tmp += 16; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index ba41bf42748d..ae63e584c340 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -816,6 +816,9 @@ END(invalidate_interrupt\num) ENTRY(call_function_interrupt) apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt END(call_function_interrupt) +ENTRY(call_function_single_interrupt) + apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt +END(call_function_single_interrupt) ENTRY(irq_move_cleanup_interrupt) apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt END(irq_move_cleanup_interrupt) diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 603261a5885c..558abf4c796a 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -1569,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy) void print_all_local_APICs(void) { - on_each_cpu(print_local_APIC, NULL, 1, 1); + on_each_cpu(print_local_APIC, NULL, 1); } void /*__init*/ print_PIC(void) diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index b16ef029cf88..6510cde36b35 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -1160,7 +1160,7 @@ void __apicdebuginit print_local_APIC(void * dummy) void print_all_local_APICs (void) { - on_each_cpu(print_local_APIC, NULL, 1, 1); + on_each_cpu(print_local_APIC, NULL, 1); } void __apicdebuginit print_PIC(void) diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 31f49e8f46a7..0373e88de95a 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -199,6 +199,10 @@ void __init native_init_IRQ(void) /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); + /* IPI for generic single function call */ + alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); + /* Low priority IPI to cleanup after moving an irq */ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); #endif diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 21f2bae98c15..a8449571858a 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) load_LDT(pc); mask = cpumask_of_cpu(smp_processor_id()); if (!cpus_equal(current->mm->cpu_vm_mask, mask)) - smp_call_function(flush_ldt, current->mm, 1, 1); + smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #else load_LDT(pc); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 716b89284be0..ec024b3baad0 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -130,7 +130,7 @@ int __init 
check_nmi_watchdog(void) #ifdef CONFIG_SMP if (nmi_watchdog == NMI_LOCAL_APIC) - smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); + smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); #endif for_each_possible_cpu(cpu) @@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused) void acpi_nmi_enable(void) { if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_enable, NULL, 1); } static void __acpi_nmi_disable(void *__unused) @@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused) void acpi_nmi_disable(void) { if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) - on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); + on_each_cpu(__acpi_nmi_disable, NULL, 1); } void setup_apic_nmi_watchdog(void *unused) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4061d63aabe7..7dceea947232 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -132,7 +132,7 @@ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ - smp_call_function(do_nothing, NULL, 0, 1); + smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0cb7aadc87cd..361b7a4c640c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu) send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; - -void lock_ipi_call_lock(void) +void native_send_call_func_single_ipi(int cpu) { - spin_lock_irq(&call_lock); -} - -void unlock_ipi_call_lock(void) -{ - spin_unlock_irq(&call_lock); -} - -static struct call_data_struct *call_data; - -static void __smp_call_function(void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - struct call_data_struct data; - int cpus = num_online_cpus() - 1; - - if (!cpus) - return; - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); - - /* Send a message to all other CPUs and wait for them to respond */ - send_IPI_allbutself(CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); + send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); } - -/** - * smp_call_function_mask(): Run a function on a set of other CPUs. - * @mask: The set of cpus to run on. Must not include the current cpu. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. 
- */ -static int -native_smp_call_function_mask(cpumask_t mask, - void (*func)(void *), void *info, - int wait) +void native_send_call_func_ipi(cpumask_t mask) { - struct call_data_struct data; cpumask_t allbutself; - int cpus; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - /* Holding any lock stops cpus from going down. */ - spin_lock(&call_lock); allbutself = cpu_online_map; cpu_clear(smp_processor_id(), allbutself); - cpus_and(mask, mask, allbutself); - cpus = cpus_weight(mask); - - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - wmb(); - - /* Send a message to other CPUs */ if (cpus_equal(mask, allbutself) && cpus_equal(cpu_online_map, cpu_callout_map)) send_IPI_allbutself(CALL_FUNCTION_VECTOR); else send_IPI_mask(mask, CALL_FUNCTION_VECTOR); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus) - cpu_relax(); - - if (wait) - while (atomic_read(&data.finished) != cpus) - cpu_relax(); - spin_unlock(&call_lock); - - return 0; } static void stop_this_cpu(void *dummy) @@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy) static void native_smp_send_stop(void) { - int nolock; unsigned long flags; if (reboot_force) return; - /* Don't deadlock on the call lock in panic */ - nolock = !spin_trylock(&call_lock); + smp_call_function(stop_this_cpu, NULL, 0); local_irq_save(flags); - __smp_call_function(stop_this_cpu, NULL, 0, 0); - if (!nolock) - spin_unlock(&call_lock); disable_local_APIC(); local_irq_restore(flags); } @@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs) void smp_call_function_interrupt(struct pt_regs *regs) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - ack_APIC_irq(); - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); #ifdef CONFIG_X86_32 __get_cpu_var(irq_stat).irq_call_count++; #else add_pda(irq_call_count, 1); #endif irq_exit(); +} - if (wait) { - mb(); - atomic_inc(&call_data->finished); - } +void smp_call_function_single_interrupt(struct pt_regs *regs) +{ + ack_APIC_irq(); + irq_enter(); + generic_smp_call_function_single_interrupt(); +#ifdef CONFIG_X86_32 + __get_cpu_var(irq_stat).irq_call_count++; +#else + add_pda(irq_call_count, 1); +#endif + irq_exit(); } struct smp_ops smp_ops = { @@ -338,7 +219,8 @@ struct smp_ops smp_ops = { .smp_send_stop = native_smp_send_stop, .smp_send_reschedule = native_smp_send_reschedule, - .smp_call_function_mask = native_smp_call_function_mask, + + .send_call_func_ipi = native_send_call_func_ipi, + .send_call_func_single_ipi = native_send_call_func_single_ipi, }; EXPORT_SYMBOL_GPL(smp_ops); - diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f35c2d8016ac..687376ab07e8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -327,12 +327,12 @@ static void __cpuinit start_secondary(void *unused) * lock helps us to not include this cpu in a currently in progress * smp_call_function(). 
*/ - lock_ipi_call_lock(); + ipi_call_lock_irq(); #ifdef CONFIG_X86_IO_APIC setup_vector_irq(smp_processor_id()); #endif cpu_set(smp_processor_id(), cpu_online_map); - unlock_ipi_call_lock(); + ipi_call_unlock_irq(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; setup_secondary_clock(); diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 3449064d141a..99941b37eca0 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c @@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu) per_cpu(cpu_number, cpu) = cpu; } #endif - -/** - * smp_call_function(): Run a function on all other CPUs. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait (atomically) until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. - * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler or from a bottom half handler. - */ -int smp_call_function(void (*func) (void *info), void *info, int nonatomic, - int wait) -{ - return smp_call_function_mask(cpu_online_map, func, info, wait); -} -EXPORT_SYMBOL(smp_call_function); - -/** - * smp_call_function_single - Run a function on a specific CPU - * @cpu: The target CPU. Cannot be the calling CPU. - * @func: The function to run. This must be fast and non-blocking. - * @info: An arbitrary pointer to pass to the function. - * @nonatomic: Unused. - * @wait: If true, wait until function has completed on other CPUs. - * - * Returns 0 on success, else a negative status code. - * - * If @wait is true, then returns once @func has returned; otherwise - * it returns just before the target cpu calls @func. 
- */ -int smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait) -{ - /* prevent preemption and reschedule on another processor */ - int ret; - int me = get_cpu(); - if (cpu == me) { - local_irq_disable(); - func(info); - local_irq_enable(); - put_cpu(); - return 0; - } - - ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); - - put_cpu(); - return ret; -} -EXPORT_SYMBOL(smp_call_function_single); diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 9bb2363851af..fec1ecedc9b7 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); + on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 5039d0f097a2..dcbf7a1159ea 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, NULL, 1, 1); + on_each_cpu(do_flush_tlb_all, NULL, 1); } diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index e50740d32314..0b8b6690a86d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -279,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) { long cpu = (long)arg; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); + smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); return NOTIFY_DONE; } @@ -302,7 +302,7 @@ static int __init vsyscall_init(void) #ifdef CONFIG_SYSCTL register_sysctl_table(kernel_root_table2); #endif - on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); + on_each_cpu(cpu_vsyscall_init, NULL, 1); hotcpu_notifier(cpu_vsyscall_notifier, 0); return 0; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 540e95179074..10ce6ee4c491 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx) { if (vmx->vcpu.cpu == -1) return; - smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); + smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); vmx->launched = 0; } @@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->vmcs) { - on_each_cpu(__vcpu_clear, vmx, 0, 1); + on_each_cpu(__vcpu_clear, vmx, 1); free_vmcs(vmx->vmcs); vmx->vmcs = NULL; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63a77caa59f1..0faa2546b1cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) * So need not to call smp_call_function_single() in that case. 
*/ if (vcpu->guest_mode && vcpu->cpu != cpu) - smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); + smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); put_cpu(); } diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 57d043fa893e..d5a2b39f882b 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c @@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) rv.msr_no = msr_no; if (safe) { - smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); err = rv.err; } else { - smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); } *l = rv.l; *h = rv.h; @@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) rv.l = l; rv.h = h; if (safe) { - smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); err = rv.err; } else { - smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); + smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); } return err; diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 8dedd01e909f..ee0fba092157 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -950,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy) halt(); } -static DEFINE_SPINLOCK(call_lock); - -struct call_data_struct { - void (*func) (void *info); - void *info; - volatile unsigned long started; - volatile unsigned long finished; - int wait; -}; - -static struct call_data_struct *call_data; - /* execute a thread on a new CPU. The function to be called must be * previously set up. 
This is used to schedule a function for * execution on all CPUs - set up the function then broadcast a * function_interrupt CPI to come here on each CPU */ static void smp_call_function_interrupt(void) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - /* must take copy of wait because call_data may be replaced - * unless the function is waiting for us to finish */ - int wait = call_data->wait; - __u8 cpu = smp_processor_id(); - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - if (!test_and_clear_bit(cpu, &call_data->started)) { - /* If the bit wasn't set, this could be a replay */ - printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion" - " with no call pending\n", cpu); - return; - } - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func) (info); + generic_smp_call_function_interrupt(); __get_cpu_var(irq_stat).irq_call_count++; irq_exit(); - if (wait) { - mb(); - clear_bit(cpu, &call_data->finished); - } } -static int -voyager_smp_call_function_mask(cpumask_t cpumask, - void (*func) (void *info), void *info, int wait) +static void smp_call_function_single_interrupt(void) { - struct call_data_struct data; - u32 mask = cpus_addr(cpumask)[0]; - - mask &= ~(1 << smp_processor_id()); - - if (!mask) - return 0; - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - data.started = mask; - data.wait = wait; - if (wait) - data.finished = mask; - - spin_lock(&call_lock); - call_data = &data; - wmb(); - /* Send a message to all other CPUs and wait for them to respond */ - send_CPI(mask, VIC_CALL_FUNCTION_CPI); - - /* Wait for response */ - while (data.started) - barrier(); - - if (wait) - while (data.finished) - barrier(); - - spin_unlock(&call_lock); - - return 0; + irq_enter(); + generic_smp_call_function_single_interrupt(); + __get_cpu_var(irq_stat).irq_call_count++; + irq_exit(); } /* Sorry about the name. 
In an APIC based system, the APICs @@ -1094,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs) smp_call_function_interrupt(); } +void smp_qic_call_function_single_interrupt(struct pt_regs *regs) +{ + ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); + smp_call_function_single_interrupt(); +} + void smp_vic_cpi_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -1114,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs) smp_enable_irq_interrupt(); if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) smp_call_function_interrupt(); + if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) + smp_call_function_single_interrupt(); set_irq_regs(old_regs); } @@ -1129,7 +1067,7 @@ static void do_flush_tlb_all(void *info) /* flush the TLB of every active CPU in the system */ void flush_tlb_all(void) { - on_each_cpu(do_flush_tlb_all, 0, 1, 1); + on_each_cpu(do_flush_tlb_all, 0, 1); } /* send a reschedule CPI to one CPU by physical CPU number*/ @@ -1161,7 +1099,7 @@ int safe_smp_processor_id(void) /* broadcast a halt to all other CPUs */ static void voyager_smp_send_stop(void) { - smp_call_function(smp_stop_cpu_function, NULL, 1, 1); + smp_call_function(smp_stop_cpu_function, NULL, 1); } /* this function is triggered in time.c when a clock tick fires @@ -1848,5 +1786,7 @@ struct smp_ops smp_ops = { .smp_send_stop = voyager_smp_send_stop, .smp_send_reschedule = voyager_smp_send_reschedule, - .smp_call_function_mask = voyager_smp_call_function_mask, + + .send_call_func_ipi = native_send_call_func_ipi, + .send_call_func_single_ipi = native_send_call_func_single_ipi, }; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 47f4e2e4a096..65c6e46bf059 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -141,7 +141,7 @@ static void cpa_flush_all(unsigned long cache) { BUG_ON(irqs_disabled()); - on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); + on_each_cpu(__cpa_flush_all, (void *) cache, 1); } static void __cpa_flush_range(void *arg) @@ -162,7 +162,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) BUG_ON(irqs_disabled()); WARN_ON(PAGE_ALIGN(start) != start); - on_each_cpu(__cpa_flush_range, NULL, 1, 1); + on_each_cpu(__cpa_flush_range, NULL, 1); if (!cache) return; diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2b6ad5b9f9d5..7f3329b55d2e 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -218,8 +218,8 @@ static int nmi_setup(void) } } - on_each_cpu(nmi_save_registers, NULL, 0, 1); - on_each_cpu(nmi_cpu_setup, NULL, 0, 1); + on_each_cpu(nmi_save_registers, NULL, 1); + on_each_cpu(nmi_cpu_setup, NULL, 1); nmi_enabled = 1; return 0; } @@ -271,7 +271,7 @@ static void nmi_shutdown(void) { struct op_msrs *msrs = &get_cpu_var(cpu_msrs); nmi_enabled = 0; - on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); + on_each_cpu(nmi_cpu_shutdown, NULL, 1); unregister_die_notifier(&profile_exceptions_nb); model->shutdown(msrs); free_msrs(); @@ -286,7 +286,7 @@ static void nmi_cpu_start(void *dummy) static int nmi_start(void) { - on_each_cpu(nmi_cpu_start, NULL, 0, 1); + on_each_cpu(nmi_cpu_start, NULL, 1); return 0; } @@ -298,7 +298,7 @@ static void nmi_cpu_stop(void *dummy) static void nmi_stop(void) { - on_each_cpu(nmi_cpu_stop, NULL, 0, 1); + on_each_cpu(nmi_cpu_stop, NULL, 1); } struct op_counter_config counter_config[OP_MAX_COUNTER]; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 
dcd4e51f2f16..bb508456ef52 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1214,7 +1214,9 @@ static const struct smp_ops xen_smp_ops __initdata = { .smp_send_stop = xen_smp_send_stop, .smp_send_reschedule = xen_smp_send_reschedule, - .smp_call_function_mask = xen_smp_call_function_mask, + + .send_call_func_ipi = xen_smp_send_call_function_ipi, + .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, }; #endif /* CONFIG_SMP */ diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42b3b9ed641d..ff0aa74afaa1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -796,7 +796,7 @@ static void drop_mm_ref(struct mm_struct *mm) } if (!cpus_empty(mask)) - xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); + smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); } #else static void drop_mm_ref(struct mm_struct *mm) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d2e3c20127d7..233156f39b7f 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -36,27 +36,14 @@ #include "mmu.h" cpumask_t xen_cpu_initialized_map; -static DEFINE_PER_CPU(int, resched_irq) = -1; -static DEFINE_PER_CPU(int, callfunc_irq) = -1; -static DEFINE_PER_CPU(int, debug_irq) = -1; - -/* - * Structure and data for smp_call_function(). This is designed to minimise - * static memory requirements. It also looks cleaner. - */ -static DEFINE_SPINLOCK(call_lock); -struct call_data_struct { - void (*func) (void *info); - void *info; - atomic_t started; - atomic_t finished; - int wait; -}; +static DEFINE_PER_CPU(int, resched_irq); +static DEFINE_PER_CPU(int, callfunc_irq); +static DEFINE_PER_CPU(int, callfuncsingle_irq); +static DEFINE_PER_CPU(int, debug_irq) = -1; static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); - -static struct call_data_struct *call_data; +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); /* * Reschedule call back. Nothing to do, @@ -128,6 +115,17 @@ static int xen_smp_intr_init(unsigned int cpu) goto fail; per_cpu(debug_irq, cpu) = rc; + callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, + cpu, + xen_call_function_single_interrupt, + IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(callfuncsingle_irq, cpu) = rc; + return 0; fail: @@ -137,6 +135,9 @@ static int xen_smp_intr_init(unsigned int cpu) unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); if (per_cpu(debug_irq, cpu) >= 0) unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); + if (per_cpu(callfuncsingle_irq, cpu) >= 0) + unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); + return rc; } @@ -336,7 +337,7 @@ static void stop_self(void *v) void xen_smp_send_stop(void) { - smp_call_function(stop_self, NULL, 0, 0); + smp_call_function(stop_self, NULL, 0); } void xen_smp_send_reschedule(int cpu) @@ -344,7 +345,6 @@ void xen_smp_send_reschedule(int cpu) xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); } - static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) { unsigned cpu; @@ -355,83 +355,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) xen_send_IPI_one(cpu, vector); } +void xen_smp_send_call_function_ipi(cpumask_t mask) +{ + int cpu; + + xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); + + /* Make sure other vcpus get a chance to run if they need to. 
*/ + for_each_cpu_mask(cpu, mask) { + if (xen_vcpu_stolen(cpu)) { + HYPERVISOR_sched_op(SCHEDOP_yield, 0); + break; + } + } +} + +void xen_smp_send_call_function_single_ipi(int cpu) +{ + xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); +} + static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) { - void (*func) (void *info) = call_data->func; - void *info = call_data->info; - int wait = call_data->wait; - - /* - * Notify initiating CPU that I've grabbed the data and am - * about to execute the function - */ - mb(); - atomic_inc(&call_data->started); - /* - * At this point the info structure may be out of scope unless wait==1 - */ irq_enter(); - (*func)(info); + generic_smp_call_function_interrupt(); __get_cpu_var(irq_stat).irq_call_count++; irq_exit(); - if (wait) { - mb(); /* commit everything before setting finished */ - atomic_inc(&call_data->finished); - } - return IRQ_HANDLED; } -int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait) +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) { - struct call_data_struct data; - int cpus, cpu; - bool yield; - - /* Holding any lock stops cpus from going down. */ - spin_lock(&call_lock); - - cpu_clear(smp_processor_id(), mask); - - cpus = cpus_weight(mask); - if (!cpus) { - spin_unlock(&call_lock); - return 0; - } - - /* Can deadlock when called with interrupts disabled */ - WARN_ON(irqs_disabled()); - - data.func = func; - data.info = info; - atomic_set(&data.started, 0); - data.wait = wait; - if (wait) - atomic_set(&data.finished, 0); - - call_data = &data; - mb(); /* write everything before IPI */ - - /* Send a message to other CPUs and wait for them to respond */ - xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); - - /* Make sure other vcpus get a chance to run if they need to. */ - yield = false; - for_each_cpu_mask(cpu, mask) - if (xen_vcpu_stolen(cpu)) - yield = true; - - if (yield) - HYPERVISOR_sched_op(SCHEDOP_yield, 0); - - /* Wait for response */ - while (atomic_read(&data.started) != cpus || - (wait && atomic_read(&data.finished) != cpus)) - cpu_relax(); - - spin_unlock(&call_lock); + irq_enter(); + generic_smp_call_function_single_interrupt(); + __get_cpu_var(irq_stat).irq_call_count++; + irq_exit(); - return 0; + return IRQ_HANDLED; } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d852ddbb3448..6f4b1045c1c2 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -55,13 +55,8 @@ void xen_smp_cpus_done(unsigned int max_cpus); void xen_smp_send_stop(void); void xen_smp_send_reschedule(int cpu); -int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic, - int wait); -int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info, - int nonatomic, int wait); - -int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), - void *info, int wait); +void xen_smp_send_call_function_ipi(cpumask_t mask); +void xen_smp_send_call_function_single_ipi(int cpu); extern cpumask_t xen_cpu_initialized_map; |
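With the arch-private implementations gone, x86 (and the Xen and Voyager sub-architectures) only deliver the IPIs and call back into the generic code from their interrupt handlers; queueing, waiting and locking now live in kernel/smp.c. The sketch below is a condensed view of the native wiring added in the arch/x86/kernel/smp.c hunks above, trimmed for illustration (the irq_call_count accounting and the other smp_ops members are omitted); it is not a drop-in file.

/* Condensed from the smp.c hunks above -- not a drop-in file. */
void native_send_call_func_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();	/* run queued single calls */
	irq_exit();					/* irq_call_count bump omitted here */
}

struct smp_ops smp_ops = {
	/* ...existing members elided... */
	.send_call_func_ipi        = native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};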