From ac61c1156623455c46701654abd8c99720bceea1 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Wed, 25 Apr 2018 12:17:53 +1000
Subject: powerpc: Fix smp_send_stop NMI IPI handling

The NMI IPI handler for a receiving CPU increments nmi_ipi_busy_count
over the handler function call, which causes later smp_send_nmi_ipi()
callers to spin until the call is finished.

The stop_this_cpu() function never returns, so the busy count is never
decremented, which can cause the system to hang in some cases. For
example, panic() will call smp_send_stop() early on, which calls
stop_this_cpu() on other CPUs, then later in the reboot path,
pnv_restart() will call smp_send_stop() again, which hangs.

Fix this by adding a special case to the stop_this_cpu() handler to
decrement the busy count, because it will never return.

Now that the NMI/non-NMI versions of stop_this_cpu() are different,
split them out into separate functions rather than doing #ifdef tricks
to share the body between the two functions.

Fixes: 6bed3237624e3 ("powerpc: use NMI IPI for smp_send_stop")
Reported-by: Abdul Haleem
Signed-off-by: Nicholas Piggin
[mpe: Split out the functions, tweak change log a bit]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/smp.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

(limited to 'arch/powerpc/kernel/smp.c')

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e16ec7b3b427..3582f30b60b7 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -565,11 +565,7 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 }
 #endif
 
-#ifdef CONFIG_NMI_IPI
-static void stop_this_cpu(struct pt_regs *regs)
-#else
 static void stop_this_cpu(void *dummy)
-#endif
 {
 	/* Remove this CPU */
 	set_cpu_online(smp_processor_id(), false);
@@ -580,10 +576,26 @@ static void stop_this_cpu(void *dummy)
 		spin_cpu_relax();
 }
 
+#ifdef CONFIG_NMI_IPI
+static void nmi_stop_this_cpu(struct pt_regs *regs)
+{
+	/*
+	 * This is a special case because it never returns, so the NMI IPI
+	 * handling would never mark it as done, which makes any later
+	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
+	 */
+	nmi_ipi_lock();
+	nmi_ipi_busy_count--;
+	nmi_ipi_unlock();
+
+	stop_this_cpu(NULL);
+}
+#endif
+
 void smp_send_stop(void)
 {
 #ifdef CONFIG_NMI_IPI
-	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000);
+	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 #else
 	smp_call_function(stop_this_cpu, NULL, 0);
 #endif
--
cgit v1.2.3
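The deadlock fixed above can be illustrated outside the kernel. Below is a
minimal userspace sketch (pthreads and C11 atomics, illustrative names only,
not the kernel's actual NMI IPI code): a sender marks an IPI in flight and
waits for the busy count to drop back to zero, so a handler that never
returns must drop the count itself, as nmi_stop_this_cpu() now does. The
accounting is simplified; in the kernel the receiving CPU adjusts
nmi_ipi_busy_count under nmi_ipi_lock() and the sender also has a timeout.

/*
 * Minimal userspace analogy of the fix above; not kernel code.
 * A sender marks an IPI in flight and then waits for the busy count
 * to return to zero. A handler that never returns has to drop the
 * count itself, otherwise every later sender spins forever.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int busy_count;			/* stands in for nmi_ipi_busy_count */

/* Handler that never returns, modelled on nmi_stop_this_cpu(). */
static void *stop_this_cpu_model(void *unused)
{
	(void)unused;

	/* The fix: mark the IPI as done before parking forever. */
	atomic_fetch_sub(&busy_count, 1);

	for (;;)
		sleep(1);			/* this "CPU" never comes back */
}

/* Sender, loosely modelled on smp_send_nmi_ipi() waiting for completion. */
static void send_stop_ipi_model(void)
{
	pthread_t t;

	atomic_fetch_add(&busy_count, 1);	/* IPI now in flight */
	pthread_create(&t, NULL, stop_this_cpu_model, NULL);
	pthread_detach(t);

	/* Without the decrement in the handler, this loop never exits. */
	while (atomic_load(&busy_count) > 0)
		;
}

int main(void)
{
	send_stop_ipi_model();			/* e.g. smp_send_stop() from panic() */
	puts("first stop IPI marked done");
	send_stop_ipi_model();			/* e.g. smp_send_stop() again on reboot */
	puts("second stop IPI marked done; no hang");
	return 0;
}

Build with cc -pthread; both calls return promptly because the parked
handler thread has already dropped the count, mirroring the
nmi_ipi_busy_count decrement added in nmi_stop_this_cpu().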
From 6029755eed95e5c90f763188c87ae3ff41e48e5c Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Fri, 27 Apr 2018 11:51:59 +1000
Subject: powerpc: Fix deadlock with multiple calls to smp_send_stop

smp_send_stop can lock up the IPI path for any subsequent calls,
because the receiving CPUs spin in their handler function. This
started becoming a problem with the addition of an smp_send_stop
call in the reboot path, because panics can reboot after doing
their own smp_send_stop.

The NMI IPI variant was fixed with ac61c11566 ("powerpc: Fix
smp_send_stop NMI IPI handling"), which leaves the smp_call_function
variant.

This is fixed by having smp_send_stop only ever do the
smp_call_function once. This is a bit less robust than the NMI IPI
fix, because any other call to smp_call_function after smp_send_stop
could deadlock, but that has always been the case, and it has not
been a problem before.

Fixes: f2748bdfe1573 ("powerpc/powernv: Always stop secondaries before reboot/shutdown")
Reported-by: Abdul Haleem
Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/smp.c | 55 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 39 insertions(+), 16 deletions(-)

(limited to 'arch/powerpc/kernel/smp.c')

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 3582f30b60b7..9ca7148b5881 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -565,17 +565,6 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 }
 #endif
 
-static void stop_this_cpu(void *dummy)
-{
-	/* Remove this CPU */
-	set_cpu_online(smp_processor_id(), false);
-
-	hard_irq_disable();
-	spin_begin();
-	while (1)
-		spin_cpu_relax();
-}
-
 #ifdef CONFIG_NMI_IPI
 static void nmi_stop_this_cpu(struct pt_regs *regs)
 {
@@ -583,23 +572,57 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	 * This is a special case because it never returns, so the NMI IPI
 	 * handling would never mark it as done, which makes any later
 	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
+	 *
+	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
 	nmi_ipi_lock();
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock();
 
-	stop_this_cpu(NULL);
+	/* Remove this CPU */
+	set_cpu_online(smp_processor_id(), false);
+
+	spin_begin();
+	while (1)
+		spin_cpu_relax();
 }
-#endif
 
 void smp_send_stop(void)
 {
-#ifdef CONFIG_NMI_IPI
 	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
-#else
+}
+
+#else /* CONFIG_NMI_IPI */
+
+static void stop_this_cpu(void *dummy)
+{
+	/* Remove this CPU */
+	set_cpu_online(smp_processor_id(), false);
+
+	hard_irq_disable();
+	spin_begin();
+	while (1)
+		spin_cpu_relax();
+}
+
+void smp_send_stop(void)
+{
+	static bool stopped = false;
+
+	/*
+	 * Prevent waiting on csd lock from a previous smp_send_stop.
+	 * This is racy, but in general callers try to do the right
+	 * thing and only fire off one smp_send_stop (e.g., see
+	 * kernel/panic.c)
+	 */
+	if (stopped)
+		return;
+
+	stopped = true;
+
 	smp_call_function(stop_this_cpu, NULL, 0);
-#endif
 }
+#endif /* CONFIG_NMI_IPI */
 
 struct thread_info *current_set[NR_CPUS];

--
cgit v1.2.3
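The non-NMI fix above is a plain one-shot latch: a static flag turns any
second smp_send_stop() into a no-op instead of a second smp_call_function()
that would wait on csd locks held by CPUs that will never respond. Below is
a minimal userspace sketch of the same pattern, with illustrative stand-in
names rather than the kernel's own functions; it is an assumption-laden
model, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for smp_call_function(stop_this_cpu, NULL, 0). */
static void stop_other_cpus_model(void)
{
	puts("stop IPIs sent; other CPUs now spin forever");
}

/* One-shot guard modelled on the !CONFIG_NMI_IPI smp_send_stop() above. */
static void smp_send_stop_model(void)
{
	static bool stopped = false;

	/*
	 * Racy on purpose, as in the patch: two concurrent first callers
	 * could both pass the check, but callers such as panic() already
	 * serialize themselves, and a second IPI round would wait forever
	 * on CPUs that never respond.
	 */
	if (stopped)
		return;
	stopped = true;

	stop_other_cpus_model();
}

int main(void)
{
	smp_send_stop_model();	/* e.g. from panic() */
	smp_send_stop_model();	/* e.g. again from the reboot path: a no-op */
	return 0;
}

The second call returns immediately without touching the "IPI" path,
which is exactly the behaviour the patch relies on when panic() is
followed by the reboot path's smp_send_stop().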