author	Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>	2018-02-23 19:41:53 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-05-30 07:52:07 +0200
commit	f7f78191c910d741c5971094003dfe7233c29743 (patch)
tree	b9c39f765507ea86fa9ca42b82ecbfe431b48dba /arch
parent	50de7f4347cf0dbe7b9c28e273a8cf498067790e (diff)
ARC: mcip: update MCIP debug mask when the new cpu came online
[ Upstream commit f3205de98db2fc8083796dd5ad81b191e436fab8 ]

As of today we use a hardcoded MCIP debug mask, so if we launch the kernel
via a debugger and kick fewer cores than the HW has, all cpus hang at the
moment the MCIP debug mask is set up.

So update the MCIP debug mask when a new cpu comes online, instead of
using a hardcoded MCIP debug mask.

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arc/kernel/mcip.c	37
1 file changed, 32 insertions, 5 deletions
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 1119029ae7fc..5fe84e481654 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -51,6 +51,34 @@ static void mcip_update_gfrc_halt_mask(int cpu)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+static void mcip_update_debug_halt_mask(int cpu)
+{
+	u32 mcip_mask = 0;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	/*
+	 * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+	 * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+	 * and CMD_DEBUG_READ_SELECT.
+	 */
+	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	mcip_mask |= BIT(cpu);
+
+	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+	/*
+	 * Parameter specified halt cause:
+	 * STATUS32[H]/actionpoint/breakpoint/self-halt
+	 * We choose all of them (0xF).
+	 */
+	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static void mcip_setup_per_cpu(int cpu)
 {
 	struct mcip_bcr mp;
@@ -63,6 +91,10 @@ static void mcip_setup_per_cpu(int cpu)
 	/* Update GFRC halt mask as new CPU came online */
 	if (mp.gfrc)
 		mcip_update_gfrc_halt_mask(cpu);
+
+	/* Update MCIP debug mask as new CPU came online */
+	if (mp.dbg)
+		mcip_update_debug_halt_mask(cpu);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -138,11 +170,6 @@ static void mcip_probe_n_setup(void)
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
 	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
-	if (mp.dbg) {
-		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
-		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
-	}
 }
 
 struct plat_smp_ops plat_smp_ops = {
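
As a quick illustration of the read-modify-write in mcip_update_debug_halt_mask() above, here is a minimal user-space sketch (not the kernel code; update_debug_halt_mask(), read_back_mask and the BIT() macro below are stand-ins for the CMD_DEBUG_READ_SELECT / ARC_REG_MCIP_READBACK / CMD_DEBUG_SET_* sequence): each CPU that comes online OR-s its own bit into the previously programmed mask, so only the CPUs that were actually kicked end up in the MCIP debug halt mask, instead of all CPUs implied by the old hardcoded 0xf.

/* Sketch of the per-CPU debug halt mask accumulation (assumption: names are illustrative). */
#include <stdio.h>

#define BIT(n)	(1U << (n))

static unsigned int read_back_mask;	/* stands in for the MCIP readback register */

static void update_debug_halt_mask(int cpu)
{
	unsigned int mask = read_back_mask;	/* read back the current mask */

	mask |= BIT(cpu);			/* add the CPU that just came online */
	read_back_mask = mask;			/* program SELECT/MASK with the new value */
}

int main(void)
{
	/* Only CPUs 0 and 1 are kicked, e.g. when launching via a debugger. */
	update_debug_halt_mask(0);
	update_debug_halt_mask(1);

	printf("debug halt mask: 0x%x (old hardcoded value was 0xf)\n",
	       read_back_mask);		/* prints 0x3 */
	return 0;
}

With the hardcoded mask, CPUs 2 and 3 would also be part of the debug halt group even though they were never started, which is what caused the hang described in the commit message.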