author		Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>		2015-12-18 10:35:54 +0000
committer	Sasha Levin <sasha.levin@oracle.com>			2016-02-01 12:22:13 -0500
commit		5de8e1eed3221cb8737c2617c4567b70833795e6 (patch)
tree		1c9ff445d7448f19839014e8a0f1d8cf1fb35700 /arch
parent		1d4c425164b922424b3f86f93f3f0b7f85293fa7 (diff)
arm64: kernel: enforce pmuserenr_el0 initialization and restore
[ Upstream commit d2d39a3b91628ef5abdf58e83905b173e63d5ecf ]

commit 60792ad349f3c6dc5735aafefe5dc9121c79e320 upstream.

The pmuserenr_el0 register value is architecturally UNKNOWN on reset.
Current kernel code resets that register value iff the core pmu device
is correctly probed in the kernel. On platforms with missing DT pmu
nodes (or perf events disabled in the kernel), the pmu is not probed,
so the pmuserenr_el0 register is never reset and retains its
architecturally UNKNOWN reset value (the system may run with e.g.
pmuserenr_el0 == 0x1, meaning PMU counter access is available at EL0,
which must be disallowed).

This patch adds code that resets pmuserenr_el0 on cold boot and
restores it on core resume from shutdown, so that the pmuserenr_el0
setup is always enforced in the kernel.

Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
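To make the failure mode concrete, here is a minimal userspace probe (a
hypothetical standalone test, not part of this patch; assumes an arm64
Linux target). It attempts to read the PMU cycle counter from EL0: if
firmware left pmuserenr_el0.EN set and the kernel never reset it, the
MRS succeeds, whereas once pmuserenr_el0 is zeroed on every CPU
bring-up, the access traps and the process receives SIGILL.

/*
 * Sketch only: probes whether EL0 can access the PMU cycle counter.
 * File/compiler names below are illustrative, not from the patch.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void on_sigill(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);	/* skip past the trapping MRS */
}

int main(void)
{
	uint64_t cycles;

	signal(SIGILL, on_sigill);

	if (sigsetjmp(probe_env, 1)) {
		puts("EL0 PMU access disabled (MRS trapped)");
		return 0;
	}

	/*
	 * Reads the PMU cycle counter; legal at EL0 only when the
	 * pmuserenr_el0 enable bits permit it.
	 */
	__asm__ volatile("mrs %0, pmccntr_el0" : "=r" (cycles));
	printf("EL0 PMU access enabled: pmccntr_el0 = %llu\n",
	       (unsigned long long)cycles);
	return 0;
}

Built with e.g. gcc -O2 -o pmuserenr_probe probe.c and run as an
unprivileged user, the probe should report the trap on a fixed kernel.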
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/perf_event.c	3
-rw-r--r--	arch/arm64/mm/proc.S		2
2 files changed, 2 insertions, 3 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 7778453762d8..b67b01cb5109 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1242,9 +1242,6 @@ static void armv8pmu_reset(void *info)
 	/* Initialize & Reset PMNC: C and P bits. */
 	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-
-	/* Disable access from userspace. */
-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
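The write removed above ran only when the PMU driver probed; the two
assembly hunks below move it into the per-CPU bring-up paths
(__cpu_setup on cold boot, cpu_do_resume on resume from shutdown), so
pmuserenr_el0 is zeroed on every CPU power-on regardless of whether
perf or a DT pmu node is present.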
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index d2c2e3b6c0f9..55b3f1400b3e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -165,6 +165,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh				// Make sure local tlb invalidation completed
 	isb
@@ -204,6 +205,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *