author    davidcunado-arm <david.cunado@arm.com>  2018-01-12 09:02:24 +0000
committer GitHub <noreply@github.com>             2018-01-12 09:02:24 +0000
commit    5f3c7ce4adcc3fbde52582880c0e7680af4a557a (patch)
tree      f6f8ebc9d2f1192f12301145401d97be1b4f614c
parent    31dfea92048fa2a71495650d77f5c68ae582ab73 (diff)
parent    53bfb94ececbed0fd6eb3550c79254a928a13067 (diff)

Merge pull request #1197 from dp-arm/dp/amu

AMUv1 support
-rw-r--r--  bl31/bl31.mk                               |   3
-rw-r--r--  docs/porting-guide.rst                     |  16
-rw-r--r--  include/lib/aarch32/arch_helpers.h         |   5
-rw-r--r--  include/lib/aarch64/arch.h                 |  41
-rw-r--r--  include/lib/aarch64/arch_helpers.h         |   1
-rw-r--r--  include/lib/cpus/aarch64/cortex_a75.h      |  12
-rw-r--r--  include/lib/el3_runtime/pubsub_events.h    |   7
-rw-r--r--  include/lib/extensions/amu.h               |  32
-rw-r--r--  include/lib/extensions/amu_private.h       |  19
-rw-r--r--  lib/cpus/aarch64/cortex_a75.S              |  98
-rw-r--r--  lib/cpus/aarch64/cortex_a75_pubsub.c       |  75
-rw-r--r--  lib/extensions/amu/aarch32/amu.c           | 107
-rw-r--r--  lib/extensions/amu/aarch64/amu.c           | 190
-rw-r--r--  lib/extensions/amu/aarch64/amu_helpers.S   | 281
-rw-r--r--  lib/psci/psci_suspend.c                    |   5
-rw-r--r--  plat/arm/board/fvp/platform.mk             |   4

16 files changed, 858 insertions(+), 38 deletions(-)
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 8ff8f89b..2db48564 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -51,7 +51,8 @@ BL31_SOURCES += lib/extensions/spe/spe.c
endif
ifeq (${ENABLE_AMU},1)
-BL31_SOURCES += lib/extensions/amu/aarch64/amu.c
+BL31_SOURCES += lib/extensions/amu/aarch64/amu.c \
+ lib/extensions/amu/aarch64/amu_helpers.S
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index f020ec97..51d2e64a 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -549,6 +549,22 @@ behaviour of the ``assert()`` function (for example, to save memory).
doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
defined, it defaults to ``LOG_LEVEL``.
+If the platform port uses the Activity Monitor Unit, the following constants
+may be defined:
+
+- **PLAT\_AMU\_GROUP1\_COUNTERS\_MASK**
+ This mask reflects the set of group 1 counters that should be enabled. The
+ maximum number of group 1 counters supported by AMUv1 is 16, so the mask
+ can be at most 0xffff. If the platform does not define this mask, no group 1
+ counters are enabled. If the platform defines this mask, the following
+ constant must also be defined.
+
+- **PLAT\_AMU\_GROUP1\_NR\_COUNTERS**
+ This value is used to allocate an array to save and restore the counters
+ specified by ``PLAT_AMU_GROUP1_COUNTERS_MASK`` on CPU suspend.
+ This value should be equal to the highest bit position set in the
+ mask, plus 1. The maximum number of group 1 counters in AMUv1 is 16.
+
File : plat\_macros.S [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
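For illustration only (not part of this patch), a platform that wanted group 1
counters 0 and 2 enabled might add the following, hypothetical, definitions to
its platform_def.h:

    #define PLAT_AMU_GROUP1_COUNTERS_MASK   0x5 /* counters 0 and 2 */
    #define PLAT_AMU_GROUP1_NR_COUNTERS     3   /* highest set bit (2) plus 1 */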
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 0230195a..beae5d06 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -287,6 +287,11 @@ DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
/*
* TLBI operation prototypes
*/
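Each DEFINE_COPROCR_RW_FUNCS_64() line above generates a
read64_<name>()/write64_<name>() accessor pair for the corresponding 64-bit
coprocessor register; the AArch32 AMU context code later in this patch relies
on them. A minimal sketch:

    /* Save and restore group 0 counter 0 (AMEVCNTR00) on AArch32. */
    uint64_t cnt = read64_amevcntr00();
    write64_amevcntr00(cnt);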
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 7f3e9faa..91aa484f 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -664,4 +664,45 @@
#define AMEVTYPER02_EL0 S3_3_C13_C6_2
#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
#endif /* __ARCH_H__ */
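The AMCGCR_EL0 field definitions above let software discover how many group 1
counters a PE implements. A sketch, using the read_amcgcr_el0() accessor
generated in the arch_helpers.h hunk below:

    /* AMCGCR_EL0.CG1NC: number of group 1 counters implemented (0-16). */
    unsigned int nr_group1 = (unsigned int)(read_amcgcr_el0() >>
            AMCGCR_EL0_CG1NC_SHIFT) & AMCGCR_EL0_CG1NC_MASK;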
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 831dfb06..485ed432 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -322,6 +322,7 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index d68c9572..940125da 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -50,7 +50,19 @@
* CPUAMEVTYPER<n> register and are disabled by default. Platforms may
* enable this with suitable programming.
*/
+#define CORTEX_A75_AMU_NR_COUNTERS 5
#define CORTEX_A75_AMU_GROUP0_MASK 0x7
#define CORTEX_A75_AMU_GROUP1_MASK (0 << 3)
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+uint64_t cortex_a75_amu_cnt_read(int idx);
+void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+#endif /* __ASSEMBLY__ */
+
#endif /* __CORTEX_A75_H__ */
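For illustration, a hypothetical EL3 caller could use the helpers declared
above to enable the Cortex-A75 group 0 counters and sample one of them (this
usage is not part of the patch):

    /* Enable the implemented group 0 counters, then read counter 0. */
    cortex_a75_amu_write_cpuamcntenset_el0(CORTEX_A75_AMU_GROUP0_MASK);
    uint64_t val = cortex_a75_amu_cnt_read(0);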
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
index 9cfedb4d..64b3f630 100644
--- a/include/lib/el3_runtime/pubsub_events.h
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -17,6 +17,13 @@
*/
REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+/*
+ * These events are published before/after a CPU has been powered down/up
+ * via the PSCI CPU SUSPEND API.
+ */
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
+
#ifdef AARCH64
/*
* These events are published by the AArch64 context management framework
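Subscribers attach handlers to the new events with SUBSCRIBE_TO_EVENT(), as
the AMU context code later in this patch does. A minimal sketch with a
hypothetical hook:

    /* Runs on a core as it begins powering down for PSCI CPU SUSPEND. */
    static void *my_suspend_hook(const void *arg)
    {
            /* save per-core driver state here */
            return NULL;
    }
    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_suspend_hook);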
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index bbefe8ff..faa0ee12 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -7,9 +7,39 @@
#ifndef __AMU_H__
#define __AMU_H__
-/* Enable all group 0 counters */
+#include <sys/cdefs.h> /* for CASSERT() */
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/* All group 0 counters */
#define AMU_GROUP0_COUNTERS_MASK 0xf
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK 0
+#endif
+
+#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
+#define AMU_GROUP1_NR_COUNTERS PLAT_AMU_GROUP1_NR_COUNTERS
+#else
+#define AMU_GROUP1_NR_COUNTERS 0
+#endif
+
+CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
+CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
+
+int amu_supported(void);
void amu_enable(int el2_unused);
+/* Group 0 configuration helpers */
+uint64_t amu_group0_cnt_read(int idx);
+void amu_group0_cnt_write(int idx, uint64_t val);
+
+/* Group 1 configuration helpers */
+uint64_t amu_group1_cnt_read(int idx);
+void amu_group1_cnt_write(int idx, uint64_t val);
+void amu_group1_set_evtype(int idx, unsigned int val);
+
#endif /* __AMU_H__ */
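A sketch of how EL3 code might drive the group 1 helpers declared above,
assuming a platform whose PLAT_AMU_GROUP1_COUNTERS_MASK enables counter 0;
the event number 0x11 is illustrative only:

    /* Program group 1 counter 0 and read back the accumulated count. */
    amu_group1_set_evtype(0, 0x11);
    uint64_t count = amu_group1_cnt_read(0);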
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
new file mode 100644
index 00000000..0c660bb8
--- /dev/null
+++ b/include/lib/extensions/amu_private.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AMU_PRIVATE_H__
+#define __AMU_PRIVATE_H__
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(int idx);
+void amu_group0_cnt_write_internal(int idx, uint64_t);
+
+uint64_t amu_group1_cnt_read_internal(int idx);
+void amu_group1_cnt_write_internal(int idx, uint64_t);
+void amu_group1_set_evtype_internal(int idx, unsigned int val);
+
+#endif /* __AMU_PRIVATE_H__ */
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 9b54b48f..e66ad066 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,6 +11,104 @@
#include <plat_macros.S>
#include <cortex_a75.h>
+ .globl cortex_a75_amu_cnt_read
+ .globl cortex_a75_amu_cnt_write
+ .globl cortex_a75_amu_read_cpuamcntenset_el0
+ .globl cortex_a75_amu_read_cpuamcntenclr_el0
+ .globl cortex_a75_amu_write_cpuamcntenset_el0
+ .globl cortex_a75_amu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cortex_a75_amu_cnt_read(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cortex_a75_amu_cnt_read
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, CPUAMEVCNTR0_EL0
+ ret
+ mrs x0, CPUAMEVCNTR1_EL0
+ ret
+ mrs x0, CPUAMEVCNTR2_EL0
+ ret
+ mrs x0, CPUAMEVCNTR3_EL0
+ ret
+ mrs x0, CPUAMEVCNTR4_EL0
+ ret
+endfunc cortex_a75_amu_cnt_read
+
+/*
+ * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cortex_a75_amu_cnt_write
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr CPUAMEVCNTR0_EL0, x1
+ ret
+ msr CPUAMEVCNTR1_EL0, x1
+ ret
+ msr CPUAMEVCNTR2_EL0, x1
+ ret
+ msr CPUAMEVCNTR3_EL0, x1
+ ret
+ msr CPUAMEVCNTR4_EL0, x1
+ ret
+endfunc cortex_a75_amu_cnt_write
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenclr_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenset_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenclr_el0
+ msr CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenclr_el0
+
func cortex_a75_reset_func
#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
mrs x0, id_aa64pfr0_el1
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 00000000..c1089a60
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a75.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+struct amu_ctx {
+ uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
+ uint16_t mask;
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+static void *cortex_a75_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Save counter configuration */
+ ctx->mask = cortex_a75_amu_read_cpuamcntenset_el0();
+
+ /* Ensure counters are disabled */
+ cortex_a75_amu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ ctx->cnts[i] = cortex_a75_amu_cnt_read(i);
+
+ return 0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Counters were disabled in `cortex_a75_context_save()` */
+ assert(cortex_a75_amu_read_cpuamcntenset_el0() == 0);
+
+ /* Restore counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ cortex_a75_amu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cortex_a75_amu_write_cpuamcntenset_el0(ctx->mask);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index d450bd69..55462cbf 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -7,26 +7,103 @@
#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
void amu_enable(int el2_unused)
{
uint64_t features;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- if ((features & ID_PFR0_AMU_MASK) == 1) {
- if (el2_unused) {
- uint64_t v;
-
- /*
- * Non-secure access from EL0 or EL1 to the Activity Monitor
- * registers do not trap to EL2.
- */
- v = read_hcptr();
- v &= ~TAM_BIT;
- write_hcptr(v);
- }
-
- /* Enable group 0 counters */
- write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+ if ((features & ID_PFR0_AMU_MASK) != 1) {
+ WARN("Cannot enable AMU - not supported\n");
+ return;
+ }
+
+ if (el2_unused) {
+ uint64_t v;
+
+ /*
+ * Non-secure access from EL0 or EL1 to the Activity Monitor
+ * registers do not trap to EL2.
+ */
+ v = read_hcptr();
+ v &= ~TAM_BIT;
+ write_hcptr(v);
}
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Assert that group 0 counter configuration is what we expect */
+ assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+
+ /*
+ * Disable group 0 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+ isb();
+
+ ctx->group0_cnts[0] = read64_amevcntr00();
+ ctx->group0_cnts[1] = read64_amevcntr01();
+ ctx->group0_cnts[2] = read64_amevcntr02();
+ ctx->group0_cnts[3] = read64_amevcntr03();
+
+ return 0;
}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0() == 0);
+
+ /* Restore group 0 counters */
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
+ write64_amevcntr00(ctx->group0_cnts[0]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
+ write64_amevcntr01(ctx->group0_cnts[1]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
+ write64_amevcntr02(ctx->group0_cnts[2]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
+ write64_amevcntr03(ctx->group0_cnts[3]);
+ isb();
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 007b3494..f743e209 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -5,36 +5,184 @@
*/
#include <amu.h>
+#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <pubsub_events.h>
-void amu_enable(int el2_unused)
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+ uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+int amu_supported(void)
{
uint64_t features;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- if ((features & ID_AA64PFR0_AMU_MASK) == 1) {
- uint64_t v;
-
- if (el2_unused) {
- /*
- * CPTR_EL2.TAM: Set to zero so any accesses to
- * the Activity Monitor registers do not trap to EL2.
- */
- v = read_cptr_el2();
- v &= ~CPTR_EL2_TAM_BIT;
- write_cptr_el2(v);
- }
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
+void amu_enable(int el2_unused)
+{
+ uint64_t v;
+
+ if (!amu_supported()) {
+ WARN("Cannot enable AMU - not supported\n");
+ return;
+ }
+
+ if (el2_unused) {
/*
- * CPTR_EL3.TAM: Set to zero so that any accesses to
- * the Activity Monitor registers do not trap to EL3.
+ * CPTR_EL2.TAM: Set to zero so any accesses to
+ * the Activity Monitor registers do not trap to EL2.
*/
- v = read_cptr_el3();
- v &= ~TAM_BIT;
- write_cptr_el3(v);
-
- /* Enable group 0 counters */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ v = read_cptr_el2();
+ v &= ~CPTR_EL2_TAM_BIT;
+ write_cptr_el2(v);
}
+
+ /*
+ * CPTR_EL3.TAM: Set to zero so that any accesses to
+ * the Activity Monitor registers do not trap to EL3.
+ */
+ v = read_cptr_el3();
+ v &= ~TAM_BIT;
+ write_cptr_el3(v);
+
+ /* Enable group 0 counters */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Enable group 1 counters */
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Write the group 0 counter identified by the given `idx` with `val`. */
+void amu_group0_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ amu_group0_cnt_write_internal(idx, val);
+ isb();
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
+
+/* Write the group 1 counter identified by the given `idx` with `val`. */
+void amu_group1_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_cnt_write_internal(idx, val);
+ isb();
+}
+
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`.
+ */
+void amu_group1_set_evtype(int idx, unsigned int val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_set_evtype_internal(idx, val);
+ isb();
}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Assert that group 0/1 counter configuration is what we expect */
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
+ read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /*
+ * Disable group 0/1 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+ isb();
+
+ /* Save group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+
+ /* Save group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /* Restore group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+
+ /* Restore group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ isb();
+
+ /* Restore group 0/1 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 00000000..e0b1f564
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group0_cnt_write_internal
+ .globl amu_group1_cnt_read_internal
+ .globl amu_group1_cnt_write_internal
+ .globl amu_group1_set_evtype_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #2
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx`, calculate the address of the mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR00_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR01_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR02_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR03_EL0 /* index 3 */
+ ret
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #2
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR00_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR01_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR02_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR03_EL0, x1 /* index 3 */
+ ret
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #4
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx`, calculate the address of the mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR10_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR11_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR12_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR13_EL0 /* index 3 */
+ ret
+ mrs x0, AMEVCNTR14_EL0 /* index 4 */
+ ret
+ mrs x0, AMEVCNTR15_EL0 /* index 5 */
+ ret
+ mrs x0, AMEVCNTR16_EL0 /* index 6 */
+ ret
+ mrs x0, AMEVCNTR17_EL0 /* index 7 */
+ ret
+ mrs x0, AMEVCNTR18_EL0 /* index 8 */
+ ret
+ mrs x0, AMEVCNTR19_EL0 /* index 9 */
+ ret
+ mrs x0, AMEVCNTR1A_EL0 /* index 10 */
+ ret
+ mrs x0, AMEVCNTR1B_EL0 /* index 11 */
+ ret
+ mrs x0, AMEVCNTR1C_EL0 /* index 12 */
+ ret
+ mrs x0, AMEVCNTR1D_EL0 /* index 13 */
+ ret
+ mrs x0, AMEVCNTR1E_EL0 /* index 14 */
+ ret
+ mrs x0, AMEVCNTR1F_EL0 /* index 15 */
+ ret
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVCNTR14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVCNTR15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVCNTR16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVCNTR17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVCNTR18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVCNTR19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVCNTR1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVCNTR1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVCNTR1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVCNTR1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVCNTR1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVCNTR1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+
+ /* val should be in the range [0, 65535] */
+ mov x2, x1
+ lsr x2, x2, #16
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVTYPER10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVTYPER11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVTYPER12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVTYPER13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVTYPER14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVTYPER15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVTYPER16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVTYPER17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVTYPER18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVTYPER19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVTYPER1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVTYPER1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVTYPER1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVTYPER1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVTYPER1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVTYPER1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_set_evtype_internal
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index d9490672..a77972d3 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -14,6 +14,7 @@
#include <debug.h>
#include <platform.h>
#include <pmf.h>
+#include <pubsub_events.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"
@@ -68,6 +69,8 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
{
unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+ PUBLISH_EVENT(psci_suspend_pwrdown_start);
+
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
@@ -308,6 +311,8 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+ PUBLISH_EVENT(psci_suspend_pwrdown_finish);
+
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 7edbd3df..9d3c5f6b 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -150,6 +150,10 @@ ENABLE_PLAT_COMPAT := 0
# Enable Activity Monitor Unit extensions by default
ENABLE_AMU := 1
+ifeq (${ENABLE_AMU},1)
+BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c
+endif
+
ifneq (${ENABLE_STACK_PROTECTOR},0)
PLAT_BL_COMMON_SOURCES += plat/arm/board/fvp/fvp_stack_protector.c
endif
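With this change an FVP build pulls in the Cortex-A75 AMU context handlers
whenever AMU support is on, e.g. with an (illustrative) invocation such as
make PLAT=fvp ENABLE_AMU=1 all.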