Diffstat (limited to 'lib')
 lib/cpus/aarch64/cortex_a57.S                      |   5
 lib/cpus/aarch64/cortex_a72.S                      |   6
 lib/cpus/aarch64/cortex_a73.S                      |   5
 lib/cpus/aarch64/cortex_a75.S                      | 113
 lib/cpus/aarch64/cortex_a75_pubsub.c               |  75
 lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S | 372
 lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S    | 114
 lib/cpus/cpu-ops.mk                                |   5
 lib/extensions/amu/aarch32/amu.c                   | 106
 lib/extensions/amu/aarch64/amu.c                   | 189
 lib/extensions/amu/aarch64/amu_helpers.S           | 281
 lib/psci/psci_suspend.c                            |   5
 lib/xlat_tables_v2/xlat_tables_internal.c          |   4
 13 files changed, 1240 insertions(+), 40 deletions(-)
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index a720e984..683be47e 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -383,6 +383,11 @@ func cortex_a57_reset_func
bl errata_a57_859972_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index b0341256..93821b74 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,12 @@ func cortex_a72_reset_func
mov x0, x18
bl errata_a72_859971_wa
#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index f642816e..c43f07ec 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -36,6 +36,11 @@ func cortex_a73_disable_smp
endfunc cortex_a73_disable_smp
func cortex_a73_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* Clobbers : x0
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 4cab9e4f..e66ad066 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,7 +11,120 @@
#include <plat_macros.S>
#include <cortex_a75.h>
+ .globl cortex_a75_amu_cnt_read
+ .globl cortex_a75_amu_cnt_write
+ .globl cortex_a75_amu_read_cpuamcntenset_el0
+ .globl cortex_a75_amu_read_cpuamcntenclr_el0
+ .globl cortex_a75_amu_write_cpuamcntenset_el0
+ .globl cortex_a75_amu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cortex_a75_amu_cnt_read(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cortex_a75_amu_cnt_read
+ adr x1, 1f
+ lsl x0, x0, #3
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, CPUAMEVCNTR0_EL0
+ ret
+ mrs x0, CPUAMEVCNTR1_EL0
+ ret
+ mrs x0, CPUAMEVCNTR2_EL0
+ ret
+ mrs x0, CPUAMEVCNTR3_EL0
+ ret
+ mrs x0, CPUAMEVCNTR4_EL0
+ ret
+endfunc cortex_a75_amu_cnt_read
+
+/*
+ * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cortex_a75_amu_cnt_write
+ adr x2, 1f
+ lsl x0, x0, #3
+ add x2, x2, x0
+ br x2
+
+1:
+ msr CPUAMEVCNTR0_EL0, x1
+ ret
+ msr CPUAMEVCNTR1_EL0, x1
+ ret
+ msr CPUAMEVCNTR2_EL0, x1
+ ret
+ msr CPUAMEVCNTR3_EL0, x1
+ ret
+ msr CPUAMEVCNTR4_EL0, x1
+ ret
+endfunc cortex_a75_amu_cnt_write
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenclr_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenset_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenclr_el0
+ msr CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenclr_el0
+
func cortex_a75_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+ /*
+ * If the field equals 1, branch targets trained in one context
+ * cannot affect speculative execution in a different context.
+ */
+ cmp x0, #1
+ beq 1f
+
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+1:
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
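
Note on the dispatch used by the Cortex-A75 AMU accessors above: a system register cannot be indexed at run time, so each helper computes `idx * 8` and branches into a table of fixed `mrs`/`ret` (or `msr`/`ret`) pairs, each pair occupying exactly 8 bytes. A rough C analogue of that dispatch, using a table of per-register accessor stubs (hypothetical names, illustration only, not part of the patch):

#include <stdint.h>

/* One stub per architecturally named counter; the bodies are placeholders
 * standing in for the corresponding `mrs x0, CPUAMEVCNTRn_EL0` instruction. */
static uint64_t read_cpuamevcntr0(void) { return 0; }
static uint64_t read_cpuamevcntr1(void) { return 0; }
static uint64_t read_cpuamevcntr2(void) { return 0; }
static uint64_t read_cpuamevcntr3(void) { return 0; }
static uint64_t read_cpuamevcntr4(void) { return 0; }

static uint64_t (*const cnt_readers[])(void) = {
	read_cpuamevcntr0, read_cpuamevcntr1, read_cpuamevcntr2,
	read_cpuamevcntr3, read_cpuamevcntr4,
};

/* Plays the role of `adr x1, 1f; lsl x0, x0, #3; add x1, x1, x0; br x1`. */
static uint64_t a75_amu_cnt_read_analogue(int idx)
{
	return cnt_readers[idx]();
}
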
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 00000000..c1089a60
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cortex_a75.h>
+#include <pubsub_events.h>
+#include <platform.h>
+
+struct amu_ctx {
+ uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
+ uint16_t mask;
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+static void *cortex_a75_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Save counter configuration */
+ ctx->mask = cortex_a75_amu_read_cpuamcntenset_el0();
+
+ /* Ensure counters are disabled */
+ cortex_a75_amu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ ctx->cnts[i] = cortex_a75_amu_cnt_read(i);
+
+ return 0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `cortex_a75_context_save()` */
+ assert(cortex_a75_amu_read_cpuamcntenset_el0() == 0);
+
+ /* Restore counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ cortex_a75_amu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cortex_a75_amu_write_cpuamcntenset_el0(ctx->mask);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
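
The two SUBSCRIBE_TO_EVENT() lines above register the hooks against the events that psci_suspend.c publishes later in this patch: `cortex_a75_context_save` runs on `psci_suspend_pwrdown_start`, before the core powers down, and `cortex_a75_context_restore` on `psci_suspend_pwrdown_finish`, after wake-up. A minimal, self-contained sketch of such a publish/subscribe pattern (illustration only; the real TF-A macros collect subscribers in dedicated linker sections rather than registering them at run time):

#include <stddef.h>

typedef void *(*pubsub_cb_t)(const void *arg);

#define MAX_SUBSCRIBERS	4
static pubsub_cb_t pwrdown_start_cbs[MAX_SUBSCRIBERS];
static size_t pwrdown_start_nr;

/* Stand-in for SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, fn). */
static void subscribe_pwrdown_start(pubsub_cb_t fn)
{
	if (pwrdown_start_nr < MAX_SUBSCRIBERS)
		pwrdown_start_cbs[pwrdown_start_nr++] = fn;
}

/* Stand-in for PUBLISH_EVENT(psci_suspend_pwrdown_start). */
static void publish_pwrdown_start(void)
{
	for (size_t i = 0; i < pwrdown_start_nr; i++)
		pwrdown_start_cbs[i](NULL);
}
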
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
new file mode 100644
index 00000000..cd29266e
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_bpiall_vbar0_runtime_exceptions
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
+#define EMIT_SMC 0xe1600070
+
+ .macro enter_workaround _stub_name
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ adr x4, \_stub_name
+
+ /*
+ * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
+ * all interrupts masked in preparation to running the workaround
+ * stub in S-EL1. VBAR_EL3 points to the vector table that
+ * will handle the SMC back from the workaround stub.
+ */
+ ldp x0, x1, [x4, #0]
+
+ /*
+ * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
+ * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+ */
+ ldp x2, x3, [x4, #16]
+
+ mrs x4, scr_el3
+ mrs x5, spsr_el3
+ mrs x6, elr_el3
+ mrs x7, sctlr_el1
+ mrs x8, esr_el3
+
+ /* Preserve system registers in the workaround context */
+ stp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ stp x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ stp x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+
+ /*
+ * Reload system registers with the crafted values
+ * in preparation for entry in S-EL1.
+ */
+ msr spsr_el3, x0
+ msr vbar_el3, x1
+ msr sctlr_el1, x2
+ msr elr_el3, x3
+
+ eret
+ .endm
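
The two `ldp` instructions in the macro above read a 32-byte per-exception context block; the `aarch32_stub_ctx_*` data defined in the vector entries below follows the same layout. Expressed as a hypothetical C structure, purely for illustration:

#include <stdint.h>

/* Layout of each aarch32_stub_ctx_* block (one per exception type). */
struct bpiall_stub_ctx {
	uint64_t spsr_el3;	/* AArch32 SVC mode, all interrupts masked */
	uint64_t vbar_el3;	/* workaround_bpiall_vbar1_runtime_exceptions */
	uint64_t sctlr_el1;	/* MMU off, I-cache on, for the S-EL1 stub */
	uint64_t elr_el3;	/* address of the 3-instruction AArch32 stub */
};
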
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar0_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+aarch32_stub_smc:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(1)
+ .word EMIT_SMC
+aarch32_stub_ctx_smc:
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+
+ /*
+ * VBAR_EL3 points to vbar1 which is the vector table
+ * used while the workaround is executing.
+ */
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+
+ /* Setup SCTLR_EL1 with MMU off and I$ on */
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+ /* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
+ .quad aarch32_stub_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar0_irq_sp_el0
+ b irq_sp_el0
+aarch32_stub_irq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(2)
+ .word EMIT_SMC
+aarch32_stub_ctx_irq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_irq
+ check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+ b fiq_sp_el0
+aarch32_stub_fiq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(4)
+ .word EMIT_SMC
+aarch32_stub_ctx_fiq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_serror_sp_el0
+ b serror_sp_el0
+aarch32_stub_serror:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(8)
+ .word EMIT_SMC
+aarch32_stub_ctx_serror:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_serror
+ check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar0_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar0_irq_aarch64
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch64
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch64
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar0_serror_aarch64
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar0_irq_aarch32
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch32
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch32
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar0_serror_aarch32
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar1_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar1_irq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_serror_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar1_irq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_serror_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar1_irq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_aarch64
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar1_serror_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+ /* Restore register state from the workaround context */
+ ldp x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ ldp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ ldp x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /* Apply the restored system register state */
+ msr scr_el3, x2
+ msr spsr_el3, x3
+ msr elr_el3, x4
+ msr sctlr_el1, x5
+ msr esr_el3, x6
+
+ /*
+ * Workaround is complete, so swap VBAR_EL3 to point
+ * to workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x2, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x2
+
+ /*
+ * Restore all GP regs except x0 and x1. The value in x0
+ * indicates the type of the original exception.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /*
+ * Each of these handlers will first restore x0 and x1 from
+ * the context and then branch to the common implementation for
+ * each of the exception types.
+ */
+ tbnz x0, #1, workaround_bpiall_vbar1_irq
+ tbnz x0, #2, workaround_bpiall_vbar1_fiq
+ tbnz x0, #3, workaround_bpiall_vbar1_serror
+
+ /* Fallthrough case for Sync exception */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b sync_exception_aarch64
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar1_irq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_irq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b irq_aarch64
+ check_vector_size workaround_bpiall_vbar1_irq_aarch32
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_fiq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b fiq_aarch64
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar1_serror_aarch32
+ b report_unhandled_exception
+workaround_bpiall_vbar1_serror:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b serror_aarch64
+ check_vector_size workaround_bpiall_vbar1_serror_aarch32
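
The EMIT_* macros at the top of this file hand-encode the three AArch32 instructions that make up each S-EL1 stub: 0xee070fd5 is `mcr p15, 0, r0, c7, c5, 6` (BPIALL), 0xe3a0000v is `mov r0, #v`, and 0xe1600070 is `smc #0`. The value the stub leaves in r0 (1, 2, 4 or 8) is what the `tbnz x0, #1/#2/#3` sequence above tests in order to re-dispatch the original exception type once the SMC returns to EL3. A small, hypothetical C helper that builds the same three-word stub (the A32_* names are illustrative, they simply mirror the EMIT_* macros in this file):

#include <stdint.h>

#define A32_BPIALL		0xee070fd5U		/* mcr p15, 0, r0, c7, c5, 6 */
#define A32_MOV_R0_IMM(v)	(0xe3a00000U | (v))	/* mov r0, #v (small v) */
#define A32_SMC0		0xe1600070U		/* smc #0 */

/* Fill `stub` with the BPIALL / MOV r0,#type / SMC #0 sequence, where `type`
 * is 1 (Sync), 2 (IRQ), 4 (FIQ) or 8 (SError), mirroring the .word blocks above. */
static void emit_bpiall_stub(uint32_t stub[3], unsigned int type)
{
	stub[0] = A32_BPIALL;
	stub[1] = A32_MOV_R0_IMM(type);
	stub[2] = A32_SMC0;
}
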
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
new file mode 100644
index 00000000..f4781484
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x0, sctlr_el3
+ /* Disable MMU */
+ bic x1, x0, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Restore MMU config */
+ msr sctlr_el3, x0
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size workaround_mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+ check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+ check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+ check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+ check_vector_size workaround_mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+ check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+ check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+ check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+ check_vector_size workaround_mmu_serror_aarch32
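
This second variant keeps the workaround entirely in EL3: the Cortex-A57/A72 reset functions earlier in this patch select this vector table, and the sequence relies on a property of those cores whereby disabling and then re-enabling the MMU at EL3 also invalidates the branch predictor, discarding any potentially attacker-trained branch state before the normal handler runs. A C rendering of the `apply_workaround` sequence, for illustration only (the firmware uses the assembly macro directly, and this only makes sense when executed at EL3):

#include <stdint.h>

static inline void workaround_mmu_apply(void)
{
	uint64_t sctlr;

	__asm__ volatile("mrs %0, sctlr_el3" : "=r"(sctlr));
	/* Disable the MMU (SCTLR_EL3.M is bit 0)... */
	__asm__ volatile("msr sctlr_el3, %0" : : "r"(sctlr & ~1ULL));
	__asm__ volatile("isb");
	/* ...then restore the original configuration. */
	__asm__ volatile("msr sctlr_el3, %0" : : "r"(sctlr));
	__asm__ volatile("isb");
}
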
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31adfb42..3ba8c1fc 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -16,6 +16,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
# It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT ?=1
+WORKAROUND_CVE_2017_5715 ?=1
+
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
$(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -28,6 +30,9 @@ $(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index d450bd69..effc5bd3 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,26 +7,100 @@
#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
void amu_enable(int el2_unused)
{
uint64_t features;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- if ((features & ID_PFR0_AMU_MASK) == 1) {
- if (el2_unused) {
- uint64_t v;
-
- /*
- * Non-secure access from EL0 or EL1 to the Activity Monitor
- * registers do not trap to EL2.
- */
- v = read_hcptr();
- v &= ~TAM_BIT;
- write_hcptr(v);
- }
-
- /* Enable group 0 counters */
- write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return;
+
+ if (el2_unused) {
+ uint64_t v;
+
+ /*
+ * Non-secure access from EL0 or EL1 to the Activity Monitor
+ * registers do not trap to EL2.
+ */
+ v = read_hcptr();
+ v &= ~TAM_BIT;
+ write_hcptr(v);
}
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Assert that group 0 counter configuration is what we expect */
+ assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+
+ /*
+ * Disable group 0 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+ isb();
+
+ ctx->group0_cnts[0] = read64_amevcntr00();
+ ctx->group0_cnts[1] = read64_amevcntr01();
+ ctx->group0_cnts[2] = read64_amevcntr02();
+ ctx->group0_cnts[3] = read64_amevcntr03();
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0() == 0);
+
+ /* Restore group 0 counters */
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
+ write64_amevcntr00(ctx->group0_cnts[0]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
+ write64_amevcntr01(ctx->group0_cnts[1]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
+ write64_amevcntr02(ctx->group0_cnts[2]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
+ write64_amevcntr03(ctx->group0_cnts[3]);
+ isb();
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 007b3494..d7645a9e 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,40 +1,185 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <amu.h>
+#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <pubsub_events.h>
-void amu_enable(int el2_unused)
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+ uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+int amu_supported(void)
{
uint64_t features;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- if ((features & ID_AA64PFR0_AMU_MASK) == 1) {
- uint64_t v;
-
- if (el2_unused) {
- /*
- * CPTR_EL2.TAM: Set to zero so any accesses to
- * the Activity Monitor registers do not trap to EL2.
- */
- v = read_cptr_el2();
- v &= ~CPTR_EL2_TAM_BIT;
- write_cptr_el2(v);
- }
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
+void amu_enable(int el2_unused)
+{
+ uint64_t v;
+
+ if (!amu_supported())
+ return;
+
+ if (el2_unused) {
/*
- * CPTR_EL3.TAM: Set to zero so that any accesses to
- * the Activity Monitor registers do not trap to EL3.
+ * CPTR_EL2.TAM: Set to zero so any accesses to
+ * the Activity Monitor registers do not trap to EL2.
*/
- v = read_cptr_el3();
- v &= ~TAM_BIT;
- write_cptr_el3(v);
-
- /* Enable group 0 counters */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ v = read_cptr_el2();
+ v &= ~CPTR_EL2_TAM_BIT;
+ write_cptr_el2(v);
}
+
+ /*
+ * CPTR_EL3.TAM: Set to zero so that any accesses to
+ * the Activity Monitor registers do not trap to EL3.
+ */
+ v = read_cptr_el3();
+ v &= ~TAM_BIT;
+ write_cptr_el3(v);
+
+ /* Enable group 0 counters */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Enable group 1 counters */
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Write the group 0 counter identified by the given `idx` with `val`. */
+void amu_group0_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ amu_group0_cnt_write_internal(idx, val);
+ isb();
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
+
+/* Write the group 1 counter identified by the given `idx` with `val`. */
+void amu_group1_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_cnt_write_internal(idx, val);
+ isb();
+}
+
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`.
+ */
+void amu_group1_set_evtype(int idx, unsigned int val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_set_evtype_internal(idx, val);
+ isb();
+}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Assert that group 0/1 counter configuration is what we expect */
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
+ read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /*
+ * Disable group 0/1 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+ isb();
+
+ /* Save group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+
+ /* Save group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /* Restore group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+
+ /* Restore group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ isb();
+
+ /* Restore group 0/1 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
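
Besides the suspend/resume hooks, this file now layers a public API (`amu_group0_cnt_read/write`, `amu_group1_cnt_read/write`, `amu_group1_set_evtype`) over the assembly helpers added in amu_helpers.S. A hypothetical caller that programs group 1 counter 0 and samples it might look like the sketch below; the declarations are assumed to live in amu.h, and the event number is a caller-supplied placeholder, not taken from this patch:

#include <amu.h>
#include <stdint.h>

/* Hypothetical usage: program group 1 counter 0 with an IMPLEMENTATION
 * DEFINED event number and read the accumulated count back. */
static uint64_t sample_group1_counter0(unsigned int event)
{
	amu_group1_set_evtype(0, event);
	amu_group1_cnt_write(0, 0);	/* start counting from zero */
	return amu_group1_cnt_read(0);
}
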
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 00000000..e0b1f564
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group0_cnt_write_internal
+ .globl amu_group1_cnt_read_internal
+ .globl amu_group1_cnt_write_internal
+ .globl amu_group1_set_evtype_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #2
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR00_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR01_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR02_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR03_EL0 /* index 3 */
+ ret
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #2
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR00_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR01_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR02_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR03_EL0, x1 /* index 3 */
+ ret
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #4
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR10_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR11_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR12_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR13_EL0 /* index 3 */
+ ret
+ mrs x0, AMEVCNTR14_EL0 /* index 4 */
+ ret
+ mrs x0, AMEVCNTR15_EL0 /* index 5 */
+ ret
+ mrs x0, AMEVCNTR16_EL0 /* index 6 */
+ ret
+ mrs x0, AMEVCNTR17_EL0 /* index 7 */
+ ret
+ mrs x0, AMEVCNTR18_EL0 /* index 8 */
+ ret
+ mrs x0, AMEVCNTR19_EL0 /* index 9 */
+ ret
+ mrs x0, AMEVCNTR1A_EL0 /* index 10 */
+ ret
+ mrs x0, AMEVCNTR1B_EL0 /* index 11 */
+ ret
+ mrs x0, AMEVCNTR1C_EL0 /* index 12 */
+ ret
+ mrs x0, AMEVCNTR1D_EL0 /* index 13 */
+ ret
+ mrs x0, AMEVCNTR1E_EL0 /* index 14 */
+ ret
+ mrs x0, AMEVCNTR1F_EL0 /* index 15 */
+ ret
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVCNTR14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVCNTR15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVCNTR16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVCNTR17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVCNTR18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVCNTR19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVCNTR1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVCNTR1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVCNTR1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVCNTR1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVCNTR1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVCNTR1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+
+ /* val should be in the range [0, 65535] */
+ mov x2, x1
+ lsr x2, x2, #16
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+ * Given `idx` calculate address of msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVTYPER10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVTYPER11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVTYPER12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVTYPER13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVTYPER14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVTYPER15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVTYPER16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVTYPER17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVTYPER18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVTYPER19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVTYPER1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVTYPER1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVTYPER1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVTYPER1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVTYPER1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVTYPER1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_set_evtype_internal
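
The ENABLE_ASSERTIONS blocks in these helpers bound-check the index before the computed branch: shifting right by 2 and asserting the result is zero enforces idx < 4 for the group 0 helpers, the `lsr #4` variants enforce idx < 16 for group 1, and the extra check in the evtype helper enforces that the event value fits in 16 bits. The C equivalents, shown only to make the arithmetic explicit:

#include <assert.h>

static void check_group0_idx(unsigned int idx) { assert((idx >> 2) == 0); }	/* idx < 4 */
static void check_group1_idx(unsigned int idx) { assert((idx >> 4) == 0); }	/* idx < 16 */
static void check_evtype_val(unsigned int val) { assert((val >> 16) == 0); }	/* val <= 0xffff */
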
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index d9490672..a77972d3 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -14,6 +14,7 @@
#include <debug.h>
#include <platform.h>
#include <pmf.h>
+#include <pubsub_events.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"
@@ -68,6 +69,8 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
{
unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+ PUBLISH_EVENT(psci_suspend_pwrdown_start);
+
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
@@ -308,6 +311,8 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+ PUBLISH_EVENT(psci_suspend_pwrdown_finish);
+
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 0acfacbf..75c5a912 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -893,7 +893,7 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* Check if the mapping function actually managed to map
* anything. If not, just return now.
*/
- if (mm_cursor->base_va >= end_va)
+ if (mm->base_va >= end_va)
return -ENOMEM;
/*