Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/arm32_aeabi_divmod.c          47
-rw-r--r--  lib/cpus/aarch64/aem_generic.S            38
-rw-r--r--  lib/cpus/aarch64/neoverse_n1.S             1
-rw-r--r--  lib/el3_runtime/aarch64/context.S         65
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c    70
-rw-r--r--  lib/extensions/ras/ras_common.c            5
-rw-r--r--  lib/psci/psci_common.c                    22
-rw-r--r--  lib/xlat_tables/aarch32/nonlpae_tables.c   5
8 files changed, 147 insertions, 106 deletions
diff --git a/lib/aarch32/arm32_aeabi_divmod.c b/lib/aarch32/arm32_aeabi_divmod.c
index 0b36cb6c..ea8e2bbc 100644
--- a/lib/aarch32/arm32_aeabi_divmod.c
+++ b/lib/aarch32/arm32_aeabi_divmod.c
@@ -33,13 +33,11 @@ static void uint_div_qr(unsigned int numerator, unsigned int denominator,
unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator);
unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator);
-unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator);
/* returns in R0 and R1 by tail calling an asm function */
signed int __aeabi_idivmod(signed int numerator, signed int denominator);
signed int __aeabi_idiv(signed int numerator, signed int denominator);
-signed int __aeabi_imod(signed int numerator, signed int denominator);
/*
* __ste_idivmod_ret_t __aeabi_idivmod(signed numerator, signed denominator)
@@ -106,15 +104,6 @@ unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator)
return qr.q;
}
-unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator)
-{
- struct qr qr = { .q_n = 0, .r_n = 0 };
-
- uint_div_qr(numerator, denominator, &qr);
-
- return qr.r;
-}
-
unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator)
{
struct qr qr = { .q_n = 0, .r_n = 0 };
@@ -145,42 +134,6 @@ signed int __aeabi_idiv(signed int numerator, signed int denominator)
return qr.q;
}
-signed int __aeabi_imod(signed int numerator, signed int denominator)
-{
- signed int s;
- signed int i;
- signed int j;
- signed int h;
- struct qr qr = { .q_n = 0, .r_n = 0 };
-
- /* in case modulo of a power of 2 */
- for (i = 0, j = 0, h = 0, s = denominator; (s != 0) || (h > 1); i++) {
- if (s & 1) {
- j = i;
- h++;
- }
- s = s >> 1;
- }
- if (h == 1)
- return numerator >> j;
-
- if (((numerator < 0) && (denominator > 0)) ||
- ((numerator > 0) && (denominator < 0)))
- qr.q_n = 1; /* quotient shall be negate */
-
- if (numerator < 0) {
- numerator = -numerator;
- qr.r_n = 1; /* remainder shall be negate */
- }
-
- if (denominator < 0)
- denominator = -denominator;
-
- uint_div_qr(numerator, denominator, &qr);
-
- return qr.r;
-}
-
signed int __aeabi_idivmod(signed int numerator, signed int denominator)
{
struct qr qr = { .q_n = 0, .r_n = 0 };
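
The deleted __aeabi_uimod/__aeabi_imod helpers were unused: on AEABI targets the compiler obtains a remainder through the divmod entry points, which return the quotient in R0 and the remainder in R1. A minimal C sketch of that contract (hypothetical helper, not part of this patch):

#include <stdio.h>

/* What "num % den" lowers to once the dedicated modulo helpers are
 * gone: a divmod-style computation where the quotient lands in R0 and
 * the remainder in R1. */
static unsigned int mod_via_divmod(unsigned int num, unsigned int den)
{
	unsigned int q = num / den; /* what __aeabi_uidiv would return */
	return num - (q * den);     /* what __aeabi_uidivmod leaves in R1 */
}

int main(void)
{
	printf("%u\n", mod_via_divmod(17U, 5U)); /* prints 2 */
	return 0;
}
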
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 51b5ce91..6291e43e 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,15 +18,43 @@ func aem_generic_core_pwr_dwn
msr sctlr_el3, x1
isb
+ /* ---------------------------------------------
+ * The AEM model can implement an L3 cache, in
+ * which case L2 is a private per-core cache and
+ * a flush from L1 to L2 is not sufficient.
+ * ---------------------------------------------
+ mrs x1, clidr_el1
+
+ /* ---------------------------------------------
+ * Check if L3 cache is implemented.
+ * ---------------------------------------------
+ */
+ tst x1, ((1 << CLIDR_FIELD_WIDTH) - 1) << CTYPE_SHIFT(3)
+
+ /* ---------------------------------------------
+ * There is no L3 cache; flush L1 to L2 only.
+ * ---------------------------------------------
+ */
mov x0, #DCCISW
+ b.eq dcsw_op_level1
+
+ mov x18, x30
/* ---------------------------------------------
- * Flush L1 cache to PoU.
+ * Flush L1 cache to L2.
* ---------------------------------------------
*/
- b dcsw_op_louis
-endfunc aem_generic_core_pwr_dwn
+ bl dcsw_op_level1
+ mov x30, x18
+ /* ---------------------------------------------
+ * Flush L2 cache to L3.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_level2
+endfunc aem_generic_core_pwr_dwn
func aem_generic_cluster_pwr_dwn
/* ---------------------------------------------
@@ -39,7 +67,7 @@ func aem_generic_cluster_pwr_dwn
isb
/* ---------------------------------------------
- * Flush L1 and L2 caches to PoC.
+ * Flush all caches to PoC.
* ---------------------------------------------
*/
mov x0, #DCCISW
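
For reference, a rough C model of the new CLIDR_EL1 probe, with bit positions per the Arm ARM (CLIDR_FIELD_WIDTH and CTYPE_SHIFT are assumed to match TF-A's arch.h definitions):

#include <stdbool.h>
#include <stdint.h>

#define CLIDR_FIELD_WIDTH 3ULL
/* Ctype<n> occupies bits [3n-1 : 3n-3] of CLIDR_EL1 */
#define CTYPE_SHIFT(n) (3ULL * ((n) - 1ULL))

/* True when CLIDR_EL1 reports a cache at the given level; this is the
 * check the "tst x1, ... << CTYPE_SHIFT(3)" instruction performs. */
static bool cache_level_present(uint64_t clidr, unsigned int level)
{
	uint64_t mask = ((1ULL << CLIDR_FIELD_WIDTH) - 1ULL)
			<< CTYPE_SHIFT(level);
	return (clidr & mask) != 0ULL; /* Ctype == 0b000: no cache */
}

int main(void)
{
	/* Example: Ctype3 == 0b100 (unified L3) -> level 3 present */
	uint64_t clidr = 0x4ULL << CTYPE_SHIFT(3);
	return cache_level_present(clidr, 3U) ? 0 : 1;
}
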
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index b143a2e7..31e7a3a7 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -43,6 +43,7 @@ func errata_n1_1043202_wa
msr CPUPMR_EL3, x0
ldr x0, =0x800200071
msr CPUPCR_EL3, x0
+ isb
1:
ret x17
endfunc errata_n1_1043202_wa
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index e6ab19bc..53dc02e6 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -24,9 +24,45 @@
.global save_gp_registers
.global restore_gp_registers
.global restore_gp_registers_eret
+ .global save_pmcr_disable_pmu
.global el3_exit
/* -----------------------------------------------------
+ * If ARMv8.5-PMU is implemented, cycle counting is
+ * disabled by setting MDCR_EL3.SCCD to 1.
+ * -----------------------------------------------------
+ */
+func save_pmcr_disable_pmu
+ /* -----------------------------------------------------
+ * Check if the earlier initialization of MDCR_EL3.SCCD
+ * failed, meaning that ARMv8.5-PMU is not implemented
+ * and PMCR_EL0 should be saved in the Non-secure context.
+ * -----------------------------------------------------
+ */
+ mrs x9, mdcr_el3
+ tst x9, #MDCR_SCCD_BIT
+ bne 1f
+
+ /* Secure Cycle Counter is not disabled */
+ mrs x9, pmcr_el0
+
+ /* Check caller's security state */
+ mrs x10, scr_el3
+ tst x10, #SCR_NS_BIT
+ beq 2f
+
+ /* Save PMCR_EL0 if called from Non-secure state */
+ str x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
+
+ /* Disable cycle counter when event counting is prohibited */
+2: orr x9, x9, #PMCR_EL0_DP_BIT
+ msr pmcr_el0, x9
+
+ isb
+1: ret
+endfunc save_pmcr_disable_pmu
+
+/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
* to save EL1 system register context. It assumes that
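
The save_pmcr_disable_pmu flow above is easier to follow in C. A hedged model, with the bit positions assumed from the Arm ARM rather than taken from this patch:

#include <stdint.h>

/* Assumed positions: MDCR_EL3.SCCD is bit 23, PMCR_EL0.DP is bit 5,
 * SCR_EL3.NS is bit 0. */
#define MDCR_SCCD_BIT   (1ULL << 23)
#define PMCR_EL0_DP_BIT (1ULL << 5)
#define SCR_NS_BIT      (1ULL << 0)

/* If MDCR_EL3.SCCD could not be set (no ARMv8.5-PMU), save the
 * caller's PMCR_EL0 when the caller was Non-secure, then set
 * PMCR_EL0.DP so PMCCNTR_EL0 stops counting while event counting
 * is prohibited. */
static void save_pmcr_disable_pmu_model(uint64_t mdcr_el3,
					uint64_t scr_el3,
					uint64_t *pmcr_el0,
					uint64_t *ctx_pmcr)
{
	if ((mdcr_el3 & MDCR_SCCD_BIT) != 0ULL)
		return;			/* cycle counter already disabled */

	if ((scr_el3 & SCR_NS_BIT) != 0ULL)
		*ctx_pmcr = *pmcr_el0;	/* save into Non-secure context */

	*pmcr_el0 |= PMCR_EL0_DP_BIT;	/* prohibit cycle counting */
}
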
@@ -80,9 +116,6 @@ func el1_sysregs_context_save
mrs x9, vbar_el1
stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
- mrs x10, pmcr_el0
- str x10, [x0, #CTX_PMCR_EL0]
-
/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
mrs x11, spsr_abt
@@ -169,9 +202,6 @@ func el1_sysregs_context_restore
msr contextidr_el1, x17
msr vbar_el1, x9
- ldr x10, [x0, #CTX_PMCR_EL0]
- msr pmcr_el0, x10
-
/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
ldp x11, x12, [x0, #CTX_SPSR_ABT]
@@ -503,6 +533,29 @@ func el3_exit
msr spsr_el3, x16
msr elr_el3, x17
+ /* -----------------------------------------------------
+ * Restore PMCR_EL0 when returning to Non-secure state,
+ * unless the Secure Cycle Counter has already been
+ * disabled in MDCR_EL3 (ARMv8.5-PMU implemented).
+ * -----------------------------------------------------
+ */
+ tst x18, #SCR_NS_BIT
+ beq 2f
+
+ /* -----------------------------------------------------
+ * Back to Non-secure state.
+ * Check if the earlier initialization of MDCR_EL3.SCCD
+ * failed, meaning that ARMv8.5-PMU is not implemented
+ * and PMCR_EL0 should be restored from the Non-secure context.
+ * -----------------------------------------------------
+ */
+ mrs x17, mdcr_el3
+ tst x17, #MDCR_SCCD_BIT
+ bne 2f
+ ldr x17, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
+ msr pmcr_el0, x17
+2:
+
#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
/* Restore mitigation state as it was on entry to EL3 */
ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
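
The exit path mirrors the save path; a companion C sketch (same assumed masks and #include <stdint.h> as the previous sketch):

/* Restore the saved PMCR_EL0 only when returning to Non-secure state
 * and only when MDCR_EL3.SCCD could not be set. */
static void restore_pmcr_model(uint64_t scr_el3, uint64_t mdcr_el3,
			       uint64_t ctx_pmcr, uint64_t *pmcr_el0)
{
	if ((scr_el3 & SCR_NS_BIT) != 0ULL &&
	    (mdcr_el3 & MDCR_SCCD_BIT) == 0ULL)
		*pmcr_el0 = ctx_pmcr;
}
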
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 05ba5ed6..bd5b3aa6 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -66,7 +66,7 @@ void __init cm_init(void)
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
- uint32_t scr_el3, pmcr_el0;
+ uint32_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx, actlr_elx;
@@ -225,31 +225,10 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
actlr_elx = read_actlr_el1();
write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
- if (security_state == SECURE) {
- /*
- * Initialise PMCR_EL0 for secure context only, setting all
- * fields rather than relying on hw. Some fields are
- * architecturally UNKNOWN on reset.
- *
- * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
- * is recorded in PMOVSCLR_EL0[31], occurs on the increment
- * that changes PMCCNTR_EL0[63] from 1 to 0.
- *
- * PMCR_EL0.DP: Set to one so that the cycle counter,
- * PMCCNTR_EL0 does not count when event counting is prohibited.
- *
- * PMCR_EL0.X: Set to zero to disable export of events.
- *
- * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
- * counts on every clock cycle.
- */
- pmcr_el0 = ((PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT
- | PMCR_EL0_DP_BIT)
- & ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT));
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_PMCR_EL0, pmcr_el0);
- }
-
- /* Populate EL3 state so that we've the right context before doing ERET */
+ /*
+ * Populate EL3 state so that we have the right context
+ * before doing ERET.
+ */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
@@ -441,6 +420,29 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
+ * MDCR_EL2.HLP: Set to one so that event counter
+ * overflow, which is recorded in PMOVSCLR_EL0[0-30],
+ * occurs on the increment that changes
+ * PMEVCNTR<n>_EL0[63] from 1 to 0, when ARMv8.5-PMU is
+ * implemented. This bit is RES0 in versions of the
+ * architecture earlier than ARMv8.5; setting it to 1
+ * has no effect on them.
+ *
+ * MDCR_EL2.TTRF: Set to zero so that access to Trace
+ * Filter Control register TRFCR_EL1 at EL1 is not
+ * trapped to EL2. This bit is RES0 in versions of
+ * the architecture earlier than ARMv8.4.
+ *
+ * MDCR_EL2.HPMD: Set to one so that event counting is
+ * prohibited at EL2. This bit is RES0 in versions of
+ * the architecture earlier than ARMv8.1; setting it
+ * to 1 has no effect on them.
+ *
+ * MDCR_EL2.TPMS: Set to zero so that accesses to
+ * Statistical Profiling control registers from EL1
+ * do not trap to EL2. This bit is RES0 when SPE is
+ * not implemented.
+ *
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@@ -469,13 +471,15 @@ void cm_prepare_el3_exit(uint32_t security_state)
* MDCR_EL2.HPMN: Set to value of PMCR_EL0.N which is the
* architecturally-defined reset value.
*/
- mdcr_el2 = ((MDCR_EL2_RESET_VAL |
- ((read_pmcr_el0() & PMCR_EL0_N_BITS)
- >> PMCR_EL0_N_SHIFT)) &
- ~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT
- | MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT
- | MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
- | MDCR_EL2_TPMCR_BIT));
+ mdcr_el2 = ((MDCR_EL2_RESET_VAL | MDCR_EL2_HLP |
+ MDCR_EL2_HPMD) |
+ ((read_pmcr_el0() & PMCR_EL0_N_BITS)
+ >> PMCR_EL0_N_SHIFT)) &
+ ~(MDCR_EL2_TTRF | MDCR_EL2_TPMS |
+ MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
+ MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
+ MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT |
+ MDCR_EL2_TPMCR_BIT);
write_mdcr_el2(mdcr_el2);
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index be8becee..64a48524 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -83,7 +83,8 @@ static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
{
struct ras_interrupt *ras_inrs = ras_interrupt_mappings.intrs;
struct ras_interrupt *selected = NULL;
- int start, end, mid, probe_data, ret __unused;
+ int probe_data = 0;
+ int start, end, mid, ret __unused;
const struct err_handler_data err_data = {
.version = ERR_HANDLER_VERSION,
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 5d24356c..022c8775 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -198,21 +198,17 @@ static unsigned int get_power_on_target_pwrlvl(void)
/******************************************************************************
* Helper function to update the requested local power state array. This array
* does not store the requested state for the CPU power level. Hence an
- * assertion is added to prevent us from accessing the wrong index.
+ * assertion is added to prevent us from accessing the CPU power level entry.
*****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
unsigned int cpu_idx,
plat_local_state_t req_pwr_state)
{
- /*
- * This should never happen, we have this here to avoid
- * "array subscript is above array bounds" errors in GCC.
- */
assert(pwrlvl > PSCI_CPU_PWR_LVL);
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Warray-bounds"
- psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
-#pragma GCC diagnostic pop
+ if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
+ (cpu_idx < PLATFORM_CORE_COUNT)) {
+ psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
+ }
}
/******************************************************************************
@@ -245,7 +241,11 @@ static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
{
assert(pwrlvl > PSCI_CPU_PWR_LVL);
- return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
+ if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
+ (cpu_idx < PLATFORM_CORE_COUNT)) {
+ return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
+ }
+ return NULL;
}
/*
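
The pwrlvl - 1 indexing works because the array's first row holds level-1 states; the CPU level itself is never stored. A standalone sketch of the guarded-access pattern, with the platform constants assumed for illustration:

#include <assert.h>
#include <stddef.h>

#define PSCI_CPU_PWR_LVL	0U
#define PLAT_MAX_PWR_LVL	2U	/* assumed for illustration */
#define PLATFORM_CORE_COUNT	4U	/* assumed for illustration */

typedef unsigned char plat_local_state_t;

/* Row [0] holds level-1 states; the CPU level itself is not stored. */
static plat_local_state_t req_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

static plat_local_state_t *get_req_state(unsigned int pwrlvl,
					 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	/* The explicit range check replaces the old -Warray-bounds
	 * pragma: it documents the invariant and protects release
	 * builds, where the assert above compiles to nothing. */
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
	    (cpu_idx < PLATFORM_CORE_COUNT))
		return &req_states[pwrlvl - 1U][cpu_idx];

	return NULL;
}

int main(void)
{
	return (get_req_state(1U, 0U) != NULL) ? 0 : 1;
}
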
diff --git a/lib/xlat_tables/aarch32/nonlpae_tables.c b/lib/xlat_tables/aarch32/nonlpae_tables.c
index e31f9d84..bd6b152e 100644
--- a/lib/xlat_tables/aarch32/nonlpae_tables.c
+++ b/lib/xlat_tables/aarch32/nonlpae_tables.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2017, Linaro Limited. All rights reserved.
- * Copyright (c) 2014-2017, Arm Limited. All rights reserved.
+ * Copyright (c) 2014-2019, Arm Limited. All rights reserved.
* Copyright (c) 2014, STMicroelectronics International N.V.
* All rights reserved.
*
@@ -445,7 +445,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
} else {
xlat_table = (unsigned long)mmu_l2_base +
next_xlat * MMU32B_L2_TABLE_SIZE;
- assert(++next_xlat <= MAX_XLAT_TABLES);
+ next_xlat++;
+ assert(next_xlat <= MAX_XLAT_TABLES);
memset((char *)xlat_table, 0,
MMU32B_L2_TABLE_SIZE);
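
Splitting assert(++next_xlat <= MAX_XLAT_TABLES) matters because assert() compiles to nothing in release builds (e.g. NDEBUG, or TF-A's ENABLE_ASSERTIONS=0), taking any side effect inside it along for the ride. A standalone demonstration of the pitfall:

#include <assert.h>
#include <stdio.h>

#define MAX_XLAT_TABLES 4U

static unsigned int next_xlat;

static void allocate_table_broken(void)
{
	/* With assertions compiled out, the whole expression,
	 * increment included, vanishes: next_xlat never advances and
	 * every caller would reuse the same table. */
	assert(++next_xlat <= MAX_XLAT_TABLES);
}

static void allocate_table_fixed(void)
{
	next_xlat++;				/* side effect always runs */
	assert(next_xlat <= MAX_XLAT_TABLES);	/* check is debug-only */
}

int main(void)
{
	allocate_table_broken();
	allocate_table_fixed();
	printf("next_xlat = %u\n", next_xlat); /* 2 with asserts, 1 without */
	return 0;
}
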