author     davidcunado-arm <david.cunado@arm.com>    2017-11-23 23:41:24 +0000
committer  GitHub <noreply@github.com>               2017-11-23 23:41:24 +0000
commit     71f8a6a9b0cdfcc773542844e1fcb89ba93bcbf5 (patch)
tree       19c1299ef5fc877d4982f14f881f49cfe35a05c2 /lib
parent     1c64838d4b0e061f22b1b31b8fd64ea4510e1ded (diff)
parent     1d791530d0f3a4a02e285a38f35fecac4feec70c (diff)
Merge pull request #1145 from etienne-lms/rfc-armv7-2
Support ARMv7 architectures
Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/arm32_aeabi_divmod.c               203
-rw-r--r--  lib/aarch32/arm32_aeabi_divmod_a32.S            30
-rw-r--r--  lib/cpus/aarch32/cortex_a12.S                   75
-rw-r--r--  lib/cpus/aarch32/cortex_a15.S                   81
-rw-r--r--  lib/cpus/aarch32/cortex_a17.S                   75
-rw-r--r--  lib/cpus/aarch32/cortex_a5.S                    75
-rw-r--r--  lib/cpus/aarch32/cortex_a7.S                    75
-rw-r--r--  lib/cpus/aarch32/cortex_a9.S                    75
-rw-r--r--  lib/locks/exclusive/aarch32/spinlock.S          12
-rw-r--r--  lib/psci/psci_setup.c                            2
-rw-r--r--  lib/xlat_tables/aarch32/xlat_tables.c            4
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c    4
12 files changed, 711 insertions(+), 0 deletions(-)
diff --git a/lib/aarch32/arm32_aeabi_divmod.c b/lib/aarch32/arm32_aeabi_divmod.c
new file mode 100644
index 00000000..a8f2e742
--- /dev/null
+++ b/lib/aarch32/arm32_aeabi_divmod.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * From ABI specifications:
+ * int __aeabi_idiv(int numerator, int denominator);
+ * unsigned __aeabi_uidiv(unsigned numerator, unsigned denominator);
+ *
+ * typedef struct { int quot; int rem; } idiv_return;
+ * typedef struct { unsigned quot; unsigned rem; } uidiv_return;
+ *
+ * __value_in_regs idiv_return __aeabi_idivmod(int numerator,
+ * int denominator);
+ * __value_in_regs uidiv_return __aeabi_uidivmod(unsigned numerator,
+ * unsigned denominator);
+ */
+
+/* struct qr - stores quotient/remainder to handle divmod EABI interfaces. */
+struct qr {
+ unsigned int q; /* computed quotient */
+ unsigned int r; /* computed remainder */
+ unsigned int q_n; /* specifies if quotient shall be negative */
+ unsigned int r_n; /* specifies if remainder shall be negative */
+};
+
+static void uint_div_qr(unsigned int numerator, unsigned int denominator,
+ struct qr *qr);
+
+/* returns in R0 and R1 by tail calling an asm function */
+unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator);
+
+unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator);
+unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator);
+
+/* returns in R0 and R1 by tail calling an asm function */
+signed int __aeabi_idivmod(signed int numerator, signed int denominator);
+
+signed int __aeabi_idiv(signed int numerator, signed int denominator);
+signed int __aeabi_imod(signed int numerator, signed int denominator);
+
+/*
+ * __ste_idivmod_ret_t __aeabi_idivmod(signed numerator, signed denominator)
+ * Numerator and Denominator are received in R0 and R1.
+ * The __ste_idivmod_ret_t result is returned in R0 and R1.
+ *
+ * __ste_uidivmod_ret_t __aeabi_uidivmod(unsigned numerator,
+ * unsigned denominator)
+ * Numerator and Denominator are received in R0 and R1.
+ * The __ste_uidivmod_ret_t result is returned in R0 and R1.
+ */
+#ifdef __GNUC__
+signed int ret_idivmod_values(signed int quotient, signed int remainder);
+unsigned int ret_uidivmod_values(unsigned int quotient, unsigned int remainder);
+#else
+#error "Compiler not supported"
+#endif
+
+static void division_qr(unsigned int n, unsigned int p, struct qr *qr)
+{
+ unsigned int i = 1, q = 0;
+
+ if (p == 0) {
+ qr->q = 0; qr->r = 0xFFFFFFFF; /* division by 0 */
+ return;
+ }
+
+ while ((p >> 31) == 0) {
+ i = i << 1; /* count the max division steps */
+ p = p << 1; /* increase p until it has maximum size */
+ }
+
+ while (i > 0) {
+ q = q << 1; /* write bit in q at index (size-1) */
+ if (n >= p) {
+ n -= p;
+ q++;
+ }
+ p = p >> 1; /* decrease p */
+ i = i >> 1; /* decrease remaining size in q */
+ }
+ qr->r = n;
+ qr->q = q;
+}
+
+static void uint_div_qr(unsigned int numerator, unsigned int denominator,
+ struct qr *qr)
+{
+ division_qr(numerator, denominator, qr);
+
+ /* negate quotient and/or remainder as requested by the caller */
+ if (qr->q_n)
+ qr->q = -qr->q;
+ if (qr->r_n)
+ qr->r = -qr->r;
+}
+
+unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.q;
+}
+
+unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.r;
+}
+
+unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return ret_uidivmod_values(qr.q, qr.r);
+}
+
+signed int __aeabi_idiv(signed int numerator, signed int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.q;
+}
+
+signed int __aeabi_imod(signed int numerator, signed int denominator)
+{
+ signed int s;
+ signed int i;
+ signed int j;
+ signed int h;
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ /* fast path when the denominator is a positive power of 2 */
+ for (i = 0, j = 0, h = 0, s = denominator; (s != 0) && (h < 2); i++) {
+ if (s & 1) {
+ j = i;
+ h++;
+ }
+ s = s >> 1;
+ }
+ if ((h == 1) && (numerator >= 0))
+ return numerator & ((1 << j) - 1);
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.r;
+}
+
+signed int __aeabi_idivmod(signed int numerator, signed int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return ret_idivmod_values(qr.q, qr.r);
+}
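On ARMv7 cores that lack the hardware divide instructions (sdiv/udiv), the compiler lowers integer / and % to these AEABI helpers, and division_qr() implements them with classic restoring binary long division: the divisor is left-shifted until its most significant bit reaches bit 31, then one quotient bit is produced per right shift. The following host-buildable sketch models that loop and checks it against the native operators; all names in it are illustrative and not part of the patch:

    #include <assert.h>
    #include <stdio.h>

    /* Host-side model of division_qr(): restoring binary long division. */
    static void model_div_qr(unsigned int n, unsigned int p,
                             unsigned int *q_out, unsigned int *r_out)
    {
        unsigned int i = 1, q = 0;

        if (p == 0) {
            *q_out = 0;
            *r_out = 0xFFFFFFFFu; /* mirror the patch: flag division by 0 */
            return;
        }
        while ((p >> 31) == 0) {
            i <<= 1; /* one more quotient bit to produce */
            p <<= 1; /* align the divisor's MSB with bit 31 */
        }
        while (i > 0) {
            q <<= 1;
            if (n >= p) { /* the shifted divisor fits: subtract it */
                n -= p;
                q++;
            }
            p >>= 1;
            i >>= 1;
        }
        *q_out = q;
        *r_out = n;
    }

    int main(void)
    {
        unsigned int q, r;

        model_div_qr(1000u, 7u, &q, &r);
        assert(q == 1000u / 7u && r == 1000u % 7u);
        printf("1000 / 7 = %u rem %u\n", q, r); /* 142 rem 6 */
        return 0;
    }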
diff --git a/lib/aarch32/arm32_aeabi_divmod_a32.S b/lib/aarch32/arm32_aeabi_divmod_a32.S
new file mode 100644
index 00000000..6915dcd8
--- /dev/null
+++ b/lib/aarch32/arm32_aeabi_divmod_a32.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * EABI wrappers for the uidivmod and idivmod functions
+ */
+
+ .globl ret_uidivmod_values
+ .globl ret_idivmod_values
+
+/*
+ * signed ret_idivmod_values(signed quot, signed rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_idivmod_values
+ bx lr
+endfunc ret_idivmod_values
+
+/*
+ * unsigned ret_uidivmod_values(unsigned quot, unsigned rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_uidivmod_values
+ bx lr
+endfunc ret_uidivmod_values
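The bx lr bodies are not stubs: under the AArch32 AAPCS, the first two integer arguments arrive in r0 and r1, which are exactly the registers a two-word result is returned in. The C code therefore tail-calls ret_idivmod_values(quotient, remainder) or ret_uidivmod_values(quotient, remainder), and returning immediately leaves the pair where the original caller expects it. A caller-side sketch, for illustration only (little-endian AArch32 assumed; the 64-bit-return prototype below is an alias trick, not the real declaration):

    /* Illustration only: on little-endian AArch32, a 64-bit integer
     * result is also returned in r0 (low word) and r1 (high word), so
     * this alias prototype can unpack __aeabi_uidivmod's register pair. */
    unsigned long long __aeabi_uidivmod(unsigned int num, unsigned int den);

    void divmod_example(void)
    {
        unsigned long long pair = __aeabi_uidivmod(17u, 5u);
        unsigned int quot = (unsigned int)pair;         /* r0 = 3 */
        unsigned int rem  = (unsigned int)(pair >> 32); /* r1 = 2 */
        (void)quot;
        (void)rem;
    }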
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
new file mode 100644
index 00000000..73c97507
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a12.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a12_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a12_disable_smp
+
+func cortex_a12_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a12_enable_smp
+
+func cortex_a12_reset_func
+ b cortex_a12_enable_smp
+endfunc cortex_a12_reset_func
+
+func cortex_a12_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_core_pwr_dwn
+
+func cortex_a12_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
+ cortex_a12_reset_func, \
+ cortex_a12_core_pwr_dwn, \
+ cortex_a12_cluster_pwr_dwn
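This file and the five siblings that follow (Cortex-A15, A17, A5, A7, A9) share one template: a reset hook that sets the SMP bit in ACTLR, and power-down hooks that flush the L1 data cache (DC CISW via dcsw_op_level1), call plat_disable_acp at cluster scope, then clear the SMP bit to exit coherency. declare_cpu_ops registers these hooks against the core's MIDR so init_cpu_ops() can select the right set at runtime. A conceptual C sketch of that registration — field names and the lookup are illustrative, not TF-A's actual cpu_macros.S layout:

    #include <stddef.h>

    /* Illustrative shape of one declare_cpu_ops record. */
    struct cpu_ops_sketch {
        unsigned long midr;            /* part id to match */
        void (*reset_func)(void);      /* set ACTLR.SMP */
        void (*core_pwr_dwn)(void);    /* flush L1, exit coherency */
        void (*cluster_pwr_dwn)(void); /* also disable the ACP */
    };

    /* init_cpu_ops()-style lookup: find the entry for the running core. */
    static const struct cpu_ops_sketch *
    find_cpu_ops(const struct cpu_ops_sketch *table, size_t n,
                 unsigned long midr)
    {
        for (size_t i = 0; i < n; i++)
            if (table[i].midr == midr)
                return &table[i];
        return NULL; /* unsupported core */
    }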
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
new file mode 100644
index 00000000..0d5a1165
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a15.h>
+#include <cpu_macros.S>
+
+/*
+ * The Cortex-A15 supports LPAE and the Virtualization Extensions.
+ * It does not matter whether the configuration uses LPAE and/or VE,
+ * so we don't check ARCH_IS_ARMV7_WITH_LPAE/VE here.
+ */
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a15_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a15_disable_smp
+
+func cortex_a15_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a15_enable_smp
+
+func cortex_a15_reset_func
+ b cortex_a15_enable_smp
+endfunc cortex_a15_reset_func
+
+func cortex_a15_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_core_pwr_dwn
+
+func cortex_a15_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
+ cortex_a15_reset_func, \
+ cortex_a15_core_pwr_dwn, \
+ cortex_a15_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
new file mode 100644
index 00000000..316d4f05
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a17.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a17_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a17_disable_smp
+
+func cortex_a17_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a17_enable_smp
+
+func cortex_a17_reset_func
+ b cortex_a17_enable_smp
+endfunc cortex_a17_reset_func
+
+func cortex_a17_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_core_pwr_dwn
+
+func cortex_a17_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \
+ cortex_a17_reset_func, \
+ cortex_a17_core_pwr_dwn, \
+ cortex_a17_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
new file mode 100644
index 00000000..c07c13ea
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a5.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a5_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a5_disable_smp
+
+func cortex_a5_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a5_enable_smp
+
+func cortex_a5_reset_func
+ b cortex_a5_enable_smp
+endfunc cortex_a5_reset_func
+
+func cortex_a5_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_core_pwr_dwn
+
+func cortex_a5_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
+ cortex_a5_reset_func, \
+ cortex_a5_core_pwr_dwn, \
+ cortex_a5_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
new file mode 100644
index 00000000..0278d1fd
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a7.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a7_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a7_disable_smp
+
+func cortex_a7_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a7_enable_smp
+
+func cortex_a7_reset_func
+ b cortex_a7_enable_smp
+endfunc cortex_a7_reset_func
+
+func cortex_a7_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_core_pwr_dwn
+
+func cortex_a7_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
+ cortex_a7_reset_func, \
+ cortex_a7_core_pwr_dwn, \
+ cortex_a7_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
new file mode 100644
index 00000000..4f30f84a
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a9.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a9_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a9_disable_smp
+
+func cortex_a9_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a9_enable_smp
+
+func cortex_a9_reset_func
+ b cortex_a9_enable_smp
+endfunc cortex_a9_reset_func
+
+func cortex_a9_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_core_pwr_dwn
+
+func cortex_a9_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \
+ cortex_a9_reset_func, \
+ cortex_a9_core_pwr_dwn, \
+ cortex_a9_cluster_pwr_dwn
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
index bc77bc9c..9492cc08 100644
--- a/lib/locks/exclusive/aarch32/spinlock.S
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -9,6 +9,17 @@
.globl spin_lock
.globl spin_unlock
+#if ARM_ARCH_AT_LEAST(8, 0)
+/*
+ * According to the ARMv8-A Architecture Reference Manual, "when the global
+ * monitor for a PE changes from Exclusive Access state to Open Access state,
+ * an event is generated." This applies to both AArch32 and AArch64 modes of
+ * ARMv8-A. As a result, no explicit SEV is required on unlock.
+ */
+#define COND_SEV()
+#else
+#define COND_SEV() sev
+#endif
func spin_lock
mov r2, #1
@@ -27,5 +38,6 @@ endfunc spin_lock
func spin_unlock
mov r1, #0
stl r1, [r0]
+ COND_SEV()
bx lr
endfunc spin_unlock
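The COND_SEV() change is the interesting part of this hunk. A waiter in spin_lock typically parks in WFE while holding an exclusive reservation on the lock word; on ARMv7 the releasing core must issue an explicit SEV after the store so those waiters wake, whereas on ARMv8-A the transition of the global exclusive monitor from Exclusive Access to Open Access when the lock word is written already generates the event, making the SEV redundant. A hedged C rendering of the protocol (GCC-style inline asm; the real implementation is the assembly above, and this sketch's exchange loop does not hold the exclusive reservation across the wfe the way ldrex-based assembly does):

    typedef struct {
        volatile unsigned int v;
    } spinlock_sketch_t;

    static void sketch_lock(spinlock_sketch_t *l)
    {
        /* Spin until the exchange observes the lock free (0 -> 1). */
        while (__atomic_exchange_n(&l->v, 1u, __ATOMIC_ACQUIRE) != 0u)
            __asm__ volatile("wfe" ::: "memory"); /* doze until an event */
    }

    static void sketch_unlock(spinlock_sketch_t *l)
    {
        __atomic_store_n(&l->v, 0u, __ATOMIC_RELEASE);
    #if !ARMV8_IMPLICIT_EVENT /* hypothetical stand-in for ARM_ARCH_AT_LEAST(8, 0) */
        __asm__ volatile("sev" ::: "memory"); /* ARMv7: wake any WFE waiters */
    #endif
    }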
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index a841ddab..c00bd94a 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -266,8 +266,10 @@ int psci_setup(const psci_lib_args_t *lib_args)
******************************************************************************/
void psci_arch_setup(void)
{
+#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
+#endif
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index c7e34f20..720d4461 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -13,6 +13,10 @@
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
#define XLAT_TABLE_LEVEL_BASE \
GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
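The same guard appears in the xlat_tables_v2 hunk below: both libraries build long-descriptor translation table entries, which are 64 bits wide and only exist with LPAE, so a pre-LPAE ARMv7 core such as the Cortex-A5 or A9 must fail the build here rather than fault at runtime. A trivial illustrative check of the width difference:

    #include <stdint.h>

    typedef uint32_t short_desc_t; /* classic VMSAv7 short-descriptor entry */
    typedef uint64_t long_desc_t;  /* LPAE / ARMv8 long-descriptor entry */

    _Static_assert(sizeof(long_desc_t) == 2 * sizeof(short_desc_t),
                   "LPAE doubles the translation table descriptor width");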
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 642f799a..fc7ca46a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -14,6 +14,10 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{