-rw-r--r-- bl31/aarch64/runtime_exceptions.S | 20
-rw-r--r-- bl31/bl31.mk | 8
-rw-r--r-- docs/cpu-specific-build-macros.rst | 10
-rw-r--r-- docs/firmware-design.rst | 8
-rw-r--r-- docs/porting-guide.rst | 19
-rw-r--r-- docs/user-guide.rst | 18
-rw-r--r-- drivers/synopsys/emmc/dw_mmc.c | 2
-rw-r--r-- include/bl32/payloads/tlk.h | 3
-rw-r--r-- include/common/aarch64/el3_common_macros.S | 20
-rw-r--r-- include/lib/aarch32/arch_helpers.h | 5
-rw-r--r-- include/lib/aarch64/arch.h | 49
-rw-r--r-- include/lib/aarch64/arch_helpers.h | 1
-rw-r--r-- include/lib/cpus/aarch64/cortex_a75.h | 12
-rw-r--r-- include/lib/el3_runtime/aarch64/context.h | 26
-rw-r--r-- include/lib/el3_runtime/pubsub_events.h | 7
-rw-r--r-- include/lib/extensions/amu.h | 32
-rw-r--r-- include/lib/extensions/amu_private.h | 19
-rw-r--r-- include/lib/utils.h | 2
-rw-r--r-- include/plat/arm/board/common/board_arm_def.h | 2
-rw-r--r-- include/services/mm_svc.h | 31
-rw-r--r-- include/services/spm_svc.h | 24
-rw-r--r-- lib/cpus/aarch64/cortex_a57.S | 5
-rw-r--r-- lib/cpus/aarch64/cortex_a72.S | 6
-rw-r--r-- lib/cpus/aarch64/cortex_a73.S | 5
-rw-r--r-- lib/cpus/aarch64/cortex_a75.S | 113
-rw-r--r-- lib/cpus/aarch64/cortex_a75_pubsub.c | 75
-rw-r--r-- lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S | 372
-rw-r--r-- lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S | 114
-rw-r--r-- lib/cpus/cpu-ops.mk | 5
-rw-r--r-- lib/extensions/amu/aarch32/amu.c | 106
-rw-r--r-- lib/extensions/amu/aarch64/amu.c | 189
-rw-r--r-- lib/extensions/amu/aarch64/amu_helpers.S | 281
-rw-r--r-- lib/psci/psci_suspend.c | 5
-rw-r--r-- lib/xlat_tables_v2/xlat_tables_internal.c | 4
-rw-r--r-- make_helpers/build_macros.mk | 12
-rw-r--r-- plat/arm/board/fvp/platform.mk | 4
-rw-r--r-- plat/hisilicon/hikey/platform.mk | 2
-rw-r--r-- plat/hisilicon/hikey960/aarch64/hikey960_helpers.S | 31
-rw-r--r-- plat/hisilicon/hikey960/hikey960_bl1_setup.c | 7
-rw-r--r-- plat/hisilicon/hikey960/hikey960_pm.c | 91
-rw-r--r-- plat/hisilicon/hikey960/include/hi3660.h | 2
-rw-r--r-- plat/hisilicon/hikey960/include/platform_def.h | 6
-rw-r--r-- plat/hisilicon/hikey960/platform.mk | 2
-rw-r--r-- plat/socionext/uniphier/platform.mk | 2
-rw-r--r-- plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c | 129
-rw-r--r-- plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h | 39
-rw-r--r-- plat/xilinx/zynqmp/platform.mk | 7
-rw-r--r-- plat/xilinx/zynqmp/pm_service/pm_api_sys.c | 5
-rw-r--r-- plat/xilinx/zynqmp/pm_service/pm_common.h | 8
-rw-r--r-- plat/xilinx/zynqmp/pm_service/pm_ipi.c | 88
-rw-r--r-- plat/xilinx/zynqmp/pm_service/pm_ipi.h | 9
-rw-r--r-- plat/xilinx/zynqmp/pm_service/pm_svc_main.c | 17
-rw-r--r-- plat/xilinx/zynqmp/sip_svc_setup.c | 14
-rw-r--r-- plat/xilinx/zynqmp/zynqmp_ipi.c | 283
-rw-r--r-- plat/xilinx/zynqmp/zynqmp_ipi.h | 70
-rw-r--r-- services/spd/tlkd/tlkd_main.c | 10
-rw-r--r-- services/std_svc/spm/spm_main.c | 32
-rw-r--r-- services/std_svc/spm/spm_private.h | 3
-rw-r--r-- tools/fiptool/fiptool.c | 17
59 files changed, 2182 insertions(+), 306 deletions(-)
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index d8fbb9b2..9b7735f1 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -14,6 +14,26 @@
.globl runtime_exceptions
+ .globl sync_exception_sp_el0
+ .globl irq_sp_el0
+ .globl fiq_sp_el0
+ .globl serror_sp_el0
+
+ .globl sync_exception_sp_elx
+ .globl irq_sp_elx
+ .globl fiq_sp_elx
+ .globl serror_sp_elx
+
+ .globl sync_exception_aarch64
+ .globl irq_aarch64
+ .globl fiq_aarch64
+ .globl serror_aarch64
+
+ .globl sync_exception_aarch32
+ .globl irq_aarch32
+ .globl fiq_aarch32
+ .globl serror_aarch32
+
/* ---------------------------------------------------------------------
* This macro handles Synchronous exceptions.
* Only SMC exceptions are supported.
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index fdcc9313..2db48564 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -51,13 +51,19 @@ BL31_SOURCES += lib/extensions/spe/spe.c
endif
ifeq (${ENABLE_AMU},1)
-BL31_SOURCES += lib/extensions/amu/aarch64/amu.c
+BL31_SOURCES += lib/extensions/amu/aarch64/amu.c \
+ lib/extensions/amu/aarch64/amu_helpers.S
endif
ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES += lib/extensions/sve/sve.c
endif
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S \
+ lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+endif
+
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used to indicate if Crash reporting via console should be included
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index f74b4593..014817d3 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -11,6 +11,15 @@ This document describes the various build options present in the CPU specific
operations framework to enable errata workarounds and to enable optimizations
for a specific CPU on a platform.
+Security Vulnerability Workarounds
+----------------------------------
+
+ARM Trusted Firmware exports a series of build flags that control which
+security vulnerability workarounds are applied at runtime.
+
+- ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for
+ `CVE-2017-5715`_. Defaults to 1.
+
CPU Errata Workarounds
----------------------
@@ -142,6 +151,7 @@ architecture that can be enabled by the platform as desired.
*Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
+.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2017-5715
.. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
.. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
.. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 27ee38b8..1f8fcc86 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -1925,9 +1925,11 @@ Firmware Image Package layout
The FIP layout consists of a table of contents (ToC) followed by payload data.
The ToC itself has a header followed by one or more table entries. The ToC is
-terminated by an end marker entry. All ToC entries describe some payload data
-that has been appended to the end of the binary package. With the information
-provided in the ToC entry the corresponding payload data can be retrieved.
+terminated by an end marker entry whose size is 0 bytes and whose offset
+field equals the total size of the FIP file. All ToC entries describe some
+payload data that has been appended to the end of the binary package. With the
+information provided in the ToC entry the corresponding payload data can be
+retrieved.
::
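
As a sketch of the layout just described, the ToC header and entries can be
modeled in C roughly as follows (field names are illustrative assumptions,
not the canonical fiptool definitions):

    #include <stdint.h>

    /* Illustrative sketch only; see tools/fiptool for the real layout. */
    typedef struct fip_toc_header {
        uint32_t name;           /* magic value identifying a FIP */
        uint32_t serial_number;
        uint64_t flags;
    } fip_toc_header_t;

    typedef struct fip_toc_entry {
        uint8_t  uuid[16];       /* identifies the payload type */
        uint64_t offset_address; /* payload offset from the start of the FIP */
        uint64_t size;           /* payload size in bytes */
        uint64_t flags;
    } fip_toc_entry_t;

    /*
     * The end marker is an entry with a zero UUID and size == 0; its
     * offset_address therefore equals the total size of the FIP file.
     */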
diff --git a/docs/porting-guide.rst b/docs/porting-guide.rst
index 0eb4ac3a..84bd2cd4 100644
--- a/docs/porting-guide.rst
+++ b/docs/porting-guide.rst
@@ -549,6 +549,22 @@ behaviour of the ``assert()`` function (for example, to save memory).
doesn't print anything to the console. If ``PLAT_LOG_LEVEL_ASSERT`` isn't
defined, it defaults to ``LOG_LEVEL``.
+If the platform port uses the Activity Monitor Unit, the following constants
+may be defined:
+
+- **PLAT\_AMU\_GROUP1\_COUNTERS\_MASK**
+  This mask reflects the set of group 1 counters that should be enabled. The
+  maximum number of group 1 counters supported by AMUv1 is 16 so the mask
+  can be at most 0xffff. If the platform does not define this mask, no group 1
+  counters are enabled. If the platform defines this mask, the following
+  constant must also be defined.
+
+- **PLAT\_AMU\_GROUP1\_NR\_COUNTERS**
+ This value is used to allocate an array to save and restore the counters
+ specified by ``PLAT_AMU_GROUP1_COUNTERS_MASK`` on CPU suspend.
+ This value should be equal to the highest bit position set in the
+ mask, plus 1. The maximum number of group 1 counters in AMUv1 is 16.
+
File : plat\_macros.S [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1128,6 +1144,9 @@ This function executes with the MMU and data caches enabled. It is responsible
for performing any remaining platform-specific setup that can occur after the
MMU and data cache have been enabled.
+If support for multiple boot sources is required, this function initializes
+the boot sequence used by plat\_try\_next\_boot\_source().
+
In ARM standard platforms, this function initializes the storage abstraction
layer used to load the next bootloader image.
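
As a concrete illustration of the two AMU constants described above, a
platform that wants group 1 counters 0 and 2 enabled might add the following
hypothetical fragment to its platform_def.h:

    /* Hypothetical platform_def.h fragment (values are an example only). */
    #define PLAT_AMU_GROUP1_COUNTERS_MASK   0x5 /* enable counters 0 and 2 */
    #define PLAT_AMU_GROUP1_NR_COUNTERS     3   /* highest set bit is 2, plus 1 */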
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index 274faa81..ed5ba184 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -55,7 +55,7 @@ command:
sudo apt-get install build-essential gcc make git libssl-dev
-ARM TF has been tested with `Linaro Release 17.04`_.
+ARM TF has been tested with `Linaro Release 17.10`_.
Download and install the AArch32 or AArch64 little-endian GCC cross compiler.
The `Linaro Release Notes`_ documents which version of the compiler to use for a
@@ -1009,7 +1009,7 @@ images with support for these features:
modules by checking out a recent version of the `mbed TLS Repository`_. It
is important to use a version that is compatible with TF and fixes any
known security vulnerabilities. See `mbed TLS Security Center`_ for more
- information. The latest version of TF is tested with tag ``mbedtls-2.4.2``.
+ information. The latest version of TF is tested with tag ``mbedtls-2.6.0``.
The ``drivers/auth/mbedtls/mbedtls_*.mk`` files contain the list of mbed TLS
source files the modules depend upon.
@@ -1478,10 +1478,10 @@ Running the software on FVP
The latest version of the AArch64 build of ARM Trusted Firmware has been tested
on the following ARM FVPs (64-bit host machine only).
-NOTE: Unless otherwise stated, the model version is Version 11.1 Build 11.1.22.
+NOTE: Unless otherwise stated, the model version is Version 11.2 Build 11.2.33.
- ``Foundation_Platform``
-- ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.7, Build 0.8.8702)
+- ``FVP_Base_AEMv8A-AEMv8A`` (Version 9.0, Build 0.8.9005)
- ``FVP_Base_Cortex-A35x4``
- ``FVP_Base_Cortex-A53x4``
- ``FVP_Base_Cortex-A57x4-A53x4``
@@ -1494,7 +1494,7 @@ NOTE: Unless otherwise stated, the model version is Version 11.1 Build 11.1.22.
The latest version of the AArch32 build of ARM Trusted Firmware has been tested
on the following ARM FVPs (64-bit host machine only).
-- ``FVP_Base_AEMv8A-AEMv8A`` (Version 8.7, Build 0.8.8702)
+- ``FVP_Base_AEMv8A-AEMv8A`` (Version 9.0, Build 0.8.9005)
- ``FVP_Base_Cortex-A32x4``
NOTE: The build numbers quoted above are those reported by launching the FVP
@@ -1871,10 +1871,10 @@ wakeup interrupt from RTC.
.. _Linaro: `Linaro Release Notes`_
.. _Linaro Release: `Linaro Release Notes`_
-.. _Linaro Release Notes: https://community.arm.com/tools/dev-platforms/b/documents/posts/linaro-release-notes-deprecated
-.. _Linaro Release 17.04: https://community.arm.com/tools/dev-platforms/b/documents/posts/linaro-release-notes-deprecated#LinaroRelease17.04
-.. _Linaro instructions: https://community.arm.com/dev-platforms/b/documents/posts/instructions-for-using-the-linaro-software-deliverables
-.. _Instructions for using Linaro's deliverables on Juno: https://community.arm.com/dev-platforms/b/documents/posts/using-linaros-deliverables-on-juno
+.. _Linaro Release Notes: https://community.arm.com/dev-platforms/w/docs/226/old-linaro-release-notes
+.. _Linaro Release 17.10: https://community.arm.com/dev-platforms/w/docs/226/old-linaro-release-notes#1710
+.. _Linaro instructions: https://community.arm.com/dev-platforms/w/docs/304/linaro-software-deliverables
+.. _Instructions for using Linaro's deliverables on Juno: https://community.arm.com/dev-platforms/w/docs/303/juno
.. _ARM Platforms Portal: https://community.arm.com/dev-platforms/
.. _Development Studio 5 (DS-5): http://www.arm.com/products/tools/software-tools/ds-5/index.php
.. _Dia: https://wiki.gnome.org/Apps/Dia/Download
diff --git a/drivers/synopsys/emmc/dw_mmc.c b/drivers/synopsys/emmc/dw_mmc.c
index e6904d14..701e6d53 100644
--- a/drivers/synopsys/emmc/dw_mmc.c
+++ b/drivers/synopsys/emmc/dw_mmc.c
@@ -146,7 +146,7 @@ static void dw_update_clk(void)
if ((data & CMD_START) == 0)
break;
data = mmio_read_32(dw_params.reg_base + DWMMC_RINTSTS);
- assert(data & INT_HLE);
+ assert((data & INT_HLE) == 0);
}
}
diff --git a/include/bl32/payloads/tlk.h b/include/bl32/payloads/tlk.h
index 4e06bcd9..941b6cc0 100644
--- a/include/bl32/payloads/tlk.h
+++ b/include/bl32/payloads/tlk.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,6 +20,7 @@
*/
#define TLK_REGISTER_LOGBUF TLK_TOS_YIELD_FID(0x1)
#define TLK_REGISTER_REQBUF TLK_TOS_YIELD_FID(0x2)
+#define TLK_REGISTER_NS_DRAM TLK_TOS_YIELD_FID(0x4)
#define TLK_RESUME_FID TLK_TOS_YIELD_FID(0x100)
#define TLK_SYSTEM_SUSPEND TLK_TOS_YIELD_FID(0xE001)
#define TLK_SYSTEM_RESUME TLK_TOS_YIELD_FID(0xE002)
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index ebaf7aa1..4ebf77bb 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -13,7 +13,7 @@
/*
* Helper macro to initialise EL3 registers we care about.
*/
- .macro el3_arch_init_common _exception_vectors
+ .macro el3_arch_init_common
/* ---------------------------------------------------------------------
* SCTLR_EL3 has already been initialised - read current value before
* modifying.
@@ -50,14 +50,6 @@
#endif /* IMAGE_BL31 */
/* ---------------------------------------------------------------------
- * Set the exception vectors.
- * ---------------------------------------------------------------------
- */
- adr x0, \_exception_vectors
- msr vbar_el3, x0
- isb
-
- /* ---------------------------------------------------------------------
* Initialise SCR_EL3, setting all fields rather than relying on hw.
* All fields are architecturally UNKNOWN on reset. The following fields
* do not change during the TF lifetime. The remaining fields are set to
@@ -221,6 +213,14 @@
.endif /* _warm_boot_mailbox */
/* ---------------------------------------------------------------------
+ * Set the exception vectors.
+ * ---------------------------------------------------------------------
+ */
+ adr x0, \_exception_vectors
+ msr vbar_el3, x0
+ isb
+
+ /* ---------------------------------------------------------------------
* It is a cold boot.
* Perform any processor specific actions upon reset e.g. cache, TLB
* invalidations etc.
@@ -228,7 +228,7 @@
*/
bl reset_handler
- el3_arch_init_common \_exception_vectors
+ el3_arch_init_common
.if \_secondary_cold_boot
/* -------------------------------------------------------------
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index 0230195a..beae5d06 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -287,6 +287,11 @@ DEFINE_COPROCR_RW_FUNCS(amcntenset1, AMCNTENSET1)
DEFINE_COPROCR_RW_FUNCS(amcntenclr0, AMCNTENCLR0)
DEFINE_COPROCR_RW_FUNCS(amcntenclr1, AMCNTENCLR1)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr00, AMEVCNTR00)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr01, AMEVCNTR01)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr02, AMEVCNTR02)
+DEFINE_COPROCR_RW_FUNCS_64(amevcntr03, AMEVCNTR03)
+
/*
* TLBI operation prototypes
*/
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 96e2d5fe..91aa484f 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -117,6 +117,9 @@
#define ID_AA64PFR0_SVE_SHIFT U(32)
#define ID_AA64PFR0_SVE_MASK U(0xf)
#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK U(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
#define ID_AA64DFR0_PMS_SHIFT U(32)
@@ -337,6 +340,11 @@
#define SPSR_T_ARM U(0x0)
#define SPSR_T_THUMB U(0x1)
+#define SPSR_M_SHIFT U(4)
+#define SPSR_M_MASK U(0x1)
+#define SPSR_M_AARCH64 U(0x0)
+#define SPSR_M_AARCH32 U(0x1)
+
#define DISABLE_ALL_EXCEPTIONS \
(DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT)
@@ -656,4 +664,45 @@
#define AMEVTYPER02_EL0 S3_3_C13_C6_2
#define AMEVTYPER03_EL0 S3_3_C13_C6_3
+/* Activity Monitor Group 1 Event Counter Registers */
+#define AMEVCNTR10_EL0 S3_3_C13_C12_0
+#define AMEVCNTR11_EL0 S3_3_C13_C12_1
+#define AMEVCNTR12_EL0 S3_3_C13_C12_2
+#define AMEVCNTR13_EL0 S3_3_C13_C12_3
+#define AMEVCNTR14_EL0 S3_3_C13_C12_4
+#define AMEVCNTR15_EL0 S3_3_C13_C12_5
+#define AMEVCNTR16_EL0 S3_3_C13_C12_6
+#define AMEVCNTR17_EL0 S3_3_C13_C12_7
+#define AMEVCNTR18_EL0 S3_3_C13_C13_0
+#define AMEVCNTR19_EL0 S3_3_C13_C13_1
+#define AMEVCNTR1A_EL0 S3_3_C13_C13_2
+#define AMEVCNTR1B_EL0 S3_3_C13_C13_3
+#define AMEVCNTR1C_EL0 S3_3_C13_C13_4
+#define AMEVCNTR1D_EL0 S3_3_C13_C13_5
+#define AMEVCNTR1E_EL0 S3_3_C13_C13_6
+#define AMEVCNTR1F_EL0 S3_3_C13_C13_7
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10_EL0 S3_3_C13_C14_0
+#define AMEVTYPER11_EL0 S3_3_C13_C14_1
+#define AMEVTYPER12_EL0 S3_3_C13_C14_2
+#define AMEVTYPER13_EL0 S3_3_C13_C14_3
+#define AMEVTYPER14_EL0 S3_3_C13_C14_4
+#define AMEVTYPER15_EL0 S3_3_C13_C14_5
+#define AMEVTYPER16_EL0 S3_3_C13_C14_6
+#define AMEVTYPER17_EL0 S3_3_C13_C14_7
+#define AMEVTYPER18_EL0 S3_3_C13_C15_0
+#define AMEVTYPER19_EL0 S3_3_C13_C15_1
+#define AMEVTYPER1A_EL0 S3_3_C13_C15_2
+#define AMEVTYPER1B_EL0 S3_3_C13_C15_3
+#define AMEVTYPER1C_EL0 S3_3_C13_C15_4
+#define AMEVTYPER1D_EL0 S3_3_C13_C15_5
+#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
+#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
+
+/* AMCGCR_EL0 definitions */
+#define AMCGCR_EL0_CG1NC_SHIFT U(8)
+#define AMCGCR_EL0_CG1NC_LENGTH U(8)
+#define AMCGCR_EL0_CG1NC_MASK U(0xff)
+
#endif /* __ARCH_H__ */
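
Given the AMCGCR_EL0.CG1NC field definitions above, and the read_amcgcr_el0()
accessor added to arch_helpers.h below, the number of implemented group 1
counters can be queried with a short helper. A sketch (the function name is an
assumption):

    /* Sketch: how many group 1 counters does this PE implement? */
    static unsigned int amu_group1_num_counters(void)
    {
        return (unsigned int)((read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
                              AMCGCR_EL0_CG1NC_MASK);
    }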
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 831dfb06..485ed432 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -322,6 +322,7 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir0_el1, ICC_EOIR0_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr0_el0, AMCNTENCLR0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
index d68c9572..940125da 100644
--- a/include/lib/cpus/aarch64/cortex_a75.h
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -50,7 +50,19 @@
* CPUAMEVTYPER<n> register and are disabled by default. Platforms may
* enable this with suitable programming.
*/
+#define CORTEX_A75_AMU_NR_COUNTERS 5
#define CORTEX_A75_AMU_GROUP0_MASK 0x7
#define CORTEX_A75_AMU_GROUP1_MASK (0 << 3)
+#ifndef __ASSEMBLY__
+#include <stdint.h>
+
+uint64_t cortex_a75_amu_cnt_read(int idx);
+void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+#endif /* __ASSEMBLY__ */
+
#endif /* __CORTEX_A75_H__ */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index 58899049..5e212ec3 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -46,12 +46,26 @@
#define CTX_GPREG_SP_EL0 U(0xf8)
#define CTX_GPREGS_END U(0x100)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_CVE_2017_5715_QUAD0 U(0x0)
+#define CTX_CVE_2017_5715_QUAD1 U(0x8)
+#define CTX_CVE_2017_5715_QUAD2 U(0x10)
+#define CTX_CVE_2017_5715_QUAD3 U(0x18)
+#define CTX_CVE_2017_5715_QUAD4 U(0x20)
+#define CTX_CVE_2017_5715_QUAD5 U(0x28)
+#define CTX_CVE_2017_5715_END U(0x30)
+#else
+#define CTX_CVE_2017_5715_OFFSET CTX_GPREGS_OFFSET
+#define CTX_CVE_2017_5715_END CTX_GPREGS_END
+#endif
+
/*******************************************************************************
* Constants that allow assembler code to access members of and the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32-bits wide but are stored as 64-bit values for convenience
******************************************************************************/
-#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EL3STATE_OFFSET (CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END)
#define CTX_SCR_EL3 U(0x0)
#define CTX_RUNTIME_SP U(0x8)
#define CTX_SPSR_EL3 U(0x10)
@@ -186,6 +200,9 @@
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
+#if WORKAROUND_CVE_2017_5715
+#define CTX_CVE_2017_5715_ALL (CTX_CVE_2017_5715_END >> DWORD_SHIFT)
+#endif
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#if CTX_INCLUDE_FPREGS
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
@@ -201,6 +218,10 @@
*/
DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+#if WORKAROUND_CVE_2017_5715
+DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL);
+#endif
+
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
@@ -242,6 +263,9 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
*/
typedef struct cpu_context {
gp_regs_t gpregs_ctx;
+#if WORKAROUND_CVE_2017_5715
+ cve_2017_5715_regs_t cve_2017_5715_regs_ctx;
+#endif
el3_state_t el3state_ctx;
el1_sys_regs_t sysregs_ctx;
#if CTX_INCLUDE_FPREGS
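
The net effect of these additions is that, when WORKAROUND_CVE_2017_5715 is
enabled, a six quad-word save area is reserved between the GP register block
and the 'el3_state' block; when disabled, the offsets collapse and the layout
is unchanged. A sketch of the invariant, using the CASSERT() convention seen
elsewhere in this change:

    /* Illustrative only: el3_state follows the 0x30-byte save area. */
    #if WORKAROUND_CVE_2017_5715
    CASSERT(CTX_EL3STATE_OFFSET ==
            (CTX_GPREGS_OFFSET + CTX_GPREGS_END + U(0x30)),
            assert_cve_2017_5715_ctx_offset);
    #endif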
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
index 9cfedb4d..64b3f630 100644
--- a/include/lib/el3_runtime/pubsub_events.h
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -17,6 +17,13 @@
*/
REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+/*
+ * These events are published before/after a CPU has been powered down/up
+ * via the PSCI CPU SUSPEND API.
+ */
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
+
#ifdef AARCH64
/*
* These events are published by the AArch64 context management framework
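
Handlers attach to these suspend events with SUBSCRIBE_TO_EVENT(), as the AMU
and Cortex-A75 code later in this change does. A minimal subscriber sketch
(the handler name is hypothetical):

    static void *my_pwrdown_hook(const void *arg)
    {
        /* Save any per-CPU state that is lost across power down. */
        return (void *)0;
    }
    SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_pwrdown_hook);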
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
index bbefe8ff..faa0ee12 100644
--- a/include/lib/extensions/amu.h
+++ b/include/lib/extensions/amu.h
@@ -7,9 +7,39 @@
#ifndef __AMU_H__
#define __AMU_H__
-/* Enable all group 0 counters */
+#include <sys/cdefs.h> /* for CASSERT() */
+#include <cassert.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/* All group 0 counters */
#define AMU_GROUP0_COUNTERS_MASK 0xf
+#ifdef PLAT_AMU_GROUP1_COUNTERS_MASK
+#define AMU_GROUP1_COUNTERS_MASK PLAT_AMU_GROUP1_COUNTERS_MASK
+#else
+#define AMU_GROUP1_COUNTERS_MASK 0
+#endif
+
+#ifdef PLAT_AMU_GROUP1_NR_COUNTERS
+#define AMU_GROUP1_NR_COUNTERS PLAT_AMU_GROUP1_NR_COUNTERS
+#else
+#define AMU_GROUP1_NR_COUNTERS 0
+#endif
+
+CASSERT(AMU_GROUP1_COUNTERS_MASK <= 0xffff, invalid_amu_group1_counters_mask);
+CASSERT(AMU_GROUP1_NR_COUNTERS <= 16, invalid_amu_group1_nr_counters);
+
+int amu_supported(void);
void amu_enable(int el2_unused);
+/* Group 0 configuration helpers */
+uint64_t amu_group0_cnt_read(int idx);
+void amu_group0_cnt_write(int idx, uint64_t val);
+
+/* Group 1 configuration helpers */
+uint64_t amu_group1_cnt_read(int idx);
+void amu_group1_cnt_write(int idx, uint64_t val);
+void amu_group1_set_evtype(int idx, unsigned int val);
+
#endif /* __AMU_H__ */
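
A sketch of how platform code might drive the group 1 helpers declared above
(the event number 0x11 is a placeholder, not a real event assignment):

    void plat_sample_amu_group1(void)
    {
        uint64_t count;

        amu_group1_set_evtype(0, 0x11); /* placeholder event number */
        amu_group1_cnt_write(0, 0);     /* start counting from zero */
        /* ... the workload of interest runs here ... */
        count = amu_group1_cnt_read(0);
        (void)count;
    }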
diff --git a/include/lib/extensions/amu_private.h b/include/lib/extensions/amu_private.h
new file mode 100644
index 00000000..0c660bb8
--- /dev/null
+++ b/include/lib/extensions/amu_private.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __AMU_PRIVATE_H__
+#define __AMU_PRIVATE_H__
+
+#include <stdint.h>
+
+uint64_t amu_group0_cnt_read_internal(int idx);
+void amu_group0_cnt_write_internal(int idx, uint64_t val);
+
+uint64_t amu_group1_cnt_read_internal(int idx);
+void amu_group1_cnt_write_internal(int idx, uint64_t val);
+void amu_group1_set_evtype_internal(int idx, unsigned int val);
+
+#endif /* __AMU_PRIVATE_H__ */
diff --git a/include/lib/utils.h b/include/lib/utils.h
index cfc83022..3d215c32 100644
--- a/include/lib/utils.h
+++ b/include/lib/utils.h
@@ -19,7 +19,7 @@
#include <types.h>
-typedef struct mem_region_t {
+typedef struct mem_region {
uintptr_t base;
size_t nbytes;
} mem_region_t;
diff --git a/include/plat/arm/board/common/board_arm_def.h b/include/plat/arm/board/common/board_arm_def.h
index e0c3c868..888629e3 100644
--- a/include/plat/arm/board/common/board_arm_def.h
+++ b/include/plat/arm/board/common/board_arm_def.h
@@ -90,7 +90,7 @@
* PLAT_ARM_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
* little space for growth.
*/
-#define PLAT_ARM_MAX_BL31_SIZE 0x1E000
+#define PLAT_ARM_MAX_BL31_SIZE 0x20000
#ifdef AARCH32
/*
diff --git a/include/services/mm_svc.h b/include/services/mm_svc.h
new file mode 100644
index 00000000..7a8a3eba
--- /dev/null
+++ b/include/services/mm_svc.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __MM_SVC_H__
+#define __MM_SVC_H__
+
+#include <utils_def.h>
+
+#define MM_VERSION_MAJOR U(1)
+#define MM_VERSION_MAJOR_SHIFT 16
+#define MM_VERSION_MAJOR_MASK U(0x7FFF)
+#define MM_VERSION_MINOR U(0)
+#define MM_VERSION_MINOR_SHIFT 0
+#define MM_VERSION_MINOR_MASK U(0xFFFF)
+#define MM_VERSION_FORM(major, minor) ((major << MM_VERSION_MAJOR_SHIFT) | (minor))
+#define MM_VERSION_COMPILED MM_VERSION_FORM(MM_VERSION_MAJOR, MM_VERSION_MINOR)
+
+/*
+ * SMC IDs defined in [1] for accessing MM services from the Non-secure world.
+ * These FIDs occupy the range 0x40 - 0x5f.
+ * [1] DEN0060A_ARM_MM_Interface_Specification.pdf
+ */
+#define MM_VERSION_AARCH32 U(0x84000040)
+
+#define MM_COMMUNICATE_AARCH64 U(0xC4000041)
+#define MM_COMMUNICATE_AARCH32 U(0x84000041)
+
+#endif /* __MM_SVC_H__ */
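
As a worked example of the version packing above, MM_VERSION_COMPILED for
major 1, minor 0 is (1 << 16) | 0 == 0x00010000. A sketch for unpacking a
version word (the function name is an assumption):

    static void mm_unpack_version(uint32_t version,
                                  unsigned int *major, unsigned int *minor)
    {
        *major = (version >> MM_VERSION_MAJOR_SHIFT) & MM_VERSION_MAJOR_MASK;
        *minor = (version >> MM_VERSION_MINOR_SHIFT) & MM_VERSION_MINOR_MASK;
    }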
diff --git a/include/services/spm_svc.h b/include/services/spm_svc.h
index 738979eb..8f872c39 100644
--- a/include/services/spm_svc.h
+++ b/include/services/spm_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,15 +10,14 @@
#include <utils_def.h>
#define SPM_VERSION_MAJOR U(0)
+#define SPM_VERSION_MAJOR_SHIFT 16
+#define SPM_VERSION_MAJOR_MASK U(0x7FFF)
#define SPM_VERSION_MINOR U(1)
-#define SPM_VERSION_FORM(major, minor) ((major << 16) | (minor))
+#define SPM_VERSION_MINOR_SHIFT 0
+#define SPM_VERSION_MINOR_MASK U(0xFFFF)
+#define SPM_VERSION_FORM(major, minor) ((major << SPM_VERSION_MAJOR_SHIFT) | (minor))
#define SPM_VERSION_COMPILED SPM_VERSION_FORM(SPM_VERSION_MAJOR, SPM_VERSION_MINOR)
-#define SP_VERSION_MAJOR U(1)
-#define SP_VERSION_MINOR U(0)
-#define SP_VERSION_FORM(major, minor) ((major << 16) | (minor))
-#define SP_VERSION_COMPILED SP_VERSION_FORM(SP_VERSION_MAJOR, SP_VERSION_MINOR)
-
/* The macros below are used to identify SPM calls from the SMC function ID */
#define SPM_FID_MASK U(0xffff)
#define SPM_FID_MIN_VALUE U(0x40)
@@ -31,6 +30,7 @@
* SMC IDs defined for accessing services implemented by the Secure Partition
* Manager from the Secure Partition(s). These services enable a partition to
* handle delegated events and request privileged operations from the manager.
+ * They occupy the range 0x60-0x7f.
*/
#define SPM_VERSION_AARCH32 U(0x84000060)
#define SP_EVENT_COMPLETE_AARCH64 U(0xC4000061)
@@ -51,16 +51,6 @@
#define SP_MEMORY_ATTRIBUTES_EXEC (U(0) << 2)
#define SP_MEMORY_ATTRIBUTES_NON_EXEC (U(1) << 2)
-/*
- * SMC IDs defined in [1] for accessing secure partition services from the
- * Non-secure world. These FIDs occupy the range 0x40 - 0x5f
- * [1] DEN0060A_ARM_MM_Interface_Specification.pdf
- */
-#define SP_VERSION_AARCH64 U(0xC4000040)
-#define SP_VERSION_AARCH32 U(0x84000040)
-
-#define MM_COMMUNICATE_AARCH64 U(0xC4000041)
-#define MM_COMMUNICATE_AARCH32 U(0x84000041)
/* SPM error codes. */
#define SPM_SUCCESS 0
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index a720e984..683be47e 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -383,6 +383,11 @@ func cortex_a57_reset_func
bl errata_a57_859972_wa
#endif
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index b0341256..93821b74 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,12 @@ func cortex_a72_reset_func
mov x0, x18
bl errata_a72_859971_wa
#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_mmu_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index f642816e..c43f07ec 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -36,6 +36,11 @@ func cortex_a73_disable_smp
endfunc cortex_a73_disable_smp
func cortex_a73_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+#endif
+
/* ---------------------------------------------
* Enable the SMP bit.
* Clobbers : x0
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 4cab9e4f..e66ad066 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,7 +11,120 @@
#include <plat_macros.S>
#include <cortex_a75.h>
+ .globl cortex_a75_amu_cnt_read
+ .globl cortex_a75_amu_cnt_write
+ .globl cortex_a75_amu_read_cpuamcntenset_el0
+ .globl cortex_a75_amu_read_cpuamcntenclr_el0
+ .globl cortex_a75_amu_write_cpuamcntenset_el0
+ .globl cortex_a75_amu_write_cpuamcntenclr_el0
+
+/*
+ * uint64_t cortex_a75_amu_cnt_read(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func cortex_a75_amu_cnt_read
+ adr x1, 1f
+ lsl x0, x0, #3
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, CPUAMEVCNTR0_EL0
+ ret
+ mrs x0, CPUAMEVCNTR1_EL0
+ ret
+ mrs x0, CPUAMEVCNTR2_EL0
+ ret
+ mrs x0, CPUAMEVCNTR3_EL0
+ ret
+ mrs x0, CPUAMEVCNTR4_EL0
+ ret
+endfunc cortex_a75_amu_cnt_read
+
+/*
+ * void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func cortex_a75_amu_cnt_write
+ adr x2, 1f
+ lsl x0, x0, #3
+ add x2, x2, x0
+ br x2
+
+1:
+	msr	CPUAMEVCNTR0_EL0, x1
+	ret
+	msr	CPUAMEVCNTR1_EL0, x1
+	ret
+	msr	CPUAMEVCNTR2_EL0, x1
+	ret
+	msr	CPUAMEVCNTR3_EL0, x1
+	ret
+	msr	CPUAMEVCNTR4_EL0, x1
+	ret
+endfunc cortex_a75_amu_cnt_write
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+ *
+ * Read the `CPUAMCNTENSET_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenset_el0
+ mrs x0, CPUAMCNTENSET_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenset_el0
+
+/*
+ * unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+ *
+ * Read the `CPUAMCNTENCLR_EL0` CPU register and return
+ * it in `x0`.
+ */
+func cortex_a75_amu_read_cpuamcntenclr_el0
+ mrs x0, CPUAMCNTENCLR_EL0
+ ret
+endfunc cortex_a75_amu_read_cpuamcntenclr_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENSET_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenset_el0
+ msr CPUAMCNTENSET_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenset_el0
+
+/*
+ * void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+ *
+ * Write `mask` to the `CPUAMCNTENCLR_EL0` CPU register.
+ */
+func cortex_a75_amu_write_cpuamcntenclr_el0
+	msr	CPUAMCNTENCLR_EL0, x0
+ ret
+endfunc cortex_a75_amu_write_cpuamcntenclr_el0
+
func cortex_a75_reset_func
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+ /*
+	 * If the field equals 1 then branch targets trained in one
+ * context cannot affect speculative execution in a different context.
+ */
+ cmp x0, #1
+	b.eq	1f
+
+ adr x0, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x0
+1:
+#endif
+
#if ENABLE_AMU
/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
mrs x0, actlr_el3
diff --git a/lib/cpus/aarch64/cortex_a75_pubsub.c b/lib/cpus/aarch64/cortex_a75_pubsub.c
new file mode 100644
index 00000000..c1089a60
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a75_pubsub.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cortex_a75.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+struct amu_ctx {
+ uint64_t cnts[CORTEX_A75_AMU_NR_COUNTERS];
+ uint16_t mask;
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+static void *cortex_a75_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Save counter configuration */
+ ctx->mask = cortex_a75_amu_read_cpuamcntenset_el0();
+
+ /* Ensure counters are disabled */
+ cortex_a75_amu_write_cpuamcntenclr_el0(ctx->mask);
+ isb();
+
+ /* Save counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ ctx->cnts[i] = cortex_a75_amu_cnt_read(i);
+
+ return 0;
+}
+
+static void *cortex_a75_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ unsigned int midr;
+ unsigned int midr_mask;
+ int i;
+
+ midr = read_midr();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
+ (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ if ((midr & midr_mask) != (CORTEX_A75_MIDR & midr_mask))
+ return 0;
+
+ /* Counters were disabled in `cortex_a75_context_save()` */
+ assert(cortex_a75_amu_read_cpuamcntenset_el0() == 0);
+
+ /* Restore counters */
+ for (i = 0; i < CORTEX_A75_AMU_NR_COUNTERS; i++)
+ cortex_a75_amu_cnt_write(i, ctx->cnts[i]);
+ isb();
+
+ /* Restore counter configuration */
+ cortex_a75_amu_write_cpuamcntenset_el0(ctx->mask);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_a75_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_a75_context_restore);
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
new file mode 100644
index 00000000..cd29266e
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_bpiall_vbar0_runtime_exceptions
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
+#define EMIT_SMC 0xe1600070
+
+ .macro enter_workaround _stub_name
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ adr x4, \_stub_name
+
+ /*
+ * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
+	 * all interrupts masked in preparation for running the workaround
+ * stub in S-EL1. VBAR_EL3 points to the vector table that
+ * will handle the SMC back from the workaround stub.
+ */
+ ldp x0, x1, [x4, #0]
+
+ /*
+ * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
+ * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+ */
+ ldp x2, x3, [x4, #16]
+
+ mrs x4, scr_el3
+ mrs x5, spsr_el3
+ mrs x6, elr_el3
+ mrs x7, sctlr_el1
+ mrs x8, esr_el3
+
+ /* Preserve system registers in the workaround context */
+ stp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ stp x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ stp x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+
+ /*
+ * Reload system registers with the crafted values
+ * in preparation for entry in S-EL1.
+ */
+ msr spsr_el3, x0
+ msr vbar_el3, x1
+ msr sctlr_el1, x2
+ msr elr_el3, x3
+
+ eret
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar0_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+aarch32_stub_smc:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(1)
+ .word EMIT_SMC
+aarch32_stub_ctx_smc:
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+
+ /*
+ * VBAR_EL3 points to vbar1 which is the vector table
+ * used while the workaround is executing.
+ */
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+
+ /* Setup SCTLR_EL1 with MMU off and I$ on */
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+
+ /* ELR_EL3 is setup to point to the sync exception stub in AArch32 */
+ .quad aarch32_stub_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar0_irq_sp_el0
+ b irq_sp_el0
+aarch32_stub_irq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(2)
+ .word EMIT_SMC
+aarch32_stub_ctx_irq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_irq
+ check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+ b fiq_sp_el0
+aarch32_stub_fiq:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(4)
+ .word EMIT_SMC
+aarch32_stub_ctx_fiq:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar0_serror_sp_el0
+ b serror_sp_el0
+aarch32_stub_serror:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(8)
+ .word EMIT_SMC
+aarch32_stub_ctx_serror:
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+ .quad workaround_bpiall_vbar1_runtime_exceptions
+ .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT
+ .quad aarch32_stub_serror
+ check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar0_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar0_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar0_irq_aarch64
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch64
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch64
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar0_serror_aarch64
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
+ enter_workaround aarch32_stub_ctx_smc
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar0_irq_aarch32
+ enter_workaround aarch32_stub_ctx_irq
+ check_vector_size workaround_bpiall_vbar0_irq_aarch32
+
+vector_entry workaround_bpiall_vbar0_fiq_aarch32
+ enter_workaround aarch32_stub_ctx_fiq
+ check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar0_serror_aarch32
+ enter_workaround aarch32_stub_ctx_serror
+ check_vector_size workaround_bpiall_vbar0_serror_aarch32
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used while the workaround is executing. It
+ * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError
+ * workaround stubs to enter EL3 from S-EL1. It restores the previous
+ * EL3 state before proceeding with the normal runtime exception vector.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar1_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+
+vector_entry workaround_bpiall_vbar1_irq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+
+vector_entry workaround_bpiall_vbar1_serror_sp_el0
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+
+vector_entry workaround_bpiall_vbar1_irq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+
+vector_entry workaround_bpiall_vbar1_serror_sp_elx
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+
+vector_entry workaround_bpiall_vbar1_irq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_irq_aarch64
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch64
+ b report_unhandled_interrupt
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+
+vector_entry workaround_bpiall_vbar1_serror_aarch64
+ b report_unhandled_exception
+ check_vector_size workaround_bpiall_vbar1_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+ /* Restore register state from the workaround context */
+ ldp x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ ldp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ ldp x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /* Apply the restored system register state */
+ msr scr_el3, x2
+ msr spsr_el3, x3
+ msr elr_el3, x4
+ msr sctlr_el1, x5
+ msr esr_el3, x6
+
+ /*
+ * Workaround is complete, so swap VBAR_EL3 to point
+	 * to the workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x2, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x2
+
+ /*
+ * Restore all GP regs except x0 and x1. The value in x0
+ * indicates the type of the original exception.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /*
+ * Each of these handlers will first restore x0 and x1 from
+ * the context and the branch to the common implementation for
+	 * the context and then branch to the common implementation for
+ */
+ tbnz x0, #1, workaround_bpiall_vbar1_irq
+ tbnz x0, #2, workaround_bpiall_vbar1_fiq
+ tbnz x0, #3, workaround_bpiall_vbar1_serror
+
+ /* Fallthrough case for Sync exception */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b sync_exception_aarch64
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar1_irq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_irq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b irq_aarch64
+ check_vector_size workaround_bpiall_vbar1_irq_aarch32
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_fiq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b fiq_aarch64
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar1_serror_aarch32
+ b report_unhandled_exception
+workaround_bpiall_vbar1_serror:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b serror_aarch64
+ check_vector_size workaround_bpiall_vbar1_serror_aarch32
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
new file mode 100644
index 00000000..f4781484
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x0, sctlr_el3
+ /* Disable MMU */
+ bic x1, x0, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Restore MMU config */
+ msr sctlr_el3, x0
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size workaround_mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+ check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+ check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+ check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+ check_vector_size workaround_mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+ check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+ check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+ check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+ check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31adfb42..3ba8c1fc 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -16,6 +16,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
# It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT ?=1
+WORKAROUND_CVE_2017_5715 ?=1
+
# Process SKIP_A57_L1_FLUSH_PWR_DWN flag
$(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
$(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -28,6 +30,9 @@ $(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
$(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
# CPU Errata Build flags.
# These should be enabled by the platform if the erratum workaround needs to be
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
index d450bd69..effc5bd3 100644
--- a/lib/extensions/amu/aarch32/amu.c
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,26 +7,100 @@
#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <pubsub_events.h>
+
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
void amu_enable(int el2_unused)
{
uint64_t features;
features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
- if ((features & ID_PFR0_AMU_MASK) == 1) {
- if (el2_unused) {
- uint64_t v;
-
- /*
- * Non-secure access from EL0 or EL1 to the Activity Monitor
- * registers do not trap to EL2.
- */
- v = read_hcptr();
- v &= ~TAM_BIT;
- write_hcptr(v);
- }
-
- /* Enable group 0 counters */
- write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return;
+
+ if (el2_unused) {
+ uint64_t v;
+
+ /*
+ * Non-secure access from EL0 or EL1 to the Activity Monitor
+ * registers do not trap to EL2.
+ */
+ v = read_hcptr();
+ v &= ~TAM_BIT;
+ write_hcptr(v);
}
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Assert that group 0 counter configuration is what we expect */
+ assert(read_amcntenset0() == AMU_GROUP0_COUNTERS_MASK);
+
+ /*
+ * Disable group 0 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
+ isb();
+
+ ctx->group0_cnts[0] = read64_amevcntr00();
+ ctx->group0_cnts[1] = read64_amevcntr01();
+ ctx->group0_cnts[2] = read64_amevcntr02();
+ ctx->group0_cnts[3] = read64_amevcntr03();
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx;
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) != 1)
+ return (void *)-1;
+
+ ctx = &amu_ctxs[plat_my_core_pos()];
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0() == 0);
+
+ /* Restore group 0 counters */
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 0))
+ write64_amevcntr00(ctx->group0_cnts[0]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 1))
+ write64_amevcntr01(ctx->group0_cnts[1]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 2))
+ write64_amevcntr02(ctx->group0_cnts[2]);
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << 3))
+ write64_amevcntr03(ctx->group0_cnts[3]);
+ isb();
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
index 007b3494..d7645a9e 100644
--- a/lib/extensions/amu/aarch64/amu.c
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -1,40 +1,185 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <amu.h>
+#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
+#include <assert.h>
+#include <platform.h>
+#include <pubsub_events.h>
-void amu_enable(int el2_unused)
+#define AMU_GROUP0_NR_COUNTERS 4
+
+struct amu_ctx {
+ uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
+ uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
+};
+
+static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+
+int amu_supported(void)
{
uint64_t features;
features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
- if ((features & ID_AA64PFR0_AMU_MASK) == 1) {
- uint64_t v;
-
- if (el2_unused) {
- /*
- * CPTR_EL2.TAM: Set to zero so any accesses to
- * the Activity Monitor registers do not trap to EL2.
- */
- v = read_cptr_el2();
- v &= ~CPTR_EL2_TAM_BIT;
- write_cptr_el2(v);
- }
+ return (features & ID_AA64PFR0_AMU_MASK) == 1;
+}
+/*
+ * Enable counters. This function is meant to be invoked
+ * by the context management library before exiting from EL3.
+ */
+void amu_enable(int el2_unused)
+{
+ uint64_t v;
+
+ if (!amu_supported())
+ return;
+
+ if (el2_unused) {
/*
- * CPTR_EL3.TAM: Set to zero so that any accesses to
- * the Activity Monitor registers do not trap to EL3.
+ * CPTR_EL2.TAM: Set to zero so any accesses to
+ * the Activity Monitor registers do not trap to EL2.
*/
- v = read_cptr_el3();
- v &= ~TAM_BIT;
- write_cptr_el3(v);
-
- /* Enable group 0 counters */
- write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ v = read_cptr_el2();
+ v &= ~CPTR_EL2_TAM_BIT;
+ write_cptr_el2(v);
}
+
+ /*
+ * CPTR_EL3.TAM: Set to zero so that any accesses to
+ * the Activity Monitor registers do not trap to EL3.
+ */
+ v = read_cptr_el3();
+ v &= ~TAM_BIT;
+ write_cptr_el3(v);
+
+ /* Enable group 0 counters */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ /* Enable group 1 counters */
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}
+
+/* Read the group 0 counter identified by the given `idx`. */
+uint64_t amu_group0_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ return amu_group0_cnt_read_internal(idx);
+}
+
+/* Write the group 0 counter identified by the given `idx` with `val`. */
+void amu_group0_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP0_NR_COUNTERS);
+
+ amu_group0_cnt_write_internal(idx, val);
+ isb();
+}
+
+/* Read the group 1 counter identified by the given `idx`. */
+uint64_t amu_group1_cnt_read(int idx)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ return amu_group1_cnt_read_internal(idx);
+}
+
+/* Write the group 1 counter identified by the given `idx` with `val`. */
+void amu_group1_cnt_write(int idx, uint64_t val)
+{
+ assert(amu_supported());
+ assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_cnt_write_internal(idx, val);
+ isb();
+}
+
+/*
+ * Program the event type register for the given `idx` with
+ * the event number `val`.
+ */
+void amu_group1_set_evtype(int idx, unsigned int val)
+{
+ assert(amu_supported());
+	assert(idx >= 0 && idx < AMU_GROUP1_NR_COUNTERS);
+
+ amu_group1_set_evtype_internal(idx, val);
+ isb();
+}
+
+static void *amu_context_save(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Assert that group 0/1 counter configuration is what we expect */
+ assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK &&
+ read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /*
+ * Disable group 0/1 counters to avoid other observers like SCP sampling
+ * counter values from the future via the memory mapped view.
+ */
+ write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
+ isb();
+
+ /* Save group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ ctx->group0_cnts[i] = amu_group0_cnt_read(i);
+
+ /* Save group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ ctx->group1_cnts[i] = amu_group1_cnt_read(i);
+
+ return 0;
+}
+
+static void *amu_context_restore(const void *arg)
+{
+ struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+ int i;
+
+ if (!amu_supported())
+ return (void *)-1;
+
+ /* Counters were disabled in `amu_context_save()` */
+ assert(read_amcntenset0_el0() == 0 && read_amcntenset1_el0() == 0);
+
+ assert((sizeof(int) * 8) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK)
+ <= AMU_GROUP1_NR_COUNTERS);
+
+ /* Restore group 0 counters */
+ for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
+ if (AMU_GROUP0_COUNTERS_MASK & (1U << i))
+ amu_group0_cnt_write(i, ctx->group0_cnts[i]);
+
+ /* Restore group 1 counters */
+ for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
+ if (AMU_GROUP1_COUNTERS_MASK & (1U << i))
+ amu_group1_cnt_write(i, ctx->group1_cnts[i]);
+ isb();
+
+ /* Restore group 0/1 counter configuration */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
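
Note: with the new group 1 API, programming and sampling an event takes three
calls. A hypothetical usage sketch (the event number 0x11 is made up; real
event numbers are implementation defined):

#include <amu.h>

static uint64_t measure_region(void (*region)(void))
{
	uint64_t before, after;

	amu_group1_set_evtype(0, 0x11);	/* hypothetical event number */
	before = amu_group1_cnt_read(0);
	region();			/* code under measurement */
	after = amu_group1_cnt_read(0);

	return after - before;
}
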
diff --git a/lib/extensions/amu/aarch64/amu_helpers.S b/lib/extensions/amu/aarch64/amu_helpers.S
new file mode 100644
index 00000000..e0b1f564
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu_helpers.S
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+ .globl amu_group0_cnt_read_internal
+ .globl amu_group0_cnt_write_internal
+ .globl amu_group1_cnt_read_internal
+ .globl amu_group1_cnt_write_internal
+ .globl amu_group1_set_evtype_internal
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #2
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+	 * Given `idx`, calculate the address of the mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR00_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR01_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR02_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR03_EL0 /* index 3 */
+ ret
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #2
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+	 * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR00_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR01_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR02_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR03_EL0, x1 /* index 3 */
+ ret
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `x0`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x1, x0
+ lsr x1, x1, #4
+ cmp x1, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+	 * Given `idx`, calculate the address of the mrs/ret instruction pair
+ * in the table below.
+ */
+ adr x1, 1f
+ lsl x0, x0, #3 /* each mrs/ret sequence is 8 bytes */
+ add x1, x1, x0
+ br x1
+
+1:
+ mrs x0, AMEVCNTR10_EL0 /* index 0 */
+ ret
+ mrs x0, AMEVCNTR11_EL0 /* index 1 */
+ ret
+ mrs x0, AMEVCNTR12_EL0 /* index 2 */
+ ret
+ mrs x0, AMEVCNTR13_EL0 /* index 3 */
+ ret
+ mrs x0, AMEVCNTR14_EL0 /* index 4 */
+ ret
+ mrs x0, AMEVCNTR15_EL0 /* index 5 */
+ ret
+ mrs x0, AMEVCNTR16_EL0 /* index 6 */
+ ret
+ mrs x0, AMEVCNTR17_EL0 /* index 7 */
+ ret
+ mrs x0, AMEVCNTR18_EL0 /* index 8 */
+ ret
+ mrs x0, AMEVCNTR19_EL0 /* index 9 */
+ ret
+ mrs x0, AMEVCNTR1A_EL0 /* index 10 */
+ ret
+ mrs x0, AMEVCNTR1B_EL0 /* index 11 */
+ ret
+ mrs x0, AMEVCNTR1C_EL0 /* index 12 */
+ ret
+ mrs x0, AMEVCNTR1D_EL0 /* index 13 */
+ ret
+ mrs x0, AMEVCNTR1E_EL0 /* index 14 */
+ ret
+ mrs x0, AMEVCNTR1F_EL0 /* index 15 */
+ ret
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+	 * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVCNTR10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVCNTR11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVCNTR12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVCNTR13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVCNTR14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVCNTR15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVCNTR16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVCNTR17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVCNTR18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVCNTR19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVCNTR1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVCNTR1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVCNTR1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVCNTR1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVCNTR1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVCNTR1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+ /*
+ * It can be dangerous to call this function with an
+ * out of bounds index. Ensure `idx` is valid.
+ */
+ mov x2, x0
+ lsr x2, x2, #4
+ cmp x2, #0
+ ASM_ASSERT(eq)
+
+	/* val should be within [0, 65535] */
+ mov x2, x1
+ lsr x2, x2, #16
+ cmp x2, #0
+ ASM_ASSERT(eq)
+#endif
+
+ /*
+	 * Given `idx`, calculate the address of the msr/ret instruction pair
+ * in the table below.
+ */
+ adr x2, 1f
+ lsl x0, x0, #3 /* each msr/ret sequence is 8 bytes */
+ add x2, x2, x0
+ br x2
+
+1:
+ msr AMEVTYPER10_EL0, x1 /* index 0 */
+ ret
+ msr AMEVTYPER11_EL0, x1 /* index 1 */
+ ret
+ msr AMEVTYPER12_EL0, x1 /* index 2 */
+ ret
+ msr AMEVTYPER13_EL0, x1 /* index 3 */
+ ret
+ msr AMEVTYPER14_EL0, x1 /* index 4 */
+ ret
+ msr AMEVTYPER15_EL0, x1 /* index 5 */
+ ret
+ msr AMEVTYPER16_EL0, x1 /* index 6 */
+ ret
+ msr AMEVTYPER17_EL0, x1 /* index 7 */
+ ret
+ msr AMEVTYPER18_EL0, x1 /* index 8 */
+ ret
+ msr AMEVTYPER19_EL0, x1 /* index 9 */
+ ret
+ msr AMEVTYPER1A_EL0, x1 /* index 10 */
+ ret
+ msr AMEVTYPER1B_EL0, x1 /* index 11 */
+ ret
+ msr AMEVTYPER1C_EL0, x1 /* index 12 */
+ ret
+ msr AMEVTYPER1D_EL0, x1 /* index 13 */
+ ret
+ msr AMEVTYPER1E_EL0, x1 /* index 14 */
+ ret
+ msr AMEVTYPER1F_EL0, x1 /* index 15 */
+ ret
+endfunc amu_group1_set_evtype_internal
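
Note: each handler above dispatches through a computed branch: every mrs/ret
(or msr/ret) pair is two 4-byte instructions, so the branch target is simply
the table base plus idx * 8. The same arithmetic in C, for reference:

#include <stdint.h>

/* Branch target for entry `idx` in a table of 8-byte instruction pairs. */
static uintptr_t dispatch_target(uintptr_t table_base, int idx)
{
	return table_base + ((uintptr_t)idx << 3);
}
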
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index d9490672..a77972d3 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -14,6 +14,7 @@
#include <debug.h>
#include <platform.h>
#include <pmf.h>
+#include <pubsub_events.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"
@@ -68,6 +69,8 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
{
unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+ PUBLISH_EVENT(psci_suspend_pwrdown_start);
+
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
@@ -308,6 +311,8 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx,
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+ PUBLISH_EVENT(psci_suspend_pwrdown_finish);
+
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
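
Note: these two PUBLISH_EVENT calls are what allow the AMU code above to hook
suspend entry/exit without PSCI knowing about it. Any driver can subscribe the
same way; a minimal sketch (the handler body is hypothetical, the event name
and the (void *)-1 convention follow the AMU subscribers above):

#include <pubsub_events.h>

static void *my_pwrdown_hook(const void *arg)
{
	/* save some per-core state here */
	return 0;	/* the AMU handlers return (void *)-1 when skipped */
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, my_pwrdown_hook);
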
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 0acfacbf..75c5a912 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -893,7 +893,7 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* Check if the mapping function actually managed to map
* anything. If not, just return now.
*/
- if (mm_cursor->base_va >= end_va)
+ if (mm->base_va >= end_va)
return -ENOMEM;
/*
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
index e1bfbbe7..a4fbc5ac 100644
--- a/make_helpers/build_macros.mk
+++ b/make_helpers/build_macros.mk
@@ -101,7 +101,7 @@ endef
# FIP_ADD_PAYLOAD appends the command line arguments required by fiptool
# to package a new payload. Optionally, it adds the dependency on this payload
# $(1) = payload filename (i.e. bl31.bin)
-# $(2) = command line option for the specified payload (i.e. --bl31)
+# $(2) = command line option for the specified payload (i.e. --soc-fw)
# $(3) = fip target dependency (optional) (i.e. bl31)
define FIP_ADD_PAYLOAD
$(eval FIP_ARGS += $(2) $(1))
@@ -121,14 +121,15 @@ endef
# using a build option. It also adds a dependency on the image file, aborting
# the build if the file does not exist.
# $(1) = build option to specify the image filename (SCP_BL2, BL33, etc)
-# $(2) = command line option for fiptool (scp_bl2, bl33, etc)
+# $(2) = command line option for fiptool (--scp-fw, --nt-fw, etc)
# Example:
-# $(eval $(call FIP_ADD_IMG,BL33,--bl33))
+# $(eval $(call FIP_ADD_IMG,BL33,--nt-fw))
define FIP_ADD_IMG
CRT_DEPS += check_$(1)
FIP_DEPS += check_$(1)
$(call FIP_ADD_PAYLOAD,$(value $(1)),$(2))
+.PHONY: check_$(1)
check_$(1):
$$(if $(value $(1)),,$$(error "Platform '${PLAT}' requires $(1). Please set $(1) to point to the right file"))
endef
@@ -154,14 +155,15 @@ endef
# FWU_FIP_ADD_IMG allows the platform to pack a binary image in the FWU FIP
# $(1) build option to specify the image filename (BL2U, NS_BL2U, etc)
-# $(2) command line option for fiptool (bl2u, ns_bl2u, etc)
+# $(2) command line option for fiptool (--ap-fwu-cfg, --fwu, etc)
# Example:
-# $(eval $(call FWU_FIP_ADD_IMG,BL2U,--bl2u))
+# $(eval $(call FWU_FIP_ADD_IMG,BL2U,--ap-fwu-cfg))
define FWU_FIP_ADD_IMG
FWU_CRT_DEPS += check_$(1)
FWU_FIP_DEPS += check_$(1)
$(call FWU_FIP_ADD_PAYLOAD,$(value $(1)),$(2))
+.PHONY: check_$(1)
check_$(1):
$$(if $(value $(1)),,$$(error "Platform '${PLAT}' requires $(1). Please set $(1) to point to the right file"))
endef
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 632eb1f7..a257784c 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -157,6 +157,10 @@ ENABLE_PLAT_COMPAT := 0
# Enable Activity Monitor Unit extensions by default
ENABLE_AMU := 1
+ifeq (${ENABLE_AMU},1)
+BL31_SOURCES += lib/cpus/aarch64/cortex_a75_pubsub.c
+endif
+
ifneq (${ENABLE_STACK_PROTECTOR},0)
PLAT_BL_COMMON_SOURCES += plat/arm/board/fvp/fvp_stack_protector.c
endif
diff --git a/plat/hisilicon/hikey/platform.mk b/plat/hisilicon/hikey/platform.mk
index 18b5e15e..524fa6a4 100644
--- a/plat/hisilicon/hikey/platform.mk
+++ b/plat/hisilicon/hikey/platform.mk
@@ -120,3 +120,5 @@ endif
ERRATA_A53_836870 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+
+FIP_ALIGN := 512
diff --git a/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S b/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S
index c88f68ee..d18399fb 100644
--- a/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S
+++ b/plat/hisilicon/hikey960/aarch64/hikey960_helpers.S
@@ -16,8 +16,6 @@
.globl plat_crash_console_putc
.globl plat_report_exception
.globl plat_reset_handler
- .globl set_retention_ticks
- .globl clr_retention_ticks
.globl clr_ex
.globl nop
@@ -139,35 +137,6 @@ func plat_reset_handler
endfunc plat_reset_handler
/* -----------------------------------------------------
- * void set_retention_ticks(unsigned int val);
- * Clobber list : x0
- * -----------------------------------------------------
- */
-func set_retention_ticks
- mrs x0, CORTEX_A53_ECTLR_EL1
- bic x0, x0, #CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK
- orr x0, x0, #RETENTION_ENTRY_TICKS_8
- msr CORTEX_A53_ECTLR_EL1, x0
- isb
- dsb sy
- ret
-endfunc set_retention_ticks
-
- /* -----------------------------------------------------
- * void clr_retention_ticks(unsigned int val);
- * Clobber list : x0
- * -----------------------------------------------------
- */
-func clr_retention_ticks
- mrs x0, CORTEX_A53_ECTLR_EL1
- bic x0, x0, #CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK
- msr CORTEX_A53_ECTLR_EL1, x0
- isb
- dsb sy
- ret
-endfunc clr_retention_ticks
-
- /* -----------------------------------------------------
* void clrex(void);
* -----------------------------------------------------
*/
diff --git a/plat/hisilicon/hikey960/hikey960_bl1_setup.c b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
index 6dfada75..11f143a8 100644
--- a/plat/hisilicon/hikey960/hikey960_bl1_setup.c
+++ b/plat/hisilicon/hikey960/hikey960_bl1_setup.c
@@ -519,6 +519,11 @@ static void hikey960_regulator_enable(void)
set_audio_power_up();
set_pcie_power_up();
set_isp_srt_power_up();
+
+ /* set ISP_CORE_CTRL_S to unsecure mode */
+ mmio_write_32(0xe8583800, 0x7);
+ /* set ISP_SUB_CTRL_S to unsecure mode */
+ mmio_write_32(0xe8583804, 0xf);
}
static void hikey960_ufs_reset(void)
@@ -642,6 +647,8 @@ static void hikey960_pinmux_init(void)
}
/* GPIO005 - PMU SSI, 10mA */
mmio_write_32(IOCG_006_REG, 2 << 4);
+ /* GPIO213 - PCIE_CLKREQ_N */
+ mmio_write_32(IOMG_AO_033_REG, 1);
}
/*
diff --git a/plat/hisilicon/hikey960/hikey960_pm.c b/plat/hisilicon/hikey960/hikey960_pm.c
index 078f0d81..6609530c 100644
--- a/plat/hisilicon/hikey960/hikey960_pm.c
+++ b/plat/hisilicon/hikey960/hikey960_pm.c
@@ -26,38 +26,6 @@
#define SYSTEM_PWR_STATE(state) \
((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
-#define PSTATE_WIDTH 4
-#define PSTATE_MASK ((1 << PSTATE_WIDTH) - 1)
-
-#define MAKE_PWRSTATE(lvl2_state, lvl1_state, lvl0_state, pwr_lvl, type) \
- (((lvl2_state) << (PSTATE_ID_SHIFT + PSTATE_WIDTH * 2)) | \
- ((lvl1_state) << (PSTATE_ID_SHIFT + PSTATE_WIDTH)) | \
- ((lvl0_state) << (PSTATE_ID_SHIFT)) | \
- ((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
- ((type) << PSTATE_TYPE_SHIFT))
-
-/*
- * The table storing the valid idle power states. Ensure that the
- * array entries are populated in ascending order of state-id to
- * enable us to use binary search during power state validation.
- * The table must be terminated by a NULL entry.
- */
-const unsigned int hikey960_pwr_idle_states[] = {
- /* State-id - 0x001 */
- MAKE_PWRSTATE(PLAT_MAX_RUN_STATE, PLAT_MAX_RUN_STATE,
- PLAT_MAX_STB_STATE, MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY),
- /* State-id - 0x002 */
- MAKE_PWRSTATE(PLAT_MAX_RUN_STATE, PLAT_MAX_RUN_STATE,
- PLAT_MAX_RET_STATE, MPIDR_AFFLVL0, PSTATE_TYPE_STANDBY),
- /* State-id - 0x003 */
- MAKE_PWRSTATE(PLAT_MAX_RUN_STATE, PLAT_MAX_RUN_STATE,
- PLAT_MAX_OFF_STATE, MPIDR_AFFLVL0, PSTATE_TYPE_POWERDOWN),
- /* State-id - 0x033 */
- MAKE_PWRSTATE(PLAT_MAX_RUN_STATE, PLAT_MAX_OFF_STATE,
- PLAT_MAX_OFF_STATE, MPIDR_AFFLVL1, PSTATE_TYPE_POWERDOWN),
- 0,
-};
-
#define DMAC_GLB_REG_SEC 0x694
#define AXI_CONF_BASE 0x820
@@ -66,24 +34,17 @@ static uintptr_t hikey960_sec_entrypoint;
static void hikey960_pwr_domain_standby(plat_local_state_t cpu_state)
{
unsigned long scr;
- unsigned int val = 0;
-
- assert(cpu_state == PLAT_MAX_STB_STATE ||
- cpu_state == PLAT_MAX_RET_STATE);
scr = read_scr_el3();
- /* Enable Physical IRQ and FIQ to wake the CPU*/
+ /* Enable Physical IRQ and FIQ to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
- if (cpu_state == PLAT_MAX_RET_STATE)
- set_retention_ticks(val);
-
+	/* Add barriers before the CPU enters the WFI state */
+ isb();
+ dsb();
wfi();
- if (cpu_state == PLAT_MAX_RET_STATE)
- clr_retention_ticks(val);
-
/*
	 * Restore SCR to the original value; synchronisation of
	 * scr_el3 is done by eret during el3_exit to save some
@@ -161,34 +122,38 @@ static void __dead2 hikey960_system_reset(void)
int hikey960_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
- unsigned int state_id;
+ unsigned int pstate = psci_get_pstate_type(power_state);
+ unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
int i;
assert(req_state);
- /*
- * Currently we are using a linear search for finding the matching
- * entry in the idle power state array. This can be made a binary
- * search if the number of entries justify the additional complexity.
- */
- for (i = 0; !!hikey960_pwr_idle_states[i]; i++) {
- if (power_state == hikey960_pwr_idle_states[i])
- break;
- }
-
- /* Return error if entry not found in the idle state array */
- if (!hikey960_pwr_idle_states[i])
+ if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
- i = 0;
- state_id = psci_get_pstate_id(power_state);
-
- /* Parse the State ID and populate the state info parameter */
- while (state_id) {
- req_state->pwr_domain_state[i++] = state_id & PSTATE_MASK;
- state_id >>= PSTATE_WIDTH;
+ /* Sanity check the requested state */
+ if (pstate == PSTATE_TYPE_STANDBY) {
+ /*
+ * It's possible to enter standby only on power level 0
+ * Ignore any other power level.
+ */
+ if (pwr_lvl != MPIDR_AFFLVL0)
+ return PSCI_E_INVALID_PARAMS;
+
+ req_state->pwr_domain_state[MPIDR_AFFLVL0] =
+ PLAT_MAX_RET_STATE;
+ } else {
+ for (i = MPIDR_AFFLVL0; i <= pwr_lvl; i++)
+ req_state->pwr_domain_state[i] =
+ PLAT_MAX_OFF_STATE;
}
+ /*
+ * We expect the 'state id' to be zero.
+ */
+ if (psci_get_pstate_id(power_state))
+ return PSCI_E_INVALID_PARAMS;
+
return PSCI_E_SUCCESS;
}
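
Note: the rewritten validator accepts exactly two shapes of power_state.
Assuming the original PSCI power_state layout (state type in bit 16, power
level in bits [25:24], state ID in bits [15:0]), the accepted encodings look
like this sketch (the shift macros are from generic TF headers, the two
composite names are hypothetical):

/* CPU standby: type = standby (0), power level = 0, state ID = 0 */
#define HIKEY960_CPU_STANDBY	0x00000000U

/* Cluster power down: type = powerdown (1), power level = 1, state ID = 0 */
#define HIKEY960_CLUSTER_OFF	((1U << PSTATE_TYPE_SHIFT) | \
				 (1U << PSTATE_PWR_LVL_SHIFT))
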
diff --git a/plat/hisilicon/hikey960/include/hi3660.h b/plat/hisilicon/hikey960/include/hi3660.h
index 83d1b363..ab7b8aa4 100644
--- a/plat/hisilicon/hikey960/include/hi3660.h
+++ b/plat/hisilicon/hikey960/include/hi3660.h
@@ -335,6 +335,8 @@
#define IOMG_AO_026_REG (IOMG_AO_REG_BASE + 0x068)
/* GPIO219: PD interrupt. pull up */
#define IOMG_AO_039_REG (IOMG_AO_REG_BASE + 0x09C)
+/* GPIO213: PCIE_CLKREQ_N */
+#define IOMG_AO_033_REG (IOMG_AO_REG_BASE + 0x084)
#define IOCG_AO_REG_BASE 0xFFF1187C
/* GPIO219: PD interrupt. pull up */
diff --git a/plat/hisilicon/hikey960/include/platform_def.h b/plat/hisilicon/hikey960/include/platform_def.h
index 2ac7f2a7..cb760907 100644
--- a/plat/hisilicon/hikey960/include/platform_def.h
+++ b/plat/hisilicon/hikey960/include/platform_def.h
@@ -31,10 +31,8 @@
#define PLAT_NUM_PWR_DOMAINS (PLATFORM_CORE_COUNT + \
PLATFORM_CLUSTER_COUNT + 1)
-#define PLAT_MAX_RUN_STATE 0
-#define PLAT_MAX_STB_STATE 1
-#define PLAT_MAX_RET_STATE 2
-#define PLAT_MAX_OFF_STATE 3
+#define PLAT_MAX_RET_STATE 1
+#define PLAT_MAX_OFF_STATE 2
#define MAX_IO_DEVICES 3
#define MAX_IO_HANDLES 4
diff --git a/plat/hisilicon/hikey960/platform.mk b/plat/hisilicon/hikey960/platform.mk
index 695f0923..cb97deb6 100644
--- a/plat/hisilicon/hikey960/platform.mk
+++ b/plat/hisilicon/hikey960/platform.mk
@@ -101,3 +101,5 @@ BL31_SOURCES += drivers/arm/cci/cci.c \
ERRATA_A53_836870 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+
+FIP_ALIGN := 512
diff --git a/plat/socionext/uniphier/platform.mk b/plat/socionext/uniphier/platform.mk
index 41d04448..f99bbf58 100644
--- a/plat/socionext/uniphier/platform.mk
+++ b/plat/socionext/uniphier/platform.mk
@@ -117,4 +117,4 @@ endif
bl1_gzip: $(BUILD_PLAT)/bl1.bin.gzip
%.gzip: %
@echo " GZIP $@"
- $(Q)(cat $< | gzip -n -f -9 > $@) || (rm -f $@ || false)
+ $(Q)gzip -n -f -9 $< --stdout > $@
diff --git a/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
new file mode 100644
index 00000000..bfc19d33
--- /dev/null
+++ b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Top-level SMC handler for ZynqMP IPI Mailbox doorbell functions.
+ */
+
+#include <bakery_lock.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <string.h>
+#include "ipi_mailbox_svc.h"
+#include "../zynqmp_ipi.h"
+#include "../zynqmp_private.h"
+#include "../../../services/spd/trusty/smcall.h"
+
+/*********************************************************************
+ * Macros definitions
+ ********************************************************************/
+
+/* IPI SMC calls macros: */
+#define IPI_SMC_OPEN_IRQ_MASK 0x00000001U /* IRQ enable bit in IPI
+ * open SMC call
+ */
+#define IPI_SMC_NOTIFY_BLOCK_MASK 0x00000001U /* Flag to indicate if
+ * IPI notification needs
+ * to be blocking.
+ */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK	0x00000001U /* Flag to indicate if the
+						     * notification interrupt
+						     * is to be disabled.
+						     */
+#define IPI_SMC_ACK_EIRQ_MASK	0x00000001U /* Flag to indicate if the
+						     * notification interrupt
+						     * is to be enabled.
+						     */
+
+#define UNSIGNED32_MASK	0xFFFFFFFFU /* 32-bit mask */
+
+/**
+ * ipi_smc_handler() - SMC handler for IPI SMC calls
+ *
+ * @smc_fid - Function identifier
+ * @x1 - x4 - Arguments
+ * @cookie - Unused
+ * @handle - Pointer to caller's context structure
+ *
+ * @return - Unused
+ *
+ * Determines if smc_fid is a valid and supported IPI SMC Function ID from the
+ * list of ipi_api_ids; otherwise it completes the request with
+ * the unknown SMC Function ID.
+ *
+ * The SMC calls for the IPI service are forwarded from the SiP Service SMC
+ * handler function with the rt_svc_handle signature.
+ */
+uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie,
+ void *handle, uint64_t flags)
+{
+ int ret;
+ uint32_t ipi_local_id;
+ uint32_t ipi_remote_id;
+ unsigned int is_secure;
+
+ ipi_local_id = x1 & UNSIGNED32_MASK;
+ ipi_remote_id = x2 & UNSIGNED32_MASK;
+
+ if (SMC_ENTITY(smc_fid) >= SMC_ENTITY_TRUSTED_APP)
+ is_secure = 1;
+ else
+ is_secure = 0;
+
+ /* Validate IPI mailbox access */
+ ret = ipi_mb_validate(ipi_local_id, ipi_remote_id, is_secure);
+ if (ret)
+ SMC_RET1(handle, ret);
+
+ switch (SMC_FUNCTION(smc_fid)) {
+ case IPI_MAILBOX_OPEN:
+ ipi_mb_open(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_RELEASE:
+ ipi_mb_release(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_STATUS_ENQUIRY:
+ {
+ int disable_irq;
+
+ disable_irq = (x3 & IPI_SMC_ENQUIRY_DIRQ_MASK) ? 1 : 0;
+ ret = ipi_mb_enquire_status(ipi_local_id, ipi_remote_id);
+ if ((ret & IPI_MB_STATUS_RECV_PENDING) && disable_irq)
+ ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, ret);
+ }
+ case IPI_MAILBOX_NOTIFY:
+ {
+ uint32_t is_blocking;
+
+ is_blocking = (x3 & IPI_SMC_NOTIFY_BLOCK_MASK) ? 1 : 0;
+ ipi_mb_notify(ipi_local_id, ipi_remote_id, is_blocking);
+ SMC_RET1(handle, 0);
+ }
+ case IPI_MAILBOX_ACK:
+ {
+ int enable_irq;
+
+ enable_irq = (x3 & IPI_SMC_ACK_EIRQ_MASK) ? 1 : 0;
+ ipi_mb_ack(ipi_local_id, ipi_remote_id);
+ if (enable_irq)
+ ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ }
+ case IPI_MAILBOX_ENABLE_IRQ:
+ ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ case IPI_MAILBOX_DISABLE_IRQ:
+ ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
+ SMC_RET1(handle, 0);
+ default:
+ WARN("Unimplemented IPI service call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
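
Note: a caller reaches this handler through the SiP service with x1 = local
IPI ID, x2 = remote IPI ID and the per-call flag in x3. A hypothetical sketch
of a blocking notify (smc_call() and SIP_SVC_BASE are stand-ins, not
identifiers from this tree; IPI_MAILBOX_NOTIFY is 0x1003 per the enum below):

smc_call(SIP_SVC_BASE | IPI_MAILBOX_NOTIFY,	/* x0: function ID */
	 IPI_ID_APU,	/* x1: local IPI ID */
	 IPI_ID_PMU0,	/* x2: remote IPI ID */
	 1);		/* x3: IPI_SMC_NOTIFY_BLOCK_MASK -> blocking */
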
diff --git a/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h
new file mode 100644
index 00000000..387ffd23
--- /dev/null
+++ b/plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* ZynqMP IPI mailbox doorbell service enums and defines */
+
+#ifndef _IPI_MAILBOX_SVC_H_
+#define _IPI_MAILBOX_SVC_H_
+
+#include <stdint.h>
+
+/*********************************************************************
+ * Enum definitions
+ ********************************************************************/
+
+/* IPI SMC function numbers enum definition */
+enum ipi_api_id {
+ /* IPI mailbox operations functions: */
+ IPI_MAILBOX_OPEN = 0x1000,
+ IPI_MAILBOX_RELEASE,
+ IPI_MAILBOX_STATUS_ENQUIRY,
+ IPI_MAILBOX_NOTIFY,
+ IPI_MAILBOX_ACK,
+ IPI_MAILBOX_ENABLE_IRQ,
+ IPI_MAILBOX_DISABLE_IRQ
+};
+
+/*********************************************************************
+ * IPI mailbox service APIs declarations
+ ********************************************************************/
+
+/* IPI SMC handler */
+uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+ uint64_t x3, uint64_t x4, void *cookie, void *handle,
+ uint64_t flags);
+
+#endif /* _IPI_MAILBOX_SVC_H_ */
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
index cb3b4421..bdd194bd 100644
--- a/plat/xilinx/zynqmp/platform.mk
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -42,7 +42,8 @@ $(eval $(call add_define_val,ZYNQMP_CONSOLE,ZYNQMP_CONSOLE_ID_${ZYNQMP_CONSOLE})
PLAT_INCLUDES := -Iinclude/plat/arm/common/ \
-Iinclude/plat/arm/common/aarch64/ \
-Iplat/xilinx/zynqmp/include/ \
- -Iplat/xilinx/zynqmp/pm_service/
+ -Iplat/xilinx/zynqmp/pm_service/ \
+ -Iplat/xilinx/zynqmp/ipi_mailbox_service/
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
@@ -71,7 +72,9 @@ BL31_SOURCES += drivers/arm/cci/cci.c \
plat/xilinx/zynqmp/plat_startup.c \
plat/xilinx/zynqmp/plat_topology.c \
plat/xilinx/zynqmp/sip_svc_setup.c \
+ plat/xilinx/zynqmp/zynqmp_ipi.c \
plat/xilinx/zynqmp/pm_service/pm_svc_main.c \
plat/xilinx/zynqmp/pm_service/pm_api_sys.c \
plat/xilinx/zynqmp/pm_service/pm_ipi.c \
- plat/xilinx/zynqmp/pm_service/pm_client.c
+ plat/xilinx/zynqmp/pm_service/pm_client.c \
+ plat/xilinx/zynqmp/ipi_mailbox_service/ipi_mailbox_svc.c
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
index 90c670d1..9e210677 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_sys.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -542,7 +542,6 @@ enum pm_ret_status pm_get_chipid(uint32_t *value)
*/
void pm_get_callbackdata(uint32_t *data, size_t count)
{
-
pm_ipi_buff_read_callb(data, count);
- pm_ipi_irq_clear();
+ pm_ipi_irq_clear(primary_proc);
}
diff --git a/plat/xilinx/zynqmp/pm_service/pm_common.h b/plat/xilinx/zynqmp/pm_service/pm_common.h
index 03351c24..5dcbb0d8 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_common.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_common.h
@@ -21,13 +21,13 @@
/**
* pm_ipi - struct for capturing IPI-channel specific info
- * @mask mask for enabling/disabling and triggering the IPI
- * @base base address for IPI
+ * @apu_ipi_id APU IPI agent ID
+ * @pmu_ipi_id PMU Agent ID
* @buffer_base base address for payload buffer
*/
struct pm_ipi {
- const unsigned int mask;
- const uintptr_t base;
+ const uint32_t apu_ipi_id;
+ const uint32_t pmu_ipi_id;
const uintptr_t buffer_base;
};
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.c b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
index fdffde77..58faf0e7 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,28 +8,17 @@
#include <bakery_lock.h>
#include <mmio.h>
#include <platform.h>
+#include "../zynqmp_ipi.h"
#include "../zynqmp_private.h"
#include "pm_ipi.h"
/* IPI message buffers */
#define IPI_BUFFER_BASEADDR 0xFF990000U
-#define IPI_BUFFER_RPU_0_BASE (IPI_BUFFER_BASEADDR + 0x0U)
-#define IPI_BUFFER_RPU_1_BASE (IPI_BUFFER_BASEADDR + 0x200U)
#define IPI_BUFFER_APU_BASE (IPI_BUFFER_BASEADDR + 0x400U)
-#define IPI_BUFFER_PL_0_BASE (IPI_BUFFER_BASEADDR + 0x600U)
-#define IPI_BUFFER_PL_1_BASE (IPI_BUFFER_BASEADDR + 0x800U)
-#define IPI_BUFFER_PL_2_BASE (IPI_BUFFER_BASEADDR + 0xA00U)
-#define IPI_BUFFER_PL_3_BASE (IPI_BUFFER_BASEADDR + 0xC00U)
#define IPI_BUFFER_PMU_BASE (IPI_BUFFER_BASEADDR + 0xE00U)
-#define IPI_BUFFER_TARGET_RPU_0_OFFSET 0x0U
-#define IPI_BUFFER_TARGET_RPU_1_OFFSET 0x40U
#define IPI_BUFFER_TARGET_APU_OFFSET 0x80U
-#define IPI_BUFFER_TARGET_PL_0_OFFSET 0xC0U
-#define IPI_BUFFER_TARGET_PL_1_OFFSET 0x100U
-#define IPI_BUFFER_TARGET_PL_2_OFFSET 0x140U
-#define IPI_BUFFER_TARGET_PL_3_OFFSET 0x180U
#define IPI_BUFFER_TARGET_PMU_OFFSET 0x1C0U
#define IPI_BUFFER_MAX_WORDS 8
@@ -37,76 +26,33 @@
#define IPI_BUFFER_REQ_OFFSET 0x0U
#define IPI_BUFFER_RESP_OFFSET 0x20U
-/* IPI Base Address */
-#define IPI_BASEADDR 0XFF300000
-
-/* APU's IPI registers */
-#define IPI_APU_ISR (IPI_BASEADDR + 0X00000010)
-#define IPI_APU_IER (IPI_BASEADDR + 0X00000018)
-#define IPI_APU_IDR (IPI_BASEADDR + 0X0000001C)
-#define IPI_APU_IXR_PMU_0_MASK (1 << 16)
-
-#define IPI_TRIG_OFFSET 0
-#define IPI_OBS_OFFSET 4
-
-/* Power Management IPI interrupt number */
-#define PM_INT_NUM 0
-#define IPI_PMU_PM_INT_BASE (IPI_PMU_0_TRIG + (PM_INT_NUM * 0x1000))
-#define IPI_PMU_PM_INT_MASK (IPI_APU_IXR_PMU_0_MASK << PM_INT_NUM)
-#if (PM_INT_NUM < 0 || PM_INT_NUM > 3)
- #error PM_INT_NUM value out of range
-#endif
-
-#define IPI_APU_MASK 1U
-
DEFINE_BAKERY_LOCK(pm_secure_lock);
const struct pm_ipi apu_ipi = {
- .mask = IPI_APU_MASK,
- .base = IPI_BASEADDR,
+ .apu_ipi_id = IPI_ID_APU,
+ .pmu_ipi_id = IPI_ID_PMU0,
.buffer_base = IPI_BUFFER_APU_BASE,
};
/**
* pm_ipi_init() - Initialize IPI peripheral for communication with PMU
*
+ * @proc	Pointer to the processor that is initiating the request
* @return On success, the initialization function must return 0.
* Any other return value will cause the framework to ignore
* the service
*
* Called from pm_setup initialization function
*/
-int pm_ipi_init(void)
+int pm_ipi_init(const struct pm_proc *proc)
{
bakery_lock_init(&pm_secure_lock);
-
- /* IPI Interrupts Clear & Disable */
- mmio_write_32(IPI_APU_ISR, 0xffffffff);
- mmio_write_32(IPI_APU_IDR, 0xffffffff);
+ ipi_mb_open(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
return 0;
}
/**
- * pm_ipi_wait() - wait for pmu to handle request
- * @proc proc which is waiting for PMU to handle request
- */
-static enum pm_ret_status pm_ipi_wait(const struct pm_proc *proc)
-{
- int status;
-
- /* Wait until previous interrupt is handled by PMU */
- do {
- status = mmio_read_32(proc->ipi->base + IPI_OBS_OFFSET) &
- IPI_PMU_PM_INT_MASK;
- /* TODO: 1) Use timer to add delay between read attempts */
- /* TODO: 2) Return PM_RET_ERR_TIMEOUT if this times out */
- } while (status);
-
- return PM_RET_SUCCESS;
-}
-
-/**
* pm_ipi_send_common() - Sends IPI request to the PMU
* @proc Pointer to the processor who is initiating request
* @payload API id and call arguments to be written in IPI buffer
@@ -124,16 +70,13 @@ static enum pm_ret_status pm_ipi_send_common(const struct pm_proc *proc,
IPI_BUFFER_TARGET_PMU_OFFSET +
IPI_BUFFER_REQ_OFFSET;
- /* Wait until previous interrupt is handled by PMU */
- pm_ipi_wait(proc);
-
/* Write payload into IPI buffer */
for (size_t i = 0; i < PAYLOAD_ARG_CNT; i++) {
mmio_write_32(buffer_base + offset, payload[i]);
offset += PAYLOAD_ARG_SIZE;
}
/* Generate IPI to PMU */
- mmio_write_32(proc->ipi->base + IPI_TRIG_OFFSET, IPI_PMU_PM_INT_MASK);
+ ipi_mb_notify(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id, 1);
return PM_RET_SUCCESS;
}
@@ -178,8 +121,6 @@ static enum pm_ret_status pm_ipi_buff_read(const struct pm_proc *proc,
IPI_BUFFER_TARGET_PMU_OFFSET +
IPI_BUFFER_RESP_OFFSET;
- pm_ipi_wait(proc);
-
/*
* Read response from IPI buffer
* buf-0: success or error+reason
@@ -250,17 +191,12 @@ unlock:
return ret;
}
-void pm_ipi_irq_enable(void)
-{
- mmio_write_32(IPI_APU_IER, IPI_APU_IXR_PMU_0_MASK);
-}
-
-void pm_ipi_irq_disable(void)
+void pm_ipi_irq_enable(const struct pm_proc *proc)
{
- mmio_write_32(IPI_APU_IDR, IPI_APU_IXR_PMU_0_MASK);
+ ipi_mb_enable_irq(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
}
-void pm_ipi_irq_clear(void)
+void pm_ipi_irq_clear(const struct pm_proc *proc)
{
- mmio_write_32(IPI_APU_ISR, IPI_APU_IXR_PMU_0_MASK);
+ ipi_mb_ack(proc->ipi->apu_ipi_id, proc->ipi->pmu_ipi_id);
}
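
Note: with pm_ipi layered on the generic mailbox driver, a PM call reduces to
filling the payload buffer and letting ipi_mb_notify() block on the PMU. A
hypothetical caller (pm_ipi_send_sync() and primary_proc are real; the API id
0x42 and the function are made up):

enum pm_ret_status pm_example_query(unsigned int *value)
{
	uint32_t payload[PAYLOAD_ARG_CNT] = { 0x42 };	/* hypothetical API id */

	return pm_ipi_send_sync(primary_proc, payload, value, 1);
}
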
diff --git a/plat/xilinx/zynqmp/pm_service/pm_ipi.h b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
index a76298bd..e6b36f52 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_ipi.h
+++ b/plat/xilinx/zynqmp/pm_service/pm_ipi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,7 +9,7 @@
#include "pm_common.h"
-int pm_ipi_init(void);
+int pm_ipi_init(const struct pm_proc *proc);
enum pm_ret_status pm_ipi_send(const struct pm_proc *proc,
uint32_t payload[PAYLOAD_ARG_CNT]);
@@ -17,8 +17,7 @@ enum pm_ret_status pm_ipi_send_sync(const struct pm_proc *proc,
uint32_t payload[PAYLOAD_ARG_CNT],
unsigned int *value, size_t count);
void pm_ipi_buff_read_callb(unsigned int *value, size_t count);
-void pm_ipi_irq_enable(void);
-void pm_ipi_irq_disable(void);
-void pm_ipi_irq_clear(void);
+void pm_ipi_irq_enable(const struct pm_proc *proc);
+void pm_ipi_irq_clear(const struct pm_proc *proc);
#endif /* _PM_IPI_H_ */
diff --git a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
index f4e679bc..fb64bc5c 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_svc_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -49,22 +49,25 @@ static struct {
*/
int pm_setup(void)
{
- int status;
+ int status, ret;
if (!zynqmp_is_pmu_up())
return -ENODEV;
- status = pm_ipi_init();
+ status = pm_ipi_init(primary_proc);
- if (status == 0)
+ if (status >= 0) {
INFO("BL31: PM Service Init Complete: API v%d.%d\n",
PM_VERSION_MAJOR, PM_VERSION_MINOR);
- else
+ ret = 0;
+ } else {
INFO("BL31: PM Service Init Failed, Error Code %d!\n", status);
+ ret = status;
+ }
pm_down = status;
- return status;
+ return ret;
}
/**
@@ -163,7 +166,7 @@ uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
* Even if we were wrong, it would not enable the IRQ in
* the GIC.
*/
- pm_ipi_irq_enable();
+ pm_ipi_irq_enable(primary_proc);
SMC_RET1(handle, (uint64_t)ret |
((uint64_t)pm_ctx.api_version << 32));
diff --git a/plat/xilinx/zynqmp/sip_svc_setup.c b/plat/xilinx/zynqmp/sip_svc_setup.c
index ae6ecafc..8b44eaa8 100644
--- a/plat/xilinx/zynqmp/sip_svc_setup.c
+++ b/plat/xilinx/zynqmp/sip_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,7 +8,9 @@
#include <runtime_svc.h>
#include <uuid.h>
+#include "ipi_mailbox_svc.h"
#include "pm_svc_main.h"
+#include "zynqmp_ipi.h"
/* SMC function IDs for SiP Service queries */
#define ZYNQMP_SIP_SVC_CALL_COUNT 0x8200ff00
@@ -19,10 +21,12 @@
#define SIP_SVC_VERSION_MAJOR 0
#define SIP_SVC_VERSION_MINOR 1
-/* These macros are used to identify PM calls from the SMC function ID */
+/* These macros are used to identify PM and IPI calls from the SMC function ID */
#define PM_FID_MASK 0xf000u
#define PM_FID_VALUE 0u
+#define IPI_FID_VALUE 0x1000u
#define is_pm_fid(_fid) (((_fid) & PM_FID_MASK) == PM_FID_VALUE)
+#define is_ipi_fid(_fid) (((_fid) & PM_FID_MASK) == IPI_FID_VALUE)
/* SiP Service UUID */
DEFINE_SVC_UUID(zynqmp_sip_uuid,
@@ -63,6 +67,12 @@ uint64_t sip_svc_smc_handler(uint32_t smc_fid,
flags);
}
+ /* Let IPI SMC handler deal with IPI-related requests */
+ if (is_ipi_fid(smc_fid)) {
+ return ipi_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+
switch (smc_fid) {
case ZYNQMP_SIP_SVC_CALL_COUNT:
/* PM functions + default functions */
diff --git a/plat/xilinx/zynqmp/zynqmp_ipi.c b/plat/xilinx/zynqmp/zynqmp_ipi.c
new file mode 100644
index 00000000..755a3b7a
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_ipi.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Zynq UltraScale+ MPSoC IPI agent register access management
+ */
+
+#include <bakery_lock.h>
+#include <debug.h>
+#include <errno.h>
+#include <mmio.h>
+#include <runtime_svc.h>
+#include <string.h>
+#include "zynqmp_ipi.h"
+#include "../zynqmp_private.h"
+
+/*********************************************************************
+ * Macros definitions
+ ********************************************************************/
+
+/* IPI registers base address */
+#define IPI_REGS_BASE 0xFF300000U
+
+/* IPI registers offsets macros */
+#define IPI_TRIG_OFFSET 0x00U
+#define IPI_OBR_OFFSET 0x04U
+#define IPI_ISR_OFFSET 0x10U
+#define IPI_IMR_OFFSET 0x14U
+#define IPI_IER_OFFSET 0x18U
+#define IPI_IDR_OFFSET 0x1CU
+
+/* IPI register start offset */
+#define IPI_REG_BASE(I) (zynqmp_ipi_table[(I)].ipi_reg_base)
+
+/* IPI register bit mask */
+#define IPI_BIT_MASK(I) (zynqmp_ipi_table[(I)].ipi_bit_mask)
+
+/* IPI secure check */
+#define IPI_SECURE_MASK 0x1U
+#define IPI_IS_SECURE(I) ((zynqmp_ipi_table[(I)].secure_only & \
+ IPI_SECURE_MASK) ? 1 : 0)
+
+/*********************************************************************
+ * Struct definitions
+ ********************************************************************/
+
+/* structure to maintain IPI configuration information */
+struct zynqmp_ipi_config {
+ unsigned int ipi_bit_mask;
+ unsigned int ipi_reg_base;
+ unsigned char secure_only;
+};
+
+/* ZynqMP IPI configuration table */
+static const struct zynqmp_ipi_config zynqmp_ipi_table[] = {
+ /* APU IPI */
+ {
+ .ipi_bit_mask = 0x1,
+ .ipi_reg_base = 0xFF300000,
+ .secure_only = 0,
+ },
+ /* RPU0 IPI */
+ {
+ .ipi_bit_mask = 0x100,
+ .ipi_reg_base = 0xFF310000,
+ .secure_only = 0,
+ },
+ /* RPU1 IPI */
+ {
+ .ipi_bit_mask = 0x200,
+ .ipi_reg_base = 0xFF320000,
+ .secure_only = 0,
+ },
+ /* PMU0 IPI */
+ {
+ .ipi_bit_mask = 0x10000,
+ .ipi_reg_base = 0xFF330000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU1 IPI */
+ {
+ .ipi_bit_mask = 0x20000,
+ .ipi_reg_base = 0xFF331000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU2 IPI */
+ {
+ .ipi_bit_mask = 0x40000,
+ .ipi_reg_base = 0xFF332000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PMU3 IPI */
+ {
+ .ipi_bit_mask = 0x80000,
+ .ipi_reg_base = 0xFF333000,
+ .secure_only = IPI_SECURE_MASK,
+ },
+ /* PL0 IPI */
+ {
+ .ipi_bit_mask = 0x1000000,
+ .ipi_reg_base = 0xFF340000,
+ .secure_only = 0,
+ },
+ /* PL1 IPI */
+ {
+ .ipi_bit_mask = 0x2000000,
+ .ipi_reg_base = 0xFF350000,
+ .secure_only = 0,
+ },
+ /* PL2 IPI */
+ {
+ .ipi_bit_mask = 0x4000000,
+ .ipi_reg_base = 0xFF360000,
+ .secure_only = 0,
+ },
+ /* PL3 IPI */
+ {
+ .ipi_bit_mask = 0x8000000,
+ .ipi_reg_base = 0xFF370000,
+ .secure_only = 0,
+ },
+};
+
+/* is_ipi_mb_within_range() - verify if the IPI IDs are within range
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * return - 1 if within range, 0 if not
+ */
+static inline int is_ipi_mb_within_range(uint32_t local, uint32_t remote)
+{
+ int ret = 1;
+ uint32_t ipi_total = ARRAY_SIZE(zynqmp_ipi_table);
+
+ if (remote >= ipi_total || local >= ipi_total)
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * ipi_mb_validate() - validate IPI mailbox access
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ * @is_secure - indicate if the requester is from secure software
+ *
+ * return - 0 success, negative value for errors
+ */
+int ipi_mb_validate(uint32_t local, uint32_t remote, unsigned int is_secure)
+{
+ int ret = 0;
+
+ if (!is_ipi_mb_within_range(local, remote))
+ ret = -EINVAL;
+ else if (IPI_IS_SECURE(local) && !is_secure)
+ ret = -EPERM;
+ else if (IPI_IS_SECURE(remote) && !is_secure)
+ ret = -EPERM;
+
+ return ret;
+}
+
+/**
+ * ipi_mb_open() - Open IPI mailbox.
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ */
+void ipi_mb_open(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+ mmio_write_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/**
+ * ipi_mb_release() - Release IPI mailbox.
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ */
+void ipi_mb_release(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/**
+ * ipi_mb_enquire_status() - Enquire IPI mailbox status
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * return - 0 if idle, positive value if a send or receive is pending,
+ * negative value for errors
+ */
+int ipi_mb_enquire_status(uint32_t local, uint32_t remote)
+{
+ int ret = 0;
+ uint32_t status;
+
+ status = mmio_read_32(IPI_REG_BASE(local) + IPI_OBR_OFFSET);
+ if (status & IPI_BIT_MASK(remote))
+ ret |= IPI_MB_STATUS_SEND_PENDING;
+ status = mmio_read_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET);
+ if (status & IPI_BIT_MASK(remote))
+ ret |= IPI_MB_STATUS_RECV_PENDING;
+
+ return ret;
+}
+
+/* ipi_mb_notify() - Trigger IPI mailbox notification
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ * @is_blocking - whether to trigger the notification in blocking mode.
+ *
+ * It sets the remote bit in the IPI agent trigger register.
+ *
+ */
+void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking)
+{
+ uint32_t status;
+
+ mmio_write_32(IPI_REG_BASE(local) + IPI_TRIG_OFFSET,
+ IPI_BIT_MASK(remote));
+ if (is_blocking) {
+ do {
+ status = mmio_read_32(IPI_REG_BASE(local) +
+ IPI_OBR_OFFSET);
+ } while (status & IPI_BIT_MASK(remote));
+ }
+}
+
+/* ipi_mb_ack() - Ack IPI mailbox notification from the other end
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will clear the remote bit in the isr register.
+ *
+ */
+void ipi_mb_ack(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_ISR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/* ipi_mb_disable_irq() - Disable IPI mailbox notification interrupt
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will mask the remote bit in the idr register.
+ *
+ */
+void ipi_mb_disable_irq(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IDR_OFFSET,
+ IPI_BIT_MASK(remote));
+}
+
+/* ipi_mb_enable_irq() - Enable IPI mailbox notification interrupt
+ *
+ * @local - local IPI ID
+ * @remote - remote IPI ID
+ *
+ * It will set the remote bit in the ier register.
+ *
+ */
+void ipi_mb_enable_irq(uint32_t local, uint32_t remote)
+{
+ mmio_write_32(IPI_REG_BASE(local) + IPI_IER_OFFSET,
+ IPI_BIT_MASK(remote));
+}
diff --git a/plat/xilinx/zynqmp/zynqmp_ipi.h b/plat/xilinx/zynqmp/zynqmp_ipi.h
new file mode 100644
index 00000000..0544ddbf
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_ipi.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* ZynqMP IPI management enums and defines */
+
+#ifndef _ZYNQMP_IPI_H_
+#define _ZYNQMP_IPI_H_
+
+#include <stdint.h>
+
+/*********************************************************************
+ * IPI agent IDs macros
+ ********************************************************************/
+#define IPI_ID_APU 0U
+#define IPI_ID_RPU0 1U
+#define IPI_ID_RPU1 2U
+#define IPI_ID_PMU0 3U
+#define IPI_ID_PMU1 4U
+#define IPI_ID_PMU2 5U
+#define IPI_ID_PMU3 6U
+#define IPI_ID_PL0 7U
+#define IPI_ID_PL1 8U
+#define IPI_ID_PL2 9U
+#define IPI_ID_PL3 10U
+
+/*********************************************************************
+ * IPI mailbox status macros
+ ********************************************************************/
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+/*********************************************************************
+ * IPI mailbox call is secure or not macros
+ ********************************************************************/
+#define IPI_MB_CALL_NOTSECURE 0
+#define IPI_MB_CALL_SECURE 1
+
+/*********************************************************************
+ * IPI APIs declarations
+ ********************************************************************/
+
+/* Validate IPI mailbox access */
+int ipi_mb_validate(uint32_t local, uint32_t remote, unsigned int is_secure);
+
+/* Open the IPI mailbox */
+void ipi_mb_open(uint32_t local, uint32_t remote);
+
+/* Release the IPI mailbox */
+void ipi_mb_release(uint32_t local, uint32_t remote);
+
+/* Enquire IPI mailbox status */
+int ipi_mb_enquire_status(uint32_t local, uint32_t remote);
+
+/* Trigger notification on the IPI mailbox */
+void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking);
+
+/* Ack IPI mailbox notification */
+void ipi_mb_ack(uint32_t local, uint32_t remote);
+
+/* Disable IPI mailbox notification interrupt */
+void ipi_mb_disable_irq(uint32_t local, uint32_t remote);
+
+/* Enable IPI mailbox notification interrupt */
+void ipi_mb_enable_irq(uint32_t local, uint32_t remote);
+
+#endif /* _ZYNQMP_IPI_H_ */
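
Note: a typical lifecycle for one channel with this API: validate, open,
notify, ack anything pending, release. A hedged sketch (the call sequence is
illustrative, not taken from this tree):

#include "zynqmp_ipi.h"

static int apu_ping_pmu(void)
{
	if (ipi_mb_validate(IPI_ID_APU, IPI_ID_PMU0, IPI_MB_CALL_SECURE))
		return -1;

	ipi_mb_open(IPI_ID_APU, IPI_ID_PMU0);
	ipi_mb_notify(IPI_ID_APU, IPI_ID_PMU0, 1);	/* blocking */

	if (ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMU0) &
	    IPI_MB_STATUS_RECV_PENDING)
		ipi_mb_ack(IPI_ID_APU, IPI_ID_PMU0);

	ipi_mb_release(IPI_ID_APU, IPI_ID_PMU0);
	return 0;
}
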
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
index 78e98535..cb68bff7 100644
--- a/services/spd/tlkd/tlkd_main.c
+++ b/services/spd/tlkd/tlkd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -193,12 +193,14 @@ uint64_t tlkd_smc_handler(uint32_t smc_fid,
* b. register shared memory with the SP for passing args
* required for maintaining sessions with the Trusted
* Applications.
- * c. open/close sessions
- * d. issue commands to the Trusted Apps
- * e. resume the preempted yielding SMC call.
+ * c. register non-secure world's memory map with the OS
+ * d. open/close sessions
+ * e. issue commands to the Trusted Apps
+ * f. resume the preempted yielding SMC call.
*/
case TLK_REGISTER_LOGBUF:
case TLK_REGISTER_REQBUF:
+ case TLK_REGISTER_NS_DRAM:
case TLK_OPEN_TA_SESSION:
case TLK_CLOSE_TA_SESSION:
case TLK_TA_LAUNCH_OP:
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
index ae71c1df..d31fad67 100644
--- a/services/std_svc/spm/spm_main.c
+++ b/services/std_svc/spm/spm_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,7 @@
#include <context_mgmt.h>
#include <debug.h>
#include <errno.h>
+#include <mm_svc.h>
#include <platform.h>
#include <runtime_svc.h>
#include <secure_partition.h>
@@ -29,7 +30,6 @@ static spinlock_t mem_attr_smc_lock;
* Secure Partition context information.
******************************************************************************/
static secure_partition_context_t sp_ctx;
-unsigned int sp_init_in_progress;
/*******************************************************************************
* Replace the S-EL1 re-entry information with S-EL0 re-entry
@@ -126,12 +126,19 @@ int32_t spm_init(void)
secure_partition_setup();
/*
+ * Make all CPUs use the same secure context.
+ */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ cm_set_context_by_index(i, &sp_ctx.cpu_ctx, SECURE);
+ }
+
+ /*
* Arrange for an entry into the secure partition.
*/
- sp_init_in_progress = 1;
+ sp_ctx.sp_init_in_progress = 1;
rc = spm_synchronous_sp_entry(&sp_ctx);
assert(rc == 0);
- sp_init_in_progress = 0;
+ sp_ctx.sp_init_in_progress = 0;
VERBOSE("SP_MEMORY_ATTRIBUTES_SET_AARCH64 availability has been revoked\n");
return rc;
@@ -358,7 +365,7 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
cm_el1_sysregs_context_save(SECURE);
spm_setup_next_eret_into_sel0(handle);
- if (sp_init_in_progress) {
+ if (sp_ctx.sp_init_in_progress) {
/*
* SPM reports completion. The SPM must have
* initiated the original request through a
@@ -370,6 +377,9 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
assert(0);
}
+ /* Release the Secure Partition context */
+ spin_unlock(&sp_ctx.lock);
+
/*
* This is the result from the Secure partition of an
* earlier request. Copy the result into the non-secure
@@ -391,7 +401,7 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
- if (!sp_init_in_progress) {
+ if (!sp_ctx.sp_init_in_progress) {
WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
SMC_RET1(handle, SPM_NOT_SUPPORTED);
}
@@ -400,7 +410,7 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
- if (!sp_init_in_progress) {
+ if (!sp_ctx.sp_init_in_progress) {
WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
SMC_RET1(handle, SPM_NOT_SUPPORTED);
}
@@ -414,9 +424,8 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
- case SP_VERSION_AARCH64:
- case SP_VERSION_AARCH32:
- SMC_RET1(handle, SP_VERSION_COMPILED);
+ case MM_VERSION_AARCH32:
+ SMC_RET1(handle, MM_VERSION_COMPILED);
case MM_COMMUNICATE_AARCH32:
case MM_COMMUNICATE_AARCH64:
@@ -443,6 +452,9 @@ uint64_t spm_smc_handler(uint32_t smc_fid,
/* Save the Normal world context */
cm_el1_sysregs_context_save(NON_SECURE);
+ /* Lock the Secure Partition context. */
+ spin_lock(&sp_ctx.lock);
+
/*
* Restore the secure world context and prepare for
* entry in S-EL0
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
index 16993e8c..1d16b458 100644
--- a/services/std_svc/spm/spm_private.h
+++ b/services/std_svc/spm/spm_private.h
@@ -32,6 +32,7 @@
#ifndef __ASSEMBLY__
+#include <spinlock.h>
#include <stdint.h>
#include <xlat_tables_v2.h>
@@ -43,6 +44,8 @@ struct entry_point_info;
typedef struct secure_partition_context {
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
+ unsigned int sp_init_in_progress;
+ spinlock_t lock;
} secure_partition_context_t;
uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
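
Note: because every CPU now shares the single sp_ctx, MM_COMMUNICATE requests
are serialized by the new spinlock: taken before the world switch into S-EL0,
dropped when the partition reports completion. In outline (a sketch, not the
actual handler):

#include <spinlock.h>

static spinlock_t lock;		/* stands in for sp_ctx.lock */

static void mm_communicate_outline(void)
{
	spin_lock(&lock);	/* MM_COMMUNICATE entry, on any CPU */
	/* restore secure context, ERET into S-EL0, partition runs... */
	spin_unlock(&lock);	/* partition signalled completion */
}
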
diff --git a/tools/fiptool/fiptool.c b/tools/fiptool/fiptool.c
index 1dcb7e8e..33c451e4 100644
--- a/tools/fiptool/fiptool.c
+++ b/tools/fiptool/fiptool.c
@@ -492,7 +492,7 @@ static int pack_images(const char *filename, uint64_t toc_flags, unsigned long a
fip_toc_header_t *toc_header;
fip_toc_entry_t *toc_entry;
char *buf;
- uint64_t entry_offset, buf_size, payload_size = 0;
+ uint64_t entry_offset, buf_size, payload_size = 0, pad_size;
size_t nr_images = 0;
for (desc = image_desc_head; desc != NULL; desc = desc->next)
@@ -526,9 +526,13 @@ static int pack_images(const char *filename, uint64_t toc_flags, unsigned long a
entry_offset += image->toc_e.size;
}
- /* Append a null uuid entry to mark the end of ToC entries. */
+ /*
+ * Append a null uuid entry to mark the end of ToC entries.
+	 * NOTE: the offset address of the last toc_entry must match the FIP
+	 * size.
+ */
memset(toc_entry, 0, sizeof(*toc_entry));
- toc_entry->offset_address = entry_offset;
+ toc_entry->offset_address = (entry_offset + align - 1) & ~(align - 1);
/* Generate the FIP file. */
fp = fopen(filename, "wb");
@@ -555,6 +559,13 @@ static int pack_images(const char *filename, uint64_t toc_flags, unsigned long a
xfwrite(image->buffer, image->toc_e.size, fp, filename);
}
+ if (fseek(fp, entry_offset, SEEK_SET))
+ log_errx("Failed to set file position");
+
+ pad_size = toc_entry->offset_address - entry_offset;
+ while (pad_size--)
+ fputc(0x0, fp);
+
fclose(fp);
return 0;
}
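
Note: the new offset_address is the standard power-of-two align-up, so the
terminating ToC entry always matches the padded FIP size. For reference:

#include <stdint.h>

/* Round x up to the next multiple of align (align must be a power of two). */
static uint64_t align_up(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* e.g. align_up(0x1234, 512) == 0x1400; align_up(0x1400, 512) == 0x1400 */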