-rw-r--r--   Makefile                                   | 11
-rw-r--r--   bl31/aarch64/ea_delegate.S                 | 39
-rw-r--r--   docs/design/cpu-specific-build-macros.rst  |  3
-rw-r--r--   docs/design/firmware-design.rst            |  7
-rw-r--r--   docs/getting_started/user-guide.rst        |  4
-rw-r--r--   include/drivers/delay_timer.h              | 25
-rw-r--r--   include/lib/cpus/aarch64/cpu_macros.S      | 19
-rw-r--r--   include/lib/cpus/aarch64/neoverse_n1.h     |  3
-rw-r--r--   lib/cpus/aarch64/cpu_helpers.S             | 21
-rw-r--r--   lib/cpus/aarch64/neoverse_n1.S             | 85
-rw-r--r--   lib/cpus/cpu-ops.mk                        |  8
-rw-r--r--   lib/locks/exclusive/aarch64/spinlock.S     | 53
-rw-r--r--   make_helpers/defaults.mk                   |  5
-rw-r--r--   plat/marvell/a8k/common/ble/ble.mk         |  1
-rw-r--r--   plat/st/common/include/stm32mp_common.h    | 18
15 files changed, 235 insertions(+), 67 deletions(-)
@@ -141,6 +141,15 @@ else
     $(error Unknown BRANCH_PROTECTION value ${BRANCH_PROTECTION})
 endif
 
+# USE_SPINLOCK_CAS requires AArch64 build
+ifeq (${USE_SPINLOCK_CAS},1)
+ifneq (${ARCH},aarch64)
+    $(error USE_SPINLOCK_CAS requires AArch64)
+else
+    $(info USE_SPINLOCK_CAS is an experimental feature)
+endif
+endif
+
 ################################################################################
 # Toolchain
 ################################################################################
@@ -690,6 +699,7 @@ $(eval $(call assert_boolean,WARMBOOT_ENABLE_DCACHE_EARLY))
 $(eval $(call assert_boolean,BL2_AT_EL3))
 $(eval $(call assert_boolean,BL2_IN_XIP_MEM))
 $(eval $(call assert_boolean,BL2_INV_DCACHE))
+$(eval $(call assert_boolean,USE_SPINLOCK_CAS))
 
 $(eval $(call assert_numeric,ARM_ARCH_MAJOR))
 $(eval $(call assert_numeric,ARM_ARCH_MINOR))
@@ -755,6 +765,7 @@ $(eval $(call add_define,WARMBOOT_ENABLE_DCACHE_EARLY))
 $(eval $(call add_define,BL2_AT_EL3))
 $(eval $(call add_define,BL2_IN_XIP_MEM))
 $(eval $(call add_define,BL2_INV_DCACHE))
+$(eval $(call add_define,USE_SPINLOCK_CAS))
 
 ifeq (${SANITIZE_UB},trap)
         $(eval $(call add_define,MONITOR_TRAPS))
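The new flag follows the usual TF-A pattern: assert_boolean rejects anything other than 0/1, and add_define exports it to C and assembly sources as a preprocessor symbol that is always defined. A hypothetical consumer (names below are illustrative, not from this patch) would gate on it like this:

    /*
     * USE_SPINLOCK_CAS is passed on the make command line, e.g.:
     *     make PLAT=fvp ARCH=aarch64 USE_SPINLOCK_CAS=1 all
     * add_define guarantees the macro expands to 0 or 1, so #if is safe.
     */
    #if USE_SPINLOCK_CAS
    # define SPINLOCK_IMPL_NAME	"ARMv8.1-LSE CAS"
    #else
    # define SPINLOCK_IMPL_NAME	"load-/store-exclusive"
    #endif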
diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
index 6e71a063..3cc4d56a 100644
--- a/bl31/aarch64/ea_delegate.S
+++ b/bl31/aarch64/ea_delegate.S
@@ -11,7 +11,8 @@
 #include <bl31/ea_handle.h>
 #include <context.h>
 #include <lib/extensions/ras_arch.h>
-
+#include <cpu_macros.S>
+#include <context.h>
 
 	.globl	handle_lower_el_ea_esb
 	.globl	enter_lower_el_sync_ea
@@ -35,9 +36,9 @@ endfunc handle_lower_el_ea_esb
 
 /*
  * This function forms the tail end of Synchronous Exception entry from lower
- * EL, and expects to handle only Synchronous External Aborts from lower EL. If
- * any other kind of exception is detected, then this function reports unhandled
- * exception.
+ * EL, and expects to handle Synchronous External Aborts from lower EL and CPU
+ * Implementation Defined Exceptions. If any other kind of exception is detected,
+ * then this function reports unhandled exception.
  *
  * Since it's part of exception vector, this function doesn't expect any GP
  * registers to have been saved. It delegates the handling of the EA to platform
@@ -58,12 +59,33 @@ func enter_lower_el_sync_ea
 	b.eq	1f
 
 	cmp	x30, #EC_DABORT_LOWER_EL
-	b.ne	2f
+	b.eq	1f
+
+	/* Save GP registers */
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+	/* Get the cpu_ops pointer */
+	bl	get_cpu_ops_ptr
+
+	/* Get the cpu_ops exception handler */
+	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]
+
+	/*
+	 * If the reserved function pointer is NULL, this CPU does not have an
+	 * implementation defined exception handler function
+	 */
+	cbz	x0, 2f
+	mrs	x1, esr_el3
+	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+	blr	x0
+	b	2f
 
 1:
 	/* Test for EA bit in the instruction syndrome */
 	mrs	x30, esr_el3
-	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 2f
+	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 3f
 
 	/*
 	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
@@ -84,6 +106,11 @@ func enter_lower_el_sync_ea
 	b	delegate_sync_ea
 
 2:
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+
+3:
 	/* Synchronous exceptions other than the above are assumed to be EA */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	no_ret	report_unhandled_exception
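For reference, the dispatch added above behaves like the following C sketch. The struct layout and names are illustrative (the real cpu_ops structure is laid out with .equ offsets in cpu_macros.S, below); only the null-check-then-call logic is the point:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative view of cpu_ops; CPU_E_HANDLER_FUNC is the new slot. */
    struct cpu_ops_view {
    	uint64_t midr;
    	void (*reset_func)(void);
    	void (*extra1_func)(void);
    	void (*extra2_func)(void);
    	void (*e_handler)(uint64_t ec);	/* may be NULL */
    	/* power-down ops, errata reporting, ... */
    };

    static void dispatch_impdef_exception(const struct cpu_ops_view *ops,
    				          uint64_t esr_el3)
    {
    	uint64_t ec = (esr_el3 >> 26) & 0x3fU;	/* ESR_EL3.EC, bits [31:26] */

    	if (ops->e_handler != NULL)
    		ops->e_handler(ec);
    	/* if NULL, or if the handler returns, the unhandled-exception
    	   path at label 2/3 runs */
    }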
diff --git a/docs/design/cpu-specific-build-macros.rst b/docs/design/cpu-specific-build-macros.rst
index d3fe89d6..a392eea2 100644
--- a/docs/design/cpu-specific-build-macros.rst
+++ b/docs/design/cpu-specific-build-macros.rst
@@ -258,6 +258,9 @@ For Neoverse N1, the following errata build flags are defined :
 - ``ERRATA_N1_1315703``: This applies errata 1315703 workaround to Neoverse-N1
   CPU. This needs to be enabled only for revision <= r3p0 of the CPU.
 
+- ``ERRATA_N1_1542419``: This applies errata 1542419 workaround to Neoverse-N1
+  CPU. This needs to be enabled only for revisions r3p0 - r4p0 of the CPU.
+
 DSU Errata Workarounds
 ----------------------
diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst
index dc082082..2cbd9c94 100644
--- a/docs/design/firmware-design.rst
+++ b/docs/design/firmware-design.rst
@@ -2540,8 +2540,11 @@ Armv8.1-A
 This Architecture Extension is targeted when ``ARM_ARCH_MAJOR`` >= 8, or when
 ``ARM_ARCH_MAJOR`` == 8 and ``ARM_ARCH_MINOR`` >= 1.
 
-- The Compare and Swap instruction is used to implement spinlocks. Otherwise,
-  the load-/store-exclusive instruction pair is used.
+- By default, a load-/store-exclusive instruction pair is used to implement
+  spinlocks. The ``USE_SPINLOCK_CAS`` build option when set to 1 selects the
+  spinlock implementation using the ARMv8.1-LSE Compare and Swap instruction.
+  Notice this instruction is only available in AArch64 execution state, so
+  the option is only available to AArch64 builds.
 
 Armv8.2-A
 ~~~~~~~~~
diff --git a/docs/getting_started/user-guide.rst b/docs/getting_started/user-guide.rst
index 6dad3105..e540fd06 100644
--- a/docs/getting_started/user-guide.rst
+++ b/docs/getting_started/user-guide.rst
@@ -817,6 +817,10 @@ Common build options
   reduces SRAM usage. Refer to `Library at ROM`_ for further details. Default
   is 0.
 
+- ``USE_SPINLOCK_CAS``: Setting this build flag to 1 selects the spinlock
+  implementation variant using the ARMv8.1-LSE compare-and-swap instruction.
+  Notice this option is experimental and only available to AArch64 builds.
+
 - ``V``: Verbose build. If assigned anything other than 0, the build commands
   are printed. Default is 0.
diff --git a/include/drivers/delay_timer.h b/include/drivers/delay_timer.h
index 684f1c3c..e5044cc6 100644
--- a/include/drivers/delay_timer.h
+++ b/include/drivers/delay_timer.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -7,8 +8,11 @@
 #ifndef DELAY_TIMER_H
 #define DELAY_TIMER_H
 
+#include <stdbool.h>
 #include <stdint.h>
 
+#include <arch_helpers.h>
+
 /********************************************************************
  * A simple timer driver providing synchronous delay functionality.
  * The driver must be initialized with a structure that provides a
@@ -23,6 +27,25 @@ typedef struct timer_ops {
 	uint32_t clk_div;
 } timer_ops_t;
 
+static inline uint64_t timeout_cnt_us2cnt(uint32_t us)
+{
+	return ((uint64_t)us * (uint64_t)read_cntfrq_el0()) / 1000000ULL;
+}
+
+static inline uint64_t timeout_init_us(uint32_t us)
+{
+	uint64_t cnt = timeout_cnt_us2cnt(us);
+
+	cnt += read_cntpct_el0();
+
+	return cnt;
+}
+
+static inline bool timeout_elapsed(uint64_t expire_cnt)
+{
+	return read_cntpct_el0() > expire_cnt;
+}
+
 void mdelay(uint32_t msec);
 void udelay(uint32_t usec);
 void timer_init(const timer_ops_t *ops_ptr);
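These helpers give TF-A a generic deadline API on top of the Generic Timer: timeout_init_us converts a microsecond budget into counter ticks and adds the current CNTPCT value, and timeout_elapsed compares against that deadline. A typical polling loop, with a made-up device register purely for illustration:

    #include <stdint.h>

    #include <drivers/delay_timer.h>
    #include <lib/mmio.h>

    #define DEV_STATUS		0x50000000UL	/* hypothetical MMIO address */
    #define DEV_STATUS_READY	(1U << 0)

    static int wait_device_ready(void)
    {
    	uint64_t timeout = timeout_init_us(100U);	/* 100 us budget */

    	while ((mmio_read_32(DEV_STATUS) & DEV_STATUS_READY) == 0U) {
    		if (timeout_elapsed(timeout))
    			return -1;	/* deadline passed */
    	}
    	return 0;
    }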
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
index 044aacaf..c83824d7 100644
--- a/include/lib/cpus/aarch64/cpu_macros.S
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -43,6 +43,7 @@
 	.equ	CPU_MIDR_SIZE, CPU_WORD_SIZE
 	.equ	CPU_EXTRA1_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_EXTRA2_FUNC_SIZE, CPU_WORD_SIZE
+	.equ	CPU_E_HANDLER_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_RESET_FUNC_SIZE, CPU_WORD_SIZE
 	.equ	CPU_PWR_DWN_OPS_SIZE, CPU_WORD_SIZE * CPU_MAX_PWR_DWN_OPS
 	.equ	CPU_ERRATA_FUNC_SIZE, CPU_WORD_SIZE
@@ -83,7 +84,8 @@
 	.equ	CPU_RESET_FUNC, CPU_MIDR + CPU_MIDR_SIZE
 	.equ	CPU_EXTRA1_FUNC, CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
 	.equ	CPU_EXTRA2_FUNC, CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
-	.equ	CPU_PWR_DWN_OPS, CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+	.equ	CPU_E_HANDLER_FUNC, CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+	.equ	CPU_PWR_DWN_OPS, CPU_E_HANDLER_FUNC + CPU_E_HANDLER_FUNC_SIZE
 	.equ	CPU_ERRATA_FUNC, CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
 	.equ	CPU_ERRATA_LOCK, CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
 	.equ	CPU_ERRATA_PRINTED, CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
@@ -139,6 +141,8 @@
 	 * This is a placeholder for future per CPU operations.  Currently
 	 * some CPUs use this entry to set a function to disable the
 	 * workaround for CVE-2018-3639.
+	 * _e_handler:
+	 *	This is a placeholder for future per CPU exception handlers.
 	 * _power_down_ops:
 	 *	Comma-separated list of functions to perform power-down
 	 *	operations on the CPU. At least one, and up to
@@ -149,7 +153,7 @@
 	 * used to handle power down at subsequent levels
 	 */
	.macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
-		_extra1:req, _extra2:req, _power_down_ops:vararg
+		_extra1:req, _extra2:req, _e_handler:req, _power_down_ops:vararg
 	.section cpu_ops, "a"
 	.align 3
 	.type cpu_ops_\_name, %object
@@ -159,6 +163,7 @@
 #endif
 	.quad	\_extra1
 	.quad	\_extra2
+	.quad	\_e_handler
 #ifdef IMAGE_BL31
 	/* Insert list of functions */
 	fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
@@ -203,15 +208,21 @@
 
 	.macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
 		_power_down_ops:vararg
-		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, \
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, \
 			\_power_down_ops
 	.endm
 
+	.macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
+		_e_handler:req, _power_down_ops:vararg
+		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
+			0, 0, \_e_handler, \_power_down_ops
+	.endm
+
 	.macro declare_cpu_ops_wa _name:req, _midr:req, \
 		_resetfunc:req, _extra1:req, _extra2:req, \
 		_power_down_ops:vararg
 		declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
-			\_extra1, \_extra2, \_power_down_ops
+			\_extra1, \_extra2, 0, \_power_down_ops
 	.endm
 
 #if REPORT_ERRATA
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
index f90aa2ea..fa733ce1 100644
--- a/include/lib/cpus/aarch64/neoverse_n1.h
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -12,6 +12,9 @@
 /* Neoverse N1 MIDR for revision 0 */
 #define NEOVERSE_N1_MIDR		U(0x410fd0c0)
 
+/* Exception Syndrome register EC code for IC Trap */
+#define NEOVERSE_N1_EC_IC_TRAP		U(0x1f)
+
 /*******************************************************************************
  * CPU Power Control register specific definitions.
  ******************************************************************************/
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index de1177c3..808c7f80 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -227,6 +227,27 @@ func cpu_rev_var_hs
 	ret
 endfunc cpu_rev_var_hs
 
+/*
+ * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
+ * application purposes. If the revision-variant is between or includes the given
+ * values, this indicates that errata applies; otherwise not.
+ *
+ * Shall clobber: x0-x4
+ */
+	.globl	cpu_rev_var_range
+func cpu_rev_var_range
+	mov	x3, #ERRATA_APPLIES
+	mov	x4, #ERRATA_NOT_APPLIES
+	cmp	x0, x1
+	csel	x1, x3, x4, hs
+	cbz	x1, 1f
+	cmp	x0, x2
+	csel	x1, x3, x4, ls
+1:
+	mov	x0, x1
+	ret
+endfunc cpu_rev_var_range
+
 #if REPORT_ERRATA
 /*
  * void print_errata_status(void);
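cpu_rev_var_range operates on the packed revision-variant value produced by cpu_get_rev_var: variant in bits [7:4] and revision in bits [3:0], so r3p0 encodes as 0x30 and r4p0 as 0x40, and the test is an inclusive range check. In C terms (the function name here is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* rev_var packs variant[7:4] | revision[3:0], e.g. r3p0 -> 0x30 */
    static bool errata_applies_in_range(uint8_t rev_var, uint8_t lo, uint8_t hi)
    {
    	return (rev_var >= lo) && (rev_var <= hi);
    }

    /* check_errata_1542419 below amounts to:
       errata_applies_in_range(rev_var, 0x30, 0x40) */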
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index 31e7a3a7..c9bb005e 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -9,6 +9,7 @@
 #include <neoverse_n1.h>
 #include <cpuamu.h>
 #include <cpu_macros.S>
+#include <context.h>
 
 /* Hardware handled coherency */
 #if HW_ASSISTED_COHERENCY == 0
@@ -20,6 +21,10 @@
 #error "Neoverse-N1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
 #endif
 
+#if ERRATA_N1_1542419
+	.global neoverse_n1_errata_ic_trap_handler
+#endif
+
 /* --------------------------------------------------
  * Errata Workaround for Neoverse N1 Erratum 1043202.
  * This applies to revision r0p0 and r1p0 of Neoverse N1.
@@ -337,6 +342,41 @@ func check_errata_1315703
 	b	cpu_rev_var_ls
 endfunc check_errata_1315703
 
+/* --------------------------------------------------
+ * Errata Workaround for Neoverse N1 Erratum 1542419.
+ * This applies to revisions r3p0 - r4p0 of Neoverse N1
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_n1_1542419_wa
+	/* Compare x0 against revision r3p0 and r4p0 */
+	mov	x17, x30
+	bl	check_errata_1542419
+	cbz	x0, 1f
+
+	/* Apply instruction patching sequence */
+	ldr	x0, =0x0
+	msr	CPUPSELR_EL3, x0
+	ldr	x0, =0xEE670D35
+	msr	CPUPOR_EL3, x0
+	ldr	x0, =0xFFFF0FFF
+	msr	CPUPMR_EL3, x0
+	ldr	x0, =0x08000020007D
+	msr	CPUPCR_EL3, x0
+	isb
+1:
+	ret	x17
+endfunc errata_n1_1542419_wa
+
+func check_errata_1542419
+	/* Applies to everything r3p0 - r4p0. */
+	mov	x1, #0x30
+	mov	x2, #0x40
+	b	cpu_rev_var_range
+endfunc check_errata_1542419
+
 func neoverse_n1_reset_func
 	mov	x19, x30
@@ -406,6 +446,11 @@ func neoverse_n1_reset_func
 	bl	errata_n1_1315703_wa
 #endif
 
+#if ERRATA_N1_1542419
+	mov	x0, x18
+	bl	errata_n1_1542419_wa
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -471,6 +516,7 @@ func neoverse_n1_errata_report
 	report_errata ERRATA_N1_1262888, neoverse_n1, 1262888
 	report_errata ERRATA_N1_1275112, neoverse_n1, 1275112
 	report_errata ERRATA_N1_1315703, neoverse_n1, 1315703
+	report_errata ERRATA_N1_1542419, neoverse_n1, 1542419
 	report_errata ERRATA_DSU_936184, neoverse_n1, dsu_936184
 
 	ldp	x8, x30, [sp], #16
@@ -478,6 +524,42 @@ func neoverse_n1_errata_report
 	ret
 endfunc neoverse_n1_errata_report
 #endif
 
+/*
+ * Handle trap of EL0 IC IVAU instructions to EL3 by executing a TLB
+ * inner-shareable invalidation to an arbitrary address followed by a DSB.
+ *
+ * x1: Exception Syndrome
+ */
+func neoverse_n1_errata_ic_trap_handler
+	cmp	x1, #NEOVERSE_N1_EC_IC_TRAP
+	b.ne	1f
+	tlbi	vae3is, xzr
+	dsb	sy
+
+	# Skip the IC instruction itself
+	mrs	x3, elr_el3
+	add	x3, x3, #4
+	msr	elr_el3, x3
+
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+
+#if IMAGE_BL31 && RAS_EXTENSION
+	/*
+	 * Issue Error Synchronization Barrier to synchronize SErrors before
+	 * exiting EL3. We're running with EAs unmasked, so any synchronized
+	 * errors would be taken immediately; therefore no need to inspect
+	 * DISR_EL1 register.
+	 */
+	esb
+#endif
+	eret
+1:
+	ret
+endfunc neoverse_n1_errata_ic_trap_handler
+
 /* ---------------------------------------------
  * This function provides neoverse_n1 specific
  * register information for crash reporting.
@@ -497,6 +579,7 @@ func neoverse_n1_cpu_reg_dump
 	ret
 endfunc neoverse_n1_cpu_reg_dump
 
-declare_cpu_ops neoverse_n1, NEOVERSE_N1_MIDR, \
+declare_cpu_ops_eh neoverse_n1, NEOVERSE_N1_MIDR, \
 	neoverse_n1_reset_func, \
+	neoverse_n1_errata_ic_trap_handler, \
 	neoverse_n1_core_pwr_dwn
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 26040235..078888eb 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -278,6 +278,10 @@ ERRATA_N1_1275112	?=0
 # to revisions before r3p1 of the Neoverse N1 cpu.
 ERRATA_N1_1315703	?=1
 
+# Flag to apply erratum 1542419 workaround during reset. This erratum applies
+# to revisions r3p0 - r4p0 of the Neoverse N1 cpu.
+ERRATA_N1_1542419	?=0
+
 # Flag to apply DSU erratum 798953. This erratum applies to DSUs revision r0p0.
 # Applying the workaround results in higher DSU power consumption on idle.
 ERRATA_DSU_798953	?=0
@@ -507,6 +511,10 @@ $(eval $(call add_define,ERRATA_N1_1275112))
 $(eval $(call assert_boolean,ERRATA_N1_1315703))
 $(eval $(call add_define,ERRATA_N1_1315703))
 
+# Process ERRATA_N1_1542419 flag
+$(eval $(call assert_boolean,ERRATA_N1_1542419))
+$(eval $(call add_define,ERRATA_N1_1542419))
+
 # Process ERRATA_DSU_798953 flag
 $(eval $(call assert_boolean,ERRATA_DSU_798953))
 $(eval $(call add_define,ERRATA_DSU_798953))
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
index d0569f1c..e941b8a3 100644
--- a/lib/locks/exclusive/aarch64/spinlock.S
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -9,56 +9,38 @@
 	.globl	spin_lock
 	.globl	spin_unlock
 
-#if ARM_ARCH_AT_LEAST(8, 1)
+#if USE_SPINLOCK_CAS
+#if !ARM_ARCH_AT_LEAST(8, 1)
+#error USE_SPINLOCK_CAS option requires at least an ARMv8.1 platform
+#endif
 
 /*
  * When compiled for ARMv8.1 or later, choose spin locks based on Compare and
  * Swap instruction.
  */
-# define USE_CAS	1
-
-/*
- * Lock contenders using CAS, upon failing to acquire the lock, wait with the
- * monitor in open state. Therefore, a normal store upon unlocking won't
- * generate an SEV. Use explicit SEV instruction with CAS unlock.
- */
-# define COND_SEV()	sev
-
-#else
-
-# define USE_CAS	0
-
-/*
- * Lock contenders using exclusive pairs, upon failing to acquire the lock, wait
- * with the monitor in exclusive state. A normal store upon unlocking will
- * implicitly generate an envent; so, no explicit SEV with unlock is required.
- */
-# define COND_SEV()
-
-#endif
-
-#if USE_CAS
 
 /*
  * Acquire lock using Compare and Swap instruction.
  *
- * Compare for 0 with acquire semantics, and swap 1. Wait until CAS returns
- * 0.
+ * Compare for 0 with acquire semantics, and swap 1. If failed to acquire, use
+ * load exclusive semantics to monitor the address and enter WFE.
  *
  * void spin_lock(spinlock_t *lock);
 */
 func spin_lock
 	mov	w2, #1
-	sevl
-1:
+1:	mov	w1, wzr
+2:	casa	w1, w2, [x0]
+	cbz	w1, 3f
+	ldxr	w1, [x0]
+	cbz	w1, 2b
 	wfe
-	mov	w1, wzr
-	casa	w1, w2, [x0]
-	cbnz	w1, 1b
+	b	1b
+3:
 	ret
 endfunc spin_lock
 
-#else /* !USE_CAS */
+#else /* !USE_SPINLOCK_CAS */
 
 /*
  * Acquire lock using load-/store-exclusive instruction pair.
@@ -76,17 +58,18 @@ l2:	ldaxr	w1, [x0]
 	ret
 endfunc spin_lock
 
-#endif /* USE_CAS */
+#endif /* USE_SPINLOCK_CAS */
 
 /*
  * Release lock previously acquired by spin_lock.
 *
- * Unconditionally write 0, and conditionally generate an event.
+ * Use store-release to unconditionally clear the spinlock variable.
+ * Store operation generates an event to all cores waiting in WFE
+ * when address is monitored by the global monitor.
 *
 * void spin_unlock(spinlock_t *lock);
 */
 func spin_unlock
 	stlr	wzr, [x0]
-	COND_SEV()
 	ret
 endfunc spin_unlock
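The reworked acquire loop fixes a lost-wakeup hazard: a failed CAS leaves the exclusive monitor open, so the sequence now re-arms the monitor with LDXR before issuing WFE, guaranteeing the unlocking STLR generates a wake-up event (which is also why spin_unlock no longer needs COND_SEV). A rough C11 rendering of the loop follows; WFE and the exclusive monitor cannot be expressed in portable C, so this is a sketch only:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
    	_Atomic uint32_t val;
    } spinlock_sketch_t;

    static void spin_lock_sketch(spinlock_sketch_t *lock)
    {
    	for (;;) {
    		uint32_t expected = 0U;	/* casa: compare 0, swap in 1 */

    		if (atomic_compare_exchange_strong_explicit(&lock->val,
    				&expected, 1U, memory_order_acquire,
    				memory_order_relaxed))
    			return;

    		/* ldxr ... wfe: spin/wait until the holder's stlr clears it */
    		while (atomic_load_explicit(&lock->val,
    				memory_order_relaxed) != 0U) {
    			/* wfe() hint would go here */
    		}
    	}
    }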
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index b6f76559..b7fb173b 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -234,3 +234,8 @@ else
 endif
 
 SANITIZE_UB := off
+
+# For ARMv8.1 (AArch64) platforms, enabling this option selects the spinlock
+# implementation variant using the ARMv8.1-LSE compare-and-swap instruction.
+# Default: disabled
+USE_SPINLOCK_CAS := 0
diff --git a/plat/marvell/a8k/common/ble/ble.mk b/plat/marvell/a8k/common/ble/ble.mk
index b24083fc..b6a9cd29 100644
--- a/plat/marvell/a8k/common/ble/ble.mk
+++ b/plat/marvell/a8k/common/ble/ble.mk
@@ -19,6 +19,7 @@ BLE_SOURCES		+=	$(BLE_PATH)/ble_main.c		\
 
 PLAT_INCLUDES		+=	-I$(MV_DDR_PATH)		\
 				-I$(CURDIR)/include		\
+				-I$(CURDIR)/include/arch/aarch64	\
 				-I$(CURDIR)/include/lib/libc		\
 				-I$(CURDIR)/include/lib/libc/aarch64	\
 				-I$(CURDIR)/drivers/marvell
diff --git a/plat/st/common/include/stm32mp_common.h b/plat/st/common/include/stm32mp_common.h
index 59657fdc..4f856797 100644
--- a/plat/st/common/include/stm32mp_common.h
+++ b/plat/st/common/include/stm32mp_common.h
@@ -1,6 +1,5 @@
 /*
  * Copyright (C) 2018-2019, STMicroelectronics - All Rights Reserved
- * Copyright (c) 2018-2019, Linaro Limited
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -12,8 +11,6 @@
 
 #include <platform_def.h>
 
-#include <arch_helpers.h>
-
 /* Functions to save and get boot context address given by ROM code */
 void stm32mp_save_boot_ctx_address(uintptr_t address);
 uintptr_t stm32mp_get_boot_ctx_address(void);
@@ -82,21 +79,6 @@ unsigned long stm32mp_clk_get_rate(unsigned long id);
 /* Initialise the IO layer and register platform IO devices */
 void stm32mp_io_setup(void);
 
-static inline uint64_t arm_cnt_us2cnt(uint32_t us)
-{
-	return ((uint64_t)us * (uint64_t)read_cntfrq()) / 1000000ULL;
-}
-
-static inline uint64_t timeout_init_us(uint32_t us)
-{
-	return read_cntpct_el0() + arm_cnt_us2cnt(us);
-}
-
-static inline bool timeout_elapsed(uint64_t expire)
-{
-	return read_cntpct_el0() > expire;
-}
-
 /*
  * Check that the STM32 header of a .stm32 binary image is valid
  * @param header: pointer to the stm32 image header