35 files changed, 1779 insertions, 490 deletions
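Much of this change splits the PSCI interfaces needed by other images out of psci.h into the new psci_lib.h header, so that PSCI can be linked into an image as a library (for example by an AArch32 BL32 / SP_MIN). A minimal sketch of how a client image might drive the new interface is shown below; the names plat_warmboot_entry and sp_min_psci_init are illustrative only and do not come from this patch.

#include <psci_lib.h>

/* Assumed platform warm boot entry point, to be programmed into the mailbox. */
extern void plat_warmboot_entry(void);

/* Statically build a psci_lib_args_t with a VERSION_1 param header. */
DEFINE_STATIC_PSCI_LIB_ARGS_V1(psci_args, plat_warmboot_entry);

/* Illustrative one-time init: hand the PSCI library its arguments. */
void sp_min_psci_init(void)
{
	if (psci_setup(&psci_args) != 0) {
		/* platform-specific error handling would go here */
	}
}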
@@ -374,6 +374,55 @@ CRTTOOL ?= ${CRTTOOLPATH}/cert_create${BIN_EXT} FIPTOOLPATH ?= tools/fiptool FIPTOOL ?= ${FIPTOOLPATH}/fiptool${BIN_EXT} +################################################################################ +# Include BL specific makefiles +################################################################################ +ifdef BL1_SOURCES +NEED_BL1 := yes +include bl1/bl1.mk +endif + +ifdef BL2_SOURCES +NEED_BL2 := yes +include bl2/bl2.mk +endif + +# For AArch32, BL31 is not applicable, and BL2U is not supported at present. +ifneq (${ARCH},aarch32) +ifdef BL2U_SOURCES +NEED_BL2U := yes +include bl2u/bl2u.mk +endif + +ifdef BL31_SOURCES +# When booting an EL3 payload, there is no need to compile the BL31 image nor +# put it in the FIP. +ifndef EL3_PAYLOAD_BASE +NEED_BL31 := yes +include bl31/bl31.mk +endif +endif +endif + +ifeq (${ARCH},aarch32) +NEED_BL32 := yes + +################################################################################ +# Build `AARCH32_SP` as BL32 image for AArch32 +################################################################################ +ifneq (${AARCH32_SP},none) +# We expect to locate an sp.mk under the specified AARCH32_SP directory +AARCH32_SP_MAKE := $(wildcard bl32/${AARCH32_SP}/${AARCH32_SP}.mk) + +ifeq (${AARCH32_SP_MAKE},) + $(error Error: No bl32/${AARCH32_SP}/${AARCH32_SP}.mk located) +endif + +$(info Including ${AARCH32_SP_MAKE}) +include ${AARCH32_SP_MAKE} +endif + +endif ################################################################################ # Build options checks @@ -460,56 +509,6 @@ else endif ################################################################################ -# Include BL specific makefiles -################################################################################ -ifdef BL1_SOURCES -NEED_BL1 := yes -include bl1/bl1.mk -endif - -ifdef BL2_SOURCES -NEED_BL2 := yes -include bl2/bl2.mk -endif - -# For AArch32, BL31 is not applicable, and BL2U is not supported at present. -ifneq (${ARCH},aarch32) -ifdef BL2U_SOURCES -NEED_BL2U := yes -include bl2u/bl2u.mk -endif - -ifdef BL31_SOURCES -# When booting an EL3 payload, there is no need to compile the BL31 image nor -# put it in the FIP. -ifndef EL3_PAYLOAD_BASE -NEED_BL31 := yes -include bl31/bl31.mk -endif -endif -endif - -ifeq (${ARCH},aarch32) -NEED_BL32 := yes - -################################################################################ -# Build `AARCH32_SP` as BL32 image for AArch32 -################################################################################ -ifneq (${AARCH32_SP},none) -# We expect to locate an sp.mk under the specified AARCH32_SP directory -AARCH32_SP_MAKE := $(wildcard bl32/${AARCH32_SP}/${AARCH32_SP}.mk) - -ifeq (${AARCH32_SP_MAKE},) - $(error Error: No bl32/${AARCH32_SP}/${AARCH32_SP}.mk located) -endif - -$(info Including ${AARCH32_SP_MAKE}) -include ${AARCH32_SP_MAKE} -endif - -endif - -################################################################################ # Build targets ################################################################################ diff --git a/drivers/auth/mbedtls/mbedtls_common.c b/drivers/auth/mbedtls/mbedtls_common.c index 053bf1a4..1d2df5a5 100644 --- a/drivers/auth/mbedtls/mbedtls_common.c +++ b/drivers/auth/mbedtls/mbedtls_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -36,9 +36,9 @@ /* * mbed TLS heap */ -#if (MBEDTLS_KEY_ALG_ID == MBEDTLS_ECDSA) +#if (TBBR_KEY_ALG_ID == TBBR_ECDSA) #define MBEDTLS_HEAP_SIZE (14*1024) -#elif (MBEDTLS_KEY_ALG_ID == MBEDTLS_RSA) +#elif (TBBR_KEY_ALG_ID == TBBR_RSA) #define MBEDTLS_HEAP_SIZE (8*1024) #endif static unsigned char heap[MBEDTLS_HEAP_SIZE]; diff --git a/drivers/auth/mbedtls/mbedtls_crypto.mk b/drivers/auth/mbedtls/mbedtls_crypto.mk index b7880971..302a0a1c 100644 --- a/drivers/auth/mbedtls/mbedtls_crypto.mk +++ b/drivers/auth/mbedtls/mbedtls_crypto.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -55,18 +55,18 @@ ifeq (${MBEDTLS_KEY_ALG},ecdsa) ecp_curves.c \ ecp.c \ ) - MBEDTLS_KEY_ALG_ID := MBEDTLS_ECDSA + TBBR_KEY_ALG_ID := TBBR_ECDSA else ifeq (${MBEDTLS_KEY_ALG},rsa) MBEDTLS_CRYPTO_SOURCES += $(addprefix ${MBEDTLS_DIR}/library/, \ rsa.c \ ) - MBEDTLS_KEY_ALG_ID := MBEDTLS_RSA + TBBR_KEY_ALG_ID := TBBR_RSA else $(error "MBEDTLS_KEY_ALG=${MBEDTLS_KEY_ALG} not supported on mbed TLS") endif -# mbed TLS libraries rely on this define to build correctly -$(eval $(call add_define,MBEDTLS_KEY_ALG_ID)) +# Needs to be set to drive mbed TLS configuration correctly +$(eval $(call add_define,TBBR_KEY_ALG_ID)) BL1_SOURCES += ${MBEDTLS_CRYPTO_SOURCES} BL2_SOURCES += ${MBEDTLS_CRYPTO_SOURCES} diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 66c20fc5..38be6283 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,9 +31,8 @@ #ifndef __BL_COMMON_H__ #define __BL_COMMON_H__ -#define SECURE 0x0 -#define NON_SECURE 0x1 -#define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE)) +#include <ep_info.h> +#include <param_header.h> #define UP 1 #define DOWN 0 @@ -45,25 +44,6 @@ #define TOP 0x1 #define BOTTOM !TOP -/******************************************************************************* - * Constants that allow assembler code to access members of and the - * 'entry_point_info' structure at their correct offsets. - ******************************************************************************/ -#define ENTRY_POINT_INFO_PC_OFFSET 0x08 -#ifdef AARCH32 -#define ENTRY_POINT_INFO_ARGS_OFFSET 0x10 -#else -#define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 -#endif - -/* The following are used to set/get image attributes. */ -#define PARAM_EP_SECURITY_MASK (0x1) - -#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK) -#define SET_SECURITY_STATE(x, security) \ - ((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security)) - - /* * The following are used for image state attributes. * Image can only be in one of the following state. 
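The entry point and parameter header helpers removed from bl_common.h in the hunk above are reintroduced unchanged in the new ep_info.h and param_header.h headers added later in this patch. A minimal usage sketch of those macros follows; the variable bl33_image_ep_info and the helper set_up_bl33_ep are illustrative names, not part of this change.

#include <bl_common.h>	/* now pulls in ep_info.h and param_header.h */

static entry_point_info_t bl33_image_ep_info;

/* Hypothetical helper: describe a non-secure BL33 entry point. */
void set_up_bl33_ep(uintptr_t pc, uint32_t spsr)
{
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
	bl33_image_ep_info.pc = pc;
	bl33_image_ep_info.spsr = spsr;
}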
@@ -75,59 +55,11 @@ #define IMAGE_STATE_EXECUTED 4 #define IMAGE_STATE_INTERRUPTED 5 -#define EP_EE_MASK 0x2 -#define EP_EE_LITTLE 0x0 -#define EP_EE_BIG 0x2 -#define EP_GET_EE(x) (x & EP_EE_MASK) -#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee)) - -#define EP_ST_MASK 0x4 -#define EP_ST_DISABLE 0x0 -#define EP_ST_ENABLE 0x4 -#define EP_GET_ST(x) (x & EP_ST_MASK) -#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee)) - -#define EP_EXE_MASK 0x8 -#define NON_EXECUTABLE 0x0 -#define EXECUTABLE 0x8 -#define EP_GET_EXE(x) (x & EP_EXE_MASK) -#define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee)) - -#define EP_FIRST_EXE_MASK 0x10 -#define EP_FIRST_EXE 0x10 -#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK) -#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee)) - -#define PARAM_EP 0x01 -#define PARAM_IMAGE_BINARY 0x02 -#define PARAM_BL31 0x03 -#define PARAM_BL_LOAD_INFO 0x04 -#define PARAM_BL_PARAMS 0x05 -#define PARAM_PSCI_LIB_ARGS 0x06 - #define IMAGE_ATTRIB_SKIP_LOADING 0x02 #define IMAGE_ATTRIB_PLAT_SETUP 0x04 -#define VERSION_1 0x01 -#define VERSION_2 0x02 - #define INVALID_IMAGE_ID (0xFFFFFFFF) -#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \ - (_p)->h.type = (uint8_t)(_type); \ - (_p)->h.version = (uint8_t)(_ver); \ - (_p)->h.size = (uint16_t)sizeof(*_p); \ - (_p)->h.attr = (uint32_t)(_attr) ; \ - } while (0) - -/* Following is used for populating structure members statically. */ -#define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr) \ - ._p.h.type = (uint8_t)(_type), \ - ._p.h.version = (uint8_t)(_ver), \ - ._p.h.size = (uint16_t)sizeof(_p_type), \ - ._p.h.attr = (uint32_t)(_attr) - - /******************************************************************************* * Constants to indicate type of exception to the common exception handler. ******************************************************************************/ @@ -149,10 +81,9 @@ #define SERROR_AARCH32 0xf #ifndef __ASSEMBLY__ -#include <cdefs.h> /* For __dead2 */ #include <cassert.h> -#include <stdint.h> #include <stddef.h> +#include <stdint.h> #include <types.h> #include <utils.h> /* To retain compatibility */ @@ -185,7 +116,6 @@ extern uintptr_t __COHERENT_RAM_START__; extern uintptr_t __COHERENT_RAM_END__; #endif - /******************************************************************************* * Structure used for telling the next BL how much of a particular type of * memory is available for its use and how much is already used. 
@@ -199,55 +129,6 @@ typedef struct meminfo { #endif } meminfo_t; -typedef struct aapcs64_params { - u_register_t arg0; - u_register_t arg1; - u_register_t arg2; - u_register_t arg3; - u_register_t arg4; - u_register_t arg5; - u_register_t arg6; - u_register_t arg7; -} aapcs64_params_t; - -typedef struct aapcs32_params { - u_register_t arg0; - u_register_t arg1; - u_register_t arg2; - u_register_t arg3; -} aapcs32_params_t; - -/*************************************************************************** - * This structure provides version information and the size of the - * structure, attributes for the structure it represents - ***************************************************************************/ -typedef struct param_header { - uint8_t type; /* type of the structure */ - uint8_t version; /* version of this structure */ - uint16_t size; /* size of this structure in bytes */ - uint32_t attr; /* attributes: unused bits SBZ */ -} param_header_t; - -/***************************************************************************** - * This structure represents the superset of information needed while - * switching exception levels. The only two mechanisms to do so are - * ERET & SMC. Security state is indicated using bit zero of header - * attribute - * NOTE: BL1 expects entrypoint followed by spsr at an offset from the start - * of this structure defined by the macro `ENTRY_POINT_INFO_PC_OFFSET` while - * processing SMC to jump to BL31. - *****************************************************************************/ -typedef struct entry_point_info { - param_header_t h; - uintptr_t pc; - uint32_t spsr; -#ifdef AARCH32 - aapcs32_params_t args; -#else - aapcs64_params_t args; -#endif -} entry_point_info_t; - /***************************************************************************** * Image info binary provides information from the image loader that * can be used by the firmware to manage available trusted RAM. @@ -338,24 +219,6 @@ typedef struct bl31_params { #endif /* LOAD_IMAGE_V2 */ -/* - * Compile time assertions related to the 'entry_point_info' structure to - * ensure that the assembler and the compiler view of the offsets of - * the structure members is the same. - */ -CASSERT(ENTRY_POINT_INFO_PC_OFFSET == - __builtin_offsetof(entry_point_info_t, pc), \ - assert_BL31_pc_offset_mismatch); - -CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \ - __builtin_offsetof(entry_point_info_t, args), \ - assert_BL31_args_offset_mismatch); - -CASSERT(sizeof(uintptr_t) == - __builtin_offsetof(entry_point_info_t, spsr) - \ - __builtin_offsetof(entry_point_info_t, pc), \ - assert_entrypoint_and_spsr_should_be_adjacent); - /******************************************************************************* * Function & variable prototypes ******************************************************************************/ diff --git a/include/common/ep_info.h b/include/common/ep_info.h new file mode 100644 index 00000000..36136142 --- /dev/null +++ b/include/common/ep_info.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __EP_INFO_H__ +#define __EP_INFO_H__ + +#include <param_header.h> + +#define SECURE 0x0 +#define NON_SECURE 0x1 +#define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE)) + +/******************************************************************************* + * Constants that allow assembler code to access members of and the + * 'entry_point_info' structure at their correct offsets. + ******************************************************************************/ +#define ENTRY_POINT_INFO_PC_OFFSET 0x08 +#ifdef AARCH32 +#define ENTRY_POINT_INFO_ARGS_OFFSET 0x10 +#else +#define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 +#endif + +/* The following are used to set/get image attributes. */ +#define PARAM_EP_SECURITY_MASK (0x1) + +#define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK) +#define SET_SECURITY_STATE(x, security) \ + ((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security)) + +#define EP_EE_MASK 0x2 +#define EP_EE_LITTLE 0x0 +#define EP_EE_BIG 0x2 +#define EP_GET_EE(x) (x & EP_EE_MASK) +#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee)) + +#define EP_ST_MASK 0x4 +#define EP_ST_DISABLE 0x0 +#define EP_ST_ENABLE 0x4 +#define EP_GET_ST(x) (x & EP_ST_MASK) +#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee)) + +#define EP_EXE_MASK 0x8 +#define NON_EXECUTABLE 0x0 +#define EXECUTABLE 0x8 +#define EP_GET_EXE(x) (x & EP_EXE_MASK) +#define EP_SET_EXE(x, ee) ((x) = ((x) & ~EP_EXE_MASK) | (ee)) + +#define EP_FIRST_EXE_MASK 0x10 +#define EP_FIRST_EXE 0x10 +#define EP_GET_FIRST_EXE(x) ((x) & EP_FIRST_EXE_MASK) +#define EP_SET_FIRST_EXE(x, ee) ((x) = ((x) & ~EP_FIRST_EXE_MASK) | (ee)) + +#ifndef __ASSEMBLY__ + +#include <cassert.h> +#include <types.h> + +typedef struct aapcs64_params { + u_register_t arg0; + u_register_t arg1; + u_register_t arg2; + u_register_t arg3; + u_register_t arg4; + u_register_t arg5; + u_register_t arg6; + u_register_t arg7; +} aapcs64_params_t; + +typedef struct aapcs32_params { + u_register_t arg0; + u_register_t arg1; + u_register_t arg2; + u_register_t arg3; +} aapcs32_params_t; + +/***************************************************************************** + * This structure represents the superset of information needed while + * switching exception levels. The only two mechanisms to do so are + * ERET & SMC. 
Security state is indicated using bit zero of header + * attribute + * NOTE: BL1 expects entrypoint followed by spsr at an offset from the start + * of this structure defined by the macro `ENTRY_POINT_INFO_PC_OFFSET` while + * processing SMC to jump to BL31. + *****************************************************************************/ +typedef struct entry_point_info { + param_header_t h; + uintptr_t pc; + uint32_t spsr; +#ifdef AARCH32 + aapcs32_params_t args; +#else + aapcs64_params_t args; +#endif +} entry_point_info_t; + +/* + * Compile time assertions related to the 'entry_point_info' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same. + */ +CASSERT(ENTRY_POINT_INFO_PC_OFFSET == + __builtin_offsetof(entry_point_info_t, pc), \ + assert_BL31_pc_offset_mismatch); + +CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \ + __builtin_offsetof(entry_point_info_t, args), \ + assert_BL31_args_offset_mismatch); + +CASSERT(sizeof(uintptr_t) == + __builtin_offsetof(entry_point_info_t, spsr) - \ + __builtin_offsetof(entry_point_info_t, pc), \ + assert_entrypoint_and_spsr_should_be_adjacent); + +#endif /*__ASSEMBLY__*/ + +#endif /* __EP_INFO_H__ */ + diff --git a/include/common/param_header.h b/include/common/param_header.h new file mode 100644 index 00000000..176fc958 --- /dev/null +++ b/include/common/param_header.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __PARAM_HEADER_H__ +#define __PARAM_HEADER_H__ + +/* Param header types */ +#define PARAM_EP 0x01 +#define PARAM_IMAGE_BINARY 0x02 +#define PARAM_BL31 0x03 +#define PARAM_BL_LOAD_INFO 0x04 +#define PARAM_BL_PARAMS 0x05 +#define PARAM_PSCI_LIB_ARGS 0x06 + +/* Param header version */ +#define VERSION_1 0x01 +#define VERSION_2 0x02 + +#define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \ + (_p)->h.type = (uint8_t)(_type); \ + (_p)->h.version = (uint8_t)(_ver); \ + (_p)->h.size = (uint16_t)sizeof(*_p); \ + (_p)->h.attr = (uint32_t)(_attr) ; \ + } while (0) + +/* Following is used for populating structure members statically. */ +#define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr) \ + ._p.h.type = (uint8_t)(_type), \ + ._p.h.version = (uint8_t)(_ver), \ + ._p.h.size = (uint16_t)sizeof(_p_type), \ + ._p.h.attr = (uint32_t)(_attr) + +#ifndef __ASSEMBLY__ + +#include <types.h> + +/*************************************************************************** + * This structure provides version information and the size of the + * structure, attributes for the structure it represents + ***************************************************************************/ +typedef struct param_header { + uint8_t type; /* type of the structure */ + uint8_t version; /* version of this structure */ + uint16_t size; /* size of this structure in bytes */ + uint32_t attr; /* attributes: unused bits SBZ */ +} param_header_t; + +#endif /*__ASSEMBLY__*/ + +#endif /* __PARAM_HEADER_H__ */ + diff --git a/include/drivers/auth/mbedtls/mbedtls_config.h b/include/drivers/auth/mbedtls/mbedtls_config.h index a8d72415..9fce4242 100644 --- a/include/drivers/auth/mbedtls/mbedtls_config.h +++ b/include/drivers/auth/mbedtls/mbedtls_config.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,8 +33,8 @@ /* * Key algorithms currently supported on mbed TLS libraries */ -#define MBEDTLS_RSA 1 -#define MBEDTLS_ECDSA 2 +#define TBBR_RSA 1 +#define TBBR_ECDSA 2 /* * Configuration file to build mbed TLS with the required features for @@ -69,11 +69,11 @@ #define MBEDTLS_PLATFORM_C -#if (MBEDTLS_KEY_ALG_ID == MBEDTLS_ECDSA) +#if (TBBR_KEY_ALG_ID == TBBR_ECDSA) #define MBEDTLS_ECDSA_C #define MBEDTLS_ECP_C #define MBEDTLS_ECP_DP_SECP256R1_ENABLED -#elif (MBEDTLS_KEY_ALG_ID == MBEDTLS_RSA) +#elif (TBBR_KEY_ALG_ID == TBBR_RSA) #define MBEDTLS_RSA_C #endif diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index 676973ce..ca868ddb 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,7 +31,9 @@ #ifndef __CM_H__ #define __CM_H__ +#ifndef AARCH32 #include <arch.h> +#endif /******************************************************************************* * Forward declarations diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h index 34de4c28..fa85e0bc 100644 --- a/include/lib/psci/psci.h +++ b/include/lib/psci/psci.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,6 +37,7 @@ #if ENABLE_PLAT_COMPAT #include <psci_compat.h> #endif +#include <psci_lib.h> /* To maintain compatibility for SPDs */ /******************************************************************************* * Number of power domains whose state this PSCI implementation can track @@ -311,24 +312,6 @@ typedef struct plat_psci_ops { } plat_psci_ops_t; /******************************************************************************* - * Optional structure populated by the Secure Payload Dispatcher to be given a - * chance to perform any bookkeeping before PSCI executes a power management - * operation. It also allows PSCI to determine certain properties of the SP e.g. - * migrate capability etc. - ******************************************************************************/ -typedef struct spd_pm_ops { - void (*svc_on)(u_register_t target_cpu); - int32_t (*svc_off)(u_register_t __unused); - void (*svc_suspend)(u_register_t max_off_pwrlvl); - void (*svc_on_finish)(u_register_t __unused); - void (*svc_suspend_finish)(u_register_t max_off_pwrlvl); - int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu); - int32_t (*svc_migrate_info)(u_register_t *resident_cpu); - void (*svc_system_off)(void); - void (*svc_system_reset)(void); -} spd_pm_ops_t; - -/******************************************************************************* * Function & Data prototypes ******************************************************************************/ unsigned int psci_version(void); @@ -357,63 +340,6 @@ void psci_arch_setup(void); */ void psci_entrypoint(void) __deprecated; -/* - * Function prototype for the warmboot entrypoint function which will be - * programmed in the mailbox by the platform. - */ -typedef void (*mailbox_entrypoint_t)(void); - -/****************************************************************************** - * Structure to pass PSCI Library arguments. 
- *****************************************************************************/ -typedef struct psci_lib_args { - /* The version information of PSCI Library Interface */ - param_header_t h; - /* The warm boot entrypoint function */ - mailbox_entrypoint_t mailbox_ep; -} psci_lib_args_t; - -/* Helper macro to set the psci_lib_args_t structure at runtime */ -#define SET_PSCI_LIB_ARGS_V1(_p, _entry) do { \ - SET_PARAM_HEAD(_p, PARAM_PSCI_LIB_ARGS, VERSION_1, 0); \ - (_p)->mailbox_ep = (_entry); \ - } while (0) - -/* Helper macro to define the psci_lib_args_t statically */ -#define DEFINE_STATIC_PSCI_LIB_ARGS_V1(_name, _entry) \ - static const psci_lib_args_t (_name) = { \ - .h.type = (uint8_t)PARAM_PSCI_LIB_ARGS, \ - .h.version = (uint8_t)VERSION_1, \ - .h.size = (uint16_t)sizeof(_name), \ - .h.attr = 0, \ - .mailbox_ep = (_entry) \ - } - -/* Helper macro to verify the pointer to psci_lib_args_t structure */ -#define VERIFY_PSCI_LIB_ARGS_V1(_p) ((_p) \ - && ((_p)->h.type == PARAM_PSCI_LIB_ARGS) \ - && ((_p)->h.version == VERSION_1) \ - && ((_p)->h.size == sizeof(*(_p))) \ - && ((_p)->h.attr == 0) \ - && ((_p)->mailbox_ep)) - -/****************************************************************************** - * PSCI Library Interfaces - *****************************************************************************/ -u_register_t psci_smc_handler(uint32_t smc_fid, - u_register_t x1, - u_register_t x2, - u_register_t x3, - u_register_t x4, - void *cookie, - void *handle, - u_register_t flags); -int psci_setup(const psci_lib_args_t *lib_args); -void psci_warmboot_entrypoint(void); -void psci_register_spd_pm_hook(const spd_pm_ops_t *pm); -void psci_prepare_next_non_secure_ctx( - entry_point_info_t *next_image_info); - #endif /*__ASSEMBLY__*/ #endif /* __PSCI_H__ */ diff --git a/include/lib/psci/psci_lib.h b/include/lib/psci/psci_lib.h new file mode 100644 index 00000000..2169d6de --- /dev/null +++ b/include/lib/psci/psci_lib.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __PSCI_LIB_H__ +#define __PSCI_LIB_H__ + +#include <ep_info.h> + +#ifndef __ASSEMBLY__ +#include <types.h> + +/******************************************************************************* + * Optional structure populated by the Secure Payload Dispatcher to be given a + * chance to perform any bookkeeping before PSCI executes a power management + * operation. It also allows PSCI to determine certain properties of the SP e.g. + * migrate capability etc. + ******************************************************************************/ +typedef struct spd_pm_ops { + void (*svc_on)(u_register_t target_cpu); + int32_t (*svc_off)(u_register_t __unused); + void (*svc_suspend)(u_register_t max_off_pwrlvl); + void (*svc_on_finish)(u_register_t __unused); + void (*svc_suspend_finish)(u_register_t max_off_pwrlvl); + int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu); + int32_t (*svc_migrate_info)(u_register_t *resident_cpu); + void (*svc_system_off)(void); + void (*svc_system_reset)(void); +} spd_pm_ops_t; + +/* + * Function prototype for the warmboot entrypoint function which will be + * programmed in the mailbox by the platform. + */ +typedef void (*mailbox_entrypoint_t)(void); + +/****************************************************************************** + * Structure to pass PSCI Library arguments. + *****************************************************************************/ +typedef struct psci_lib_args { + /* The version information of PSCI Library Interface */ + param_header_t h; + /* The warm boot entrypoint function */ + mailbox_entrypoint_t mailbox_ep; +} psci_lib_args_t; + +/* Helper macro to set the psci_lib_args_t structure at runtime */ +#define SET_PSCI_LIB_ARGS_V1(_p, _entry) do { \ + SET_PARAM_HEAD(_p, PARAM_PSCI_LIB_ARGS, VERSION_1, 0); \ + (_p)->mailbox_ep = (_entry); \ + } while (0) + +/* Helper macro to define the psci_lib_args_t statically */ +#define DEFINE_STATIC_PSCI_LIB_ARGS_V1(_name, _entry) \ + static const psci_lib_args_t (_name) = { \ + .h.type = (uint8_t)PARAM_PSCI_LIB_ARGS, \ + .h.version = (uint8_t)VERSION_1, \ + .h.size = (uint16_t)sizeof(_name), \ + .h.attr = 0, \ + .mailbox_ep = (_entry) \ + } + +/* Helper macro to verify the pointer to psci_lib_args_t structure */ +#define VERIFY_PSCI_LIB_ARGS_V1(_p) ((_p) \ + && ((_p)->h.type == PARAM_PSCI_LIB_ARGS) \ + && ((_p)->h.version == VERSION_1) \ + && ((_p)->h.size == sizeof(*(_p))) \ + && ((_p)->h.attr == 0) \ + && ((_p)->mailbox_ep)) + +/****************************************************************************** + * PSCI Library Interfaces + *****************************************************************************/ +u_register_t psci_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags); +int psci_setup(const psci_lib_args_t *lib_args); +void psci_warmboot_entrypoint(void); +void psci_register_spd_pm_hook(const spd_pm_ops_t *pm); +void psci_prepare_next_non_secure_ctx( + entry_point_info_t *next_image_info); +#endif /* __ASSEMBLY__ */ + +#endif /* __PSCI_LIB_H */ + diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h index 1f4ae6d0..3105d7aa 100644 --- a/include/lib/xlat_tables/xlat_tables_defs.h +++ b/include/lib/xlat_tables/xlat_tables_defs.h @@ -135,4 +135,10 @@ */ #define DISABLE_DCACHE (1 << 0) +/* + * This flag marks the translation tables are Non-cacheable for MMU accesses. 
+ * If the flag is not specified, by default the tables are cacheable. + */ +#define XLAT_TABLE_NC (1 << 1) + #endif /* __XLAT_TABLES_DEFS_H__ */ diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S index d0e5cd06..b17b9037 100644 --- a/lib/aarch32/cache_helpers.S +++ b/lib/aarch32/cache_helpers.S @@ -118,7 +118,7 @@ loop1: mov r12, r2, LSR r10 // extract cache type bits from clidr and r12, r12, #7 // mask the bits for current cache only cmp r12, #2 // see what cache we have at this level - blt level_done // no cache or only instruction cache at this level + blo level_done // no cache or only instruction cache at this level stcopr r1, CSSELR // select current cache level in csselr isb // isb to sych the new cssr&csidr @@ -138,14 +138,14 @@ loop3: blx r6 subs r7, r7, #1 // decrement the set number - bge loop3 + bhs loop3 subs r9, r9, #1 // decrement the way number - bge loop2 + bhs loop2 level_done: add r1, r1, #2 // increment the cache number cmp r3, r1 dsb sy // ensure completion of previous cache maintenance instruction - bgt loop1 + bhi loop1 mov r6, #0 stcopr r6, CSSELR //select cache level 0 in csselr diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S index dc847995..5b17c21c 100644 --- a/lib/aarch32/misc_helpers.S +++ b/lib/aarch32/misc_helpers.S @@ -170,7 +170,7 @@ func memcpy4 /* copy 4 bytes at a time */ m_loop4: cmp r2, #4 - blt m_loop1 + blo m_loop1 ldr r3, [r1], #4 str r3, [r0], #4 sub r2, r2, #4 diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S index 476b906e..acafea70 100644 --- a/lib/aarch64/cache_helpers.S +++ b/lib/aarch64/cache_helpers.S @@ -119,7 +119,7 @@ loop1: lsr x1, x0, x2 // extract cache type bits from clidr and x1, x1, #7 // mask the bits for current cache only cmp x1, #2 // see what cache we have at this level - b.lt level_done // nothing to do if no cache or icache + b.lo level_done // nothing to do if no cache or icache msr csselr_el1, x10 // select current cache level in csselr isb // isb to sych the new cssr&csidr @@ -144,10 +144,10 @@ loop3_\_op: orr w11, w9, w7 // combine cache, way and set number dc \_op, x11 subs w7, w7, w17 // decrement set number - b.ge loop3_\_op + b.hs loop3_\_op subs x9, x9, x16 // decrement way number - b.ge loop2_\_op + b.hs loop2_\_op b level_done .endm @@ -155,7 +155,7 @@ loop3_\_op: level_done: add x10, x10, #2 // increment cache number cmp x3, x10 - b.gt loop1 + b.hi loop1 msr csselr_el1, xzr // select cache level 0 in csselr dsb sy // barrier to complete final cache operation isb diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S index c41978ed..dc1b6e61 100644 --- a/lib/cpus/aarch32/cpu_helpers.S +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -157,7 +157,7 @@ func get_cpu_ops_ptr 1: /* Check if we have reached end of list */ cmp r4, r5 - bge error_exit + bhs error_exit /* load the midr from the cpu_ops */ ldr r1, [r4], #CPU_OPS_SIZE diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c index e8408da8..316a60e7 100644 --- a/lib/xlat_tables/aarch32/xlat_tables.c +++ b/lib/xlat_tables/aarch32/xlat_tables.c @@ -130,13 +130,21 @@ void enable_mmu_secure(unsigned int flags) tlbiall(); /* - * Set TTBCR bits as well. Set TTBR0 table properties as Inner - * & outer WBWA & shareable. Disable TTBR1. + * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1. 
*/ - ttbcr = TTBCR_EAE_BIT | - TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | - TTBCR_RGN0_INNER_WBA | - (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + if (flags & XLAT_TABLE_NC) { + /* Inner & outer non-cacheable non-shareable. */ + ttbcr = TTBCR_EAE_BIT | + TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC | + TTBCR_RGN0_INNER_NC | + (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + } else { + /* Inner & outer WBWA & shareable. */ + ttbcr = TTBCR_EAE_BIT | + TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | + TTBCR_RGN0_INNER_WBA | + (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + } ttbcr |= TTBCR_EPD1_BIT; write_ttbcr(ttbcr); diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c index af12b9f1..ecb12022 100644 --- a/lib/xlat_tables/aarch64/xlat_tables.c +++ b/lib/xlat_tables/aarch64/xlat_tables.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -192,11 +192,18 @@ void init_xlat_tables(void) _tlbi_fct(); \ \ /* Set TCR bits as well. */ \ - /* Inner & outer WBWA & shareable. */ \ /* Set T0SZ to (64 - width of virtual address space) */ \ - tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \ - TCR_RGN_INNER_WBA | \ - (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + if (flags & XLAT_TABLE_NC) { \ + /* Inner & outer non-cacheable non-shareable. */\ + tcr = TCR_SH_NON_SHAREABLE | \ + TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \ + (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + } else { \ + /* Inner & outer WBWA & shareable. */ \ + tcr = TCR_SH_INNER_SHAREABLE | \ + TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \ + (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + } \ tcr |= _tcr_extra; \ write_tcr_el##_el(tcr); \ \ diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c index 7de90304..ba0e53d6 100644 --- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c @@ -122,13 +122,21 @@ void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table) write_mair0(mair0); /* - * Set TTBCR bits as well. Set TTBR0 table properties as Inner - * & outer WBWA & shareable. Disable TTBR1. + * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1. */ - ttbcr = TTBCR_EAE_BIT | - TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | - TTBCR_RGN0_INNER_WBA | - (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + if (flags & XLAT_TABLE_NC) { + /* Inner & outer non-cacheable non-shareable. */ + ttbcr = TTBCR_EAE_BIT | + TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC | + TTBCR_RGN0_INNER_NC | + (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + } else { + /* Inner & outer WBWA & shareable. 
*/ + ttbcr = TTBCR_EAE_BIT | + TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA | + TTBCR_RGN0_INNER_WBA | + (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE)); + } ttbcr |= TTBCR_EPD1_BIT; write_ttbcr(ttbcr); diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c index 235fa445..575ac71c 100644 --- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c @@ -201,11 +201,18 @@ void init_xlat_tables_arch(unsigned long long max_pa) write_mair_el##_el(mair); \ \ /* Set TCR bits as well. */ \ - /* Inner & outer WBWA & shareable. */ \ /* Set T0SZ to (64 - width of virtual address space) */ \ - tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \ - TCR_RGN_INNER_WBA | \ - (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + if (flags & XLAT_TABLE_NC) { \ + /* Inner & outer non-cacheable non-shareable. */\ + tcr = TCR_SH_NON_SHAREABLE | \ + TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \ + (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + } else { \ + /* Inner & outer WBWA & shareable. */ \ + tcr = TCR_SH_INNER_SHAREABLE | \ + TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \ + (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\ + } \ tcr |= _tcr_extra; \ write_tcr_el##_el(tcr); \ \ diff --git a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S index 70a7f3a9..308753ec 100644 --- a/plat/nvidia/tegra/common/aarch64/tegra_helpers.S +++ b/plat/nvidia/tegra/common/aarch64/tegra_helpers.S @@ -289,7 +289,7 @@ func plat_reset_handler mov x2, #BL31_SIZE _loop16: cmp x2, #16 - b.lt _loop1 + b.lo _loop1 ldp x3, x4, [x1], #16 stp x3, x4, [x0], #16 sub x2, x2, #16 diff --git a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c index f0202041..bd16b991 100644 --- a/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c +++ b/plat/nvidia/tegra/common/drivers/memctrl/memctrl_v2.c @@ -38,6 +38,7 @@ #include <smmu.h> #include <string.h> #include <tegra_def.h> +#include <tegra_platform.h> #include <xlat_tables.h> #define TEGRA_GPU_RESET_REG_OFFSET 0x30 @@ -45,7 +46,7 @@ /* Video Memory base and size (live values) */ static uint64_t video_mem_base; -static uint64_t video_mem_size; +static uint64_t video_mem_size_mb; /* array to hold stream_id override config register offsets */ const static uint32_t streamid_overrides[] = { @@ -144,8 +145,7 @@ const static mc_streamid_security_cfg_t sec_cfgs[] = { mc_make_sec_cfg(SCEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(UFSHCR, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(SDMMCWAA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SESWR, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SESWR, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(MPCORER, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(PTCR, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(BPMPW, NON_SECURE, NO_OVERRIDE, ENABLE), @@ -159,9 +159,7 @@ const static mc_streamid_security_cfg_t sec_cfgs[] = { mc_make_sec_cfg(XUSB_HOSTW, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(TSECSWR, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(SDMMCRAA, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(VIW, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(AXISR, SECURE, NO_OVERRIDE, DISABLE), mc_make_sec_cfg(SDMMCW, NON_SECURE, OVERRIDE, ENABLE), 
mc_make_sec_cfg(BPMPDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), @@ -191,13 +189,23 @@ const static mc_streamid_security_cfg_t sec_cfgs[] = { mc_make_sec_cfg(SDMMCRAB, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(ETRR, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(AONR, NON_SECURE, OVERRIDE, ENABLE), - mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), - mc_make_sec_cfg(SESRD, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(SESRD, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(NVENCSRD, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(GPUSWR, SECURE, NO_OVERRIDE, DISABLE), mc_make_sec_cfg(TSECSWRB, NON_SECURE, NO_OVERRIDE, ENABLE), mc_make_sec_cfg(ISPWB, NON_SECURE, OVERRIDE, ENABLE), mc_make_sec_cfg(GPUSRD2, SECURE, NO_OVERRIDE, DISABLE), +#if ENABLE_CHIP_VERIFICATION_HARNESS + mc_make_sec_cfg(APEDMAW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(APER, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(APEW, NON_SECURE, OVERRIDE, ENABLE), + mc_make_sec_cfg(APEDMAR, NON_SECURE, OVERRIDE, ENABLE), +#else + mc_make_sec_cfg(APEDMAW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APER, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APEW, NON_SECURE, NO_OVERRIDE, ENABLE), + mc_make_sec_cfg(APEDMAR, NON_SECURE, NO_OVERRIDE, ENABLE), +#endif }; const static mc_txn_override_cfg_t mc_override_cfgs[] = { @@ -234,6 +242,251 @@ const static mc_txn_override_cfg_t mc_override_cfgs[] = { mc_make_txn_override_cfg(SCEW, CGID_TAG_ADR), }; +static void tegra_memctrl_reconfig_mss_clients(void) +{ +#if ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS + uint32_t val, wdata_0, wdata_1; + + /* + * Assert Memory Controller's HOTRESET_FLUSH_ENABLE signal for + * boot and strongly ordered MSS clients to flush existing memory + * traffic and stall future requests. + */ + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0); + assert(val == MC_CLIENT_HOTRESET_CTRL0_RESET_VAL); + + wdata_0 = MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB; + tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0); + + /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0); + } while ((val & wdata_0) != wdata_0); + + /* Wait one more time due to SW WAR for known legacy issue */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0); + } while ((val & wdata_0) != wdata_0); + + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1); + assert(val == MC_CLIENT_HOTRESET_CTRL1_RESET_VAL); + + wdata_1 = MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB | + MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB; + tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1); + + /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1); + } while ((val & wdata_1) != wdata_1); + + /* Wait one more time due to SW WAR for known legacy issue */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1); + } while ((val & wdata_1) != wdata_1); + + /* + * Change MEMTYPE_OVERRIDE from SO_DEV -> PASSTHRU for boot and + * strongly ordered MSS clients. 
ROC needs to be single point + * of control on overriding the memory type. So, remove TSA's + * memtype override. + */ + mc_set_tsa_passthrough(AFIW); + mc_set_tsa_passthrough(HDAW); + mc_set_tsa_passthrough(SATAW); + mc_set_tsa_passthrough(XUSB_HOSTW); + mc_set_tsa_passthrough(XUSB_DEVW); + mc_set_tsa_passthrough(SDMMCWAB); + mc_set_tsa_passthrough(APEDMAW); + mc_set_tsa_passthrough(SESWR); + mc_set_tsa_passthrough(ETRW); + mc_set_tsa_passthrough(AXISW); + mc_set_tsa_passthrough(EQOSW); + mc_set_tsa_passthrough(UFSHCW); + mc_set_tsa_passthrough(BPMPDMAW); + mc_set_tsa_passthrough(AONDMAW); + mc_set_tsa_passthrough(SCEDMAW); + + /* + * Change COH_PATH_OVERRIDE_SO_DEV from NO_OVERRIDE -> FORCE_COHERENT + * for boot and strongly ordered MSS clients. This steers all sodev + * transactions to ROC. + * + * Change AXID_OVERRIDE/AXID_OVERRIDE_SO_DEV only for some clients + * whose AXI IDs we know and trust. + */ + + /* Match AFIW */ + mc_set_forced_coherent_so_dev_cfg(AFIR); + + /* + * See bug 200131110 comment #35 - there are no normal requests + * and AWID for SO/DEV requests is hardcoded in RTL for a + * particular PCIE controller + */ + mc_set_forced_coherent_so_dev_cfg(AFIW); + mc_set_forced_coherent_cfg(HDAR); + mc_set_forced_coherent_cfg(HDAW); + mc_set_forced_coherent_cfg(SATAR); + mc_set_forced_coherent_cfg(SATAW); + mc_set_forced_coherent_cfg(XUSB_HOSTR); + mc_set_forced_coherent_cfg(XUSB_HOSTW); + mc_set_forced_coherent_cfg(XUSB_DEVR); + mc_set_forced_coherent_cfg(XUSB_DEVW); + mc_set_forced_coherent_cfg(SDMMCRAB); + mc_set_forced_coherent_cfg(SDMMCWAB); + + /* Match APEDMAW */ + mc_set_forced_coherent_axid_so_dev_cfg(APEDMAR); + + /* + * See bug 200131110 comment #35 - AWID for normal requests + * is 0x80 and AWID for SO/DEV requests is 0x01 + */ + mc_set_forced_coherent_axid_so_dev_cfg(APEDMAW); + mc_set_forced_coherent_cfg(SESRD); + mc_set_forced_coherent_cfg(SESWR); + mc_set_forced_coherent_cfg(ETRR); + mc_set_forced_coherent_cfg(ETRW); + mc_set_forced_coherent_cfg(AXISR); + mc_set_forced_coherent_cfg(AXISW); + mc_set_forced_coherent_cfg(EQOSR); + mc_set_forced_coherent_cfg(EQOSW); + mc_set_forced_coherent_cfg(UFSHCR); + mc_set_forced_coherent_cfg(UFSHCW); + mc_set_forced_coherent_cfg(BPMPDMAR); + mc_set_forced_coherent_cfg(BPMPDMAW); + mc_set_forced_coherent_cfg(AONDMAR); + mc_set_forced_coherent_cfg(AONDMAW); + mc_set_forced_coherent_cfg(SCEDMAR); + mc_set_forced_coherent_cfg(SCEDMAW); + + /* + * At this point, ordering can occur at ROC. So, remove PCFIFO's + * control over ordering requests. 
+ * + * Change PCFIFO_*_ORDERED_CLIENT from ORDERED -> UNORDERED for + * boot and strongly ordered MSS clients + */ + val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL & + mc_set_pcfifo_unordered_boot_so_mss(1, AFIW) & + mc_set_pcfifo_unordered_boot_so_mss(1, HDAW) & + mc_set_pcfifo_unordered_boot_so_mss(1, SATAW); + tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val); + + val = MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL & + mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_HOSTW) & + mc_set_pcfifo_unordered_boot_so_mss(2, XUSB_DEVW); + tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG2, val); + + val = MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL & + mc_set_pcfifo_unordered_boot_so_mss(3, SDMMCWAB); + tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG3, val); + + val = MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL & + mc_set_pcfifo_unordered_boot_so_mss(4, SESWR) & + mc_set_pcfifo_unordered_boot_so_mss(4, ETRW) & + mc_set_pcfifo_unordered_boot_so_mss(4, AXISW) & + mc_set_pcfifo_unordered_boot_so_mss(4, EQOSW) & + mc_set_pcfifo_unordered_boot_so_mss(4, UFSHCW) & + mc_set_pcfifo_unordered_boot_so_mss(4, BPMPDMAW) & + mc_set_pcfifo_unordered_boot_so_mss(4, AONDMAW) & + mc_set_pcfifo_unordered_boot_so_mss(4, SCEDMAW); + tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG4, val); + + val = MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL & + mc_set_pcfifo_unordered_boot_so_mss(5, APEDMAW); + tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG5, val); + + /* + * At this point, ordering can occur at ROC. SMMU need not + * reorder any requests. + * + * Change SMMU_*_ORDERED_CLIENT from ORDERED -> UNORDERED + * for boot and strongly ordered MSS clients + */ + val = MC_SMMU_CLIENT_CONFIG1_RESET_VAL & + mc_set_smmu_unordered_boot_so_mss(1, AFIW) & + mc_set_smmu_unordered_boot_so_mss(1, HDAW) & + mc_set_smmu_unordered_boot_so_mss(1, SATAW); + tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG1, val); + + val = MC_SMMU_CLIENT_CONFIG2_RESET_VAL & + mc_set_smmu_unordered_boot_so_mss(2, XUSB_HOSTW) & + mc_set_smmu_unordered_boot_so_mss(2, XUSB_DEVW); + tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG2, val); + + val = MC_SMMU_CLIENT_CONFIG3_RESET_VAL & + mc_set_smmu_unordered_boot_so_mss(3, SDMMCWAB); + tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG3, val); + + val = MC_SMMU_CLIENT_CONFIG4_RESET_VAL & + mc_set_smmu_unordered_boot_so_mss(4, SESWR) & + mc_set_smmu_unordered_boot_so_mss(4, ETRW) & + mc_set_smmu_unordered_boot_so_mss(4, AXISW) & + mc_set_smmu_unordered_boot_so_mss(4, EQOSW) & + mc_set_smmu_unordered_boot_so_mss(4, UFSHCW) & + mc_set_smmu_unordered_boot_so_mss(4, BPMPDMAW) & + mc_set_smmu_unordered_boot_so_mss(4, AONDMAW) & + mc_set_smmu_unordered_boot_so_mss(4, SCEDMAW); + tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG4, val); + + val = MC_SMMU_CLIENT_CONFIG5_RESET_VAL & + mc_set_smmu_unordered_boot_so_mss(5, APEDMAW); + tegra_mc_write_32(MC_SMMU_CLIENT_CONFIG5, val); + + /* + * Deassert HOTRESET FLUSH_ENABLE for boot and strongly ordered MSS + * clients to allow memory traffic from all clients to start passing + * through ROC + */ + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL0); + assert(val == wdata_0); + + wdata_0 = MC_CLIENT_HOTRESET_CTRL0_RESET_VAL; + tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL0, wdata_0); + + /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0); + } while ((val & wdata_0) != wdata_0); + + /* Wait one more time due to SW WAR for known legacy issue */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS0); + } while ((val & wdata_0) != wdata_0); + + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_CTRL1); + assert(val == wdata_1); + + 
wdata_1 = MC_CLIENT_HOTRESET_CTRL1_RESET_VAL; + tegra_mc_write_32(MC_CLIENT_HOTRESET_CTRL1, wdata_1); + + /* Wait for HOTRESET STATUS to indicate FLUSH_DONE */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1); + } while ((val & wdata_1) != wdata_1); + + /* Wait one more time due to SW WAR for known legacy issue */ + do { + val = tegra_mc_read_32(MC_CLIENT_HOTRESET_STATUS1); + } while ((val & wdata_1) != wdata_1); + +#endif +} + /* * Init Memory controller during boot. */ @@ -243,7 +496,6 @@ void tegra_memctrl_setup(void) uint32_t num_overrides = sizeof(streamid_overrides) / sizeof(uint32_t); uint32_t num_sec_cfgs = sizeof(sec_cfgs) / sizeof(mc_streamid_security_cfg_t); uint32_t num_txn_overrides = sizeof(mc_override_cfgs) / sizeof(mc_txn_override_cfg_t); - uint32_t tegra_rev; int i; INFO("Tegra Memory Controller (v2)\n"); @@ -281,12 +533,18 @@ void tegra_memctrl_setup(void) MC_SMMU_BYPASS_CONFIG_SETTINGS); /* - * Set the MC_TXN_OVERRIDE registers for write clients. + * Re-configure MSS to allow ROC to deal with ordering of the + * Memory Controller traffic. This is needed as the Memory Controller + * boots with MSS having all control, but ROC provides a performance + * boost as compared to MSS. */ - tegra_rev = (mmio_read_32(TEGRA_MISC_BASE + HARDWARE_REVISION_OFFSET) & - HARDWARE_MINOR_REVISION_MASK) >> HARDWARE_MINOR_REVISION_SHIFT; + tegra_memctrl_reconfig_mss_clients(); - if (tegra_rev == HARDWARE_REVISION_A01) { + /* + * Set the MC_TXN_OVERRIDE registers for write clients. + */ + if (!tegra_platform_is_silicon() || + (tegra_platform_is_silicon() && tegra_get_chipid_minor() == 1)) { /* GPU and NVENC settings for rev. A01 */ val = tegra_mc_read_32(MC_TXN_OVERRIDE_CONFIG_GPUSWR); @@ -322,13 +580,21 @@ void tegra_memctrl_setup(void) */ void tegra_memctrl_restore_settings(void) { + /* + * Re-configure MSS to allow ROC to deal with ordering of the + * Memory Controller traffic. This is needed as the Memory Controller + * resets during System Suspend with MSS having all control, but ROC + * provides a performance boost as compared to MSS. + */ + tegra_memctrl_reconfig_mss_clients(); + /* video memory carveout region */ if (video_mem_base) { tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)video_mem_base); tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(video_mem_base >> 32)); - tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size); + tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, video_mem_size_mb); /* * MCE propogates the VideoMem configuration values across the @@ -371,33 +637,55 @@ void tegra_memctrl_tzdram_setup(uint64_t phys_base, uint32_t size_in_bytes) */ void tegra_memctrl_tzram_setup(uint64_t phys_base, uint32_t size_in_bytes) { - uint64_t tzram_end = phys_base + size_in_bytes - 1; + uint32_t index; + uint32_t total_128kb_blocks = size_in_bytes >> 17; + uint32_t residual_4kb_blocks = (size_in_bytes & 0x1FFFF) >> 12; uint32_t val; /* - * Check if the TZRAM is locked already. 
+ * Reset the access configuration registers to restrict access + * to the TZRAM aperture */ - if (tegra_mc_read_32(MC_TZRAM_REG_CTRL) == DISABLE_TZRAM_ACCESS) - return; + for (index = MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG0; + index <= MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS5; + index += 4) + tegra_mc_write_32(index, 0); /* - * Setup the Memory controller to allow only secure accesses to - * the TZRAM carveout + * Allow CPU read/write access to the aperture */ - INFO("Configuring TrustZone RAM (SysRAM) Memory Carveout\n"); + tegra_mc_write_32(MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG1, + TZRAM_CARVEOUT_CPU_WRITE_ACCESS_BIT | + TZRAM_CARVEOUT_CPU_READ_ACCESS_BIT); - /* Program the base and end values */ - tegra_mc_write_32(MC_TZRAM_BASE, (uint32_t)phys_base); - tegra_mc_write_32(MC_TZRAM_END, (uint32_t)tzram_end); + /* + * Set the TZRAM base. TZRAM base must be 4k aligned, at least. + */ + assert(!(phys_base & 0xFFF)); + tegra_mc_write_32(MC_TZRAM_BASE_LO, (uint32_t)phys_base); + tegra_mc_write_32(MC_TZRAM_BASE_HI, + (uint32_t)(phys_base >> 32) & TZRAM_BASE_HI_MASK); - /* Extract the high address bits from the base/end values */ - val = (uint32_t)(phys_base >> 32) & TZRAM_ADDR_HI_BITS_MASK; - val |= (((uint32_t)(tzram_end >> 32) & TZRAM_ADDR_HI_BITS_MASK) << - TZRAM_END_HI_BITS_SHIFT); - tegra_mc_write_32(MC_TZRAM_HI_ADDR_BITS, val); + /* + * Set the TZRAM size + * + * total size = (number of 128KB blocks) + (number of remaining 4KB + * blocks) + * + */ + val = (residual_4kb_blocks << TZRAM_SIZE_RANGE_4KB_SHIFT) | + total_128kb_blocks; + tegra_mc_write_32(MC_TZRAM_SIZE, val); - /* Disable further writes to the TZRAM setup registers */ - tegra_mc_write_32(MC_TZRAM_REG_CTRL, DISABLE_TZRAM_ACCESS); + /* + * Lock the configuration settings by disabling TZ-only lock + * and locking the configuration against any future changes + * at all. 
+ */ + val = tegra_mc_read_32(MC_TZRAM_CARVEOUT_CFG); + val &= ~TZRAM_ENABLE_TZ_LOCK_BIT; + val |= TZRAM_LOCK_CFG_SETTINGS_BIT; + tegra_mc_write_32(MC_TZRAM_CARVEOUT_CFG, val); /* * MCE propogates the security configuration values across the @@ -436,11 +724,11 @@ void tegra_memctrl_videomem_setup(uint64_t phys_base, uint32_t size_in_bytes) tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_LO, (uint32_t)phys_base); tegra_mc_write_32(MC_VIDEO_PROTECT_BASE_HI, (uint32_t)(phys_base >> 32)); - tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes); + tegra_mc_write_32(MC_VIDEO_PROTECT_SIZE_MB, size_in_bytes >> 20); /* store new values */ video_mem_base = phys_base; - video_mem_size = size_in_bytes >> 20; + video_mem_size_mb = size_in_bytes >> 20; /* * MCE propogates the VideoMem configuration values across the diff --git a/plat/nvidia/tegra/include/drivers/memctrl_v2.h b/plat/nvidia/tegra/include/drivers/memctrl_v2.h index 9623e25f..e1abe143 100644 --- a/plat/nvidia/tegra/include/drivers/memctrl_v2.h +++ b/plat/nvidia/tegra/include/drivers/memctrl_v2.h @@ -31,7 +31,6 @@ #ifndef __MEMCTRLV2_H__ #define __MEMCTRLV2_H__ -#include <mmio.h> #include <tegra_def.h> /******************************************************************************* @@ -283,6 +282,10 @@ #define MC_TXN_OVERRIDE_CONFIG_AFIW 0x1188 #define MC_TXN_OVERRIDE_CONFIG_SCEW 0x14e0 +#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID (1 << 0) +#define MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV (2 << 4) +#define MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT (1 << 12) + /******************************************************************************* * Non-SO_DEV transactions override values for CGID_TAG bitfield for the * MC_TXN_OVERRIDE_CONFIG_{module} registers @@ -293,6 +296,10 @@ #define MC_TXN_OVERRIDE_CGID_TAG_ADR 3 #define MC_TXN_OVERRIDE_CGID_TAG_MASK 3 +#ifndef __ASSEMBLY__ + +#include <sys/types.h> + /******************************************************************************* * Structure to hold the transaction override settings to use to override * client inputs @@ -327,12 +334,12 @@ typedef struct mc_streamid_security_cfg { int override_client_ns_flag; } mc_streamid_security_cfg_t; -#define OVERRIDE_DISABLE 1 -#define OVERRIDE_ENABLE 0 -#define CLIENT_FLAG_SECURE 0 -#define CLIENT_FLAG_NON_SECURE 1 -#define CLIENT_INPUTS_OVERRIDE 1 -#define CLIENT_INPUTS_NO_OVERRIDE 0 +#define OVERRIDE_DISABLE 1 +#define OVERRIDE_ENABLE 0 +#define CLIENT_FLAG_SECURE 0 +#define CLIENT_FLAG_NON_SECURE 1 +#define CLIENT_INPUTS_OVERRIDE 1 +#define CLIENT_INPUTS_NO_OVERRIDE 0 #define mc_make_sec_cfg(off, ns, ovrrd, access) \ { \ @@ -343,30 +350,229 @@ typedef struct mc_streamid_security_cfg { .override_enable = OVERRIDE_ ## access \ } +#endif /* __ASSEMBLY__ */ + /******************************************************************************* * TZDRAM carveout configuration registers ******************************************************************************/ -#define MC_SECURITY_CFG0_0 0x70 -#define MC_SECURITY_CFG1_0 0x74 -#define MC_SECURITY_CFG3_0 0x9BC +#define MC_SECURITY_CFG0_0 0x70 +#define MC_SECURITY_CFG1_0 0x74 +#define MC_SECURITY_CFG3_0 0x9BC /******************************************************************************* * Video Memory carveout configuration registers ******************************************************************************/ -#define MC_VIDEO_PROTECT_BASE_HI 0x978 -#define MC_VIDEO_PROTECT_BASE_LO 0x648 -#define MC_VIDEO_PROTECT_SIZE_MB 0x64c +#define MC_VIDEO_PROTECT_BASE_HI 0x978 
+#define MC_VIDEO_PROTECT_BASE_LO 0x648 +#define MC_VIDEO_PROTECT_SIZE_MB 0x64c + +/******************************************************************************* + * TZRAM carveout (MC_SECURITY_CARVEOUT11) configuration registers + ******************************************************************************/ +#define MC_TZRAM_BASE_LO 0x2194 +#define TZRAM_BASE_LO_SHIFT 12 +#define TZRAM_BASE_LO_MASK 0xFFFFF +#define MC_TZRAM_BASE_HI 0x2198 +#define TZRAM_BASE_HI_SHIFT 0 +#define TZRAM_BASE_HI_MASK 3 +#define MC_TZRAM_SIZE 0x219C +#define TZRAM_SIZE_RANGE_4KB_SHIFT 27 + +#define MC_TZRAM_CARVEOUT_CFG 0x2190 +#define TZRAM_LOCK_CFG_SETTINGS_BIT (1 << 1) +#define TZRAM_ENABLE_TZ_LOCK_BIT (1 << 0) +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG0 0x21A0 +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG1 0x21A4 +#define TZRAM_CARVEOUT_CPU_WRITE_ACCESS_BIT (1 << 25) +#define TZRAM_CARVEOUT_CPU_READ_ACCESS_BIT (1 << 7) +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG2 0x21A8 +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG3 0x21AC +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG4 0x21B0 +#define MC_TZRAM_CARVEOUT_CLIENT_ACCESS_CFG5 0x21B4 + +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS0 0x21B8 +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS1 0x21BC +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS2 0x21C0 +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS3 0x21C4 +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS4 0x21C8 +#define MC_TZRAM_CARVEOUT_FORCE_INTERNAL_ACCESS5 0x21CC /******************************************************************************* - * TZRAM carveout configuration registers + * Memory Controller Reset Control registers ******************************************************************************/ -#define MC_TZRAM_BASE 0x1850 -#define MC_TZRAM_END 0x1854 -#define MC_TZRAM_HI_ADDR_BITS 0x1588 - #define TZRAM_ADDR_HI_BITS_MASK 0x3 - #define TZRAM_END_HI_BITS_SHIFT 8 -#define MC_TZRAM_REG_CTRL 0x185c - #define DISABLE_TZRAM_ACCESS 1 +#define MC_CLIENT_HOTRESET_CTRL0 0x200 +#define MC_CLIENT_HOTRESET_CTRL0_RESET_VAL 0 +#define MC_CLIENT_HOTRESET_CTRL0_AFI_FLUSH_ENB (1 << 0) +#define MC_CLIENT_HOTRESET_CTRL0_HC_FLUSH_ENB (1 << 6) +#define MC_CLIENT_HOTRESET_CTRL0_HDA_FLUSH_ENB (1 << 7) +#define MC_CLIENT_HOTRESET_CTRL0_ISP2_FLUSH_ENB (1 << 8) +#define MC_CLIENT_HOTRESET_CTRL0_MPCORE_FLUSH_ENB (1 << 9) +#define MC_CLIENT_HOTRESET_CTRL0_NVENC_FLUSH_ENB (1 << 11) +#define MC_CLIENT_HOTRESET_CTRL0_SATA_FLUSH_ENB (1 << 15) +#define MC_CLIENT_HOTRESET_CTRL0_VI_FLUSH_ENB (1 << 17) +#define MC_CLIENT_HOTRESET_CTRL0_VIC_FLUSH_ENB (1 << 18) +#define MC_CLIENT_HOTRESET_CTRL0_XUSB_HOST_FLUSH_ENB (1 << 19) +#define MC_CLIENT_HOTRESET_CTRL0_XUSB_DEV_FLUSH_ENB (1 << 20) +#define MC_CLIENT_HOTRESET_CTRL0_TSEC_FLUSH_ENB (1 << 22) +#define MC_CLIENT_HOTRESET_CTRL0_SDMMC1A_FLUSH_ENB (1 << 29) +#define MC_CLIENT_HOTRESET_CTRL0_SDMMC2A_FLUSH_ENB (1 << 30) +#define MC_CLIENT_HOTRESET_CTRL0_SDMMC3A_FLUSH_ENB (1 << 31) +#define MC_CLIENT_HOTRESET_STATUS0 0x204 +#define MC_CLIENT_HOTRESET_CTRL1 0x970 +#define MC_CLIENT_HOTRESET_CTRL1_RESET_VAL 0 +#define MC_CLIENT_HOTRESET_CTRL1_SDMMC4A_FLUSH_ENB (1 << 0) +#define MC_CLIENT_HOTRESET_CTRL1_GPU_FLUSH_ENB (1 << 2) +#define MC_CLIENT_HOTRESET_CTRL1_NVDEC_FLUSH_ENB (1 << 5) +#define MC_CLIENT_HOTRESET_CTRL1_APE_FLUSH_ENB (1 << 6) +#define MC_CLIENT_HOTRESET_CTRL1_SE_FLUSH_ENB (1 << 7) +#define MC_CLIENT_HOTRESET_CTRL1_NVJPG_FLUSH_ENB (1 << 8) +#define MC_CLIENT_HOTRESET_CTRL1_ETR_FLUSH_ENB (1 << 12) +#define MC_CLIENT_HOTRESET_CTRL1_TSECB_FLUSH_ENB (1 << 13) +#define 
MC_CLIENT_HOTRESET_CTRL1_AXIS_FLUSH_ENB (1 << 18) +#define MC_CLIENT_HOTRESET_CTRL1_EQOS_FLUSH_ENB (1 << 19) +#define MC_CLIENT_HOTRESET_CTRL1_UFSHC_FLUSH_ENB (1 << 20) +#define MC_CLIENT_HOTRESET_CTRL1_NVDISPLAY_FLUSH_ENB (1 << 21) +#define MC_CLIENT_HOTRESET_CTRL1_BPMP_FLUSH_ENB (1 << 22) +#define MC_CLIENT_HOTRESET_CTRL1_AON_FLUSH_ENB (1 << 23) +#define MC_CLIENT_HOTRESET_CTRL1_SCE_FLUSH_ENB (1 << 24) +#define MC_CLIENT_HOTRESET_STATUS1 0x974 + +/******************************************************************************* + * TSA configuration registers + ******************************************************************************/ +#define TSA_CONFIG_STATIC0_CSW_SESWR 0x4010 +#define TSA_CONFIG_STATIC0_CSW_SESWR_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_ETRW 0x4038 +#define TSA_CONFIG_STATIC0_CSW_ETRW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB 0x5010 +#define TSA_CONFIG_STATIC0_CSW_SDMMCWAB_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_AXISW 0x7008 +#define TSA_CONFIG_STATIC0_CSW_AXISW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_HDAW 0xA008 +#define TSA_CONFIG_STATIC0_CSW_HDAW_RESET 0x100 +#define TSA_CONFIG_STATIC0_CSW_AONDMAW 0xB018 +#define TSA_CONFIG_STATIC0_CSW_AONDMAW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_SCEDMAW 0xD018 +#define TSA_CONFIG_STATIC0_CSW_SCEDMAW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW 0xD028 +#define TSA_CONFIG_STATIC0_CSW_BPMPDMAW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_APEDMAW 0x12018 +#define TSA_CONFIG_STATIC0_CSW_APEDMAW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_UFSHCW 0x13008 +#define TSA_CONFIG_STATIC0_CSW_UFSHCW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_AFIW 0x13018 +#define TSA_CONFIG_STATIC0_CSW_AFIW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_SATAW 0x13028 +#define TSA_CONFIG_STATIC0_CSW_SATAW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_EQOSW 0x13038 +#define TSA_CONFIG_STATIC0_CSW_EQOSW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW 0x15008 +#define TSA_CONFIG_STATIC0_CSW_XUSB_DEVW_RESET 0x1100 +#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW 0x15018 +#define TSA_CONFIG_STATIC0_CSW_XUSB_HOSTW_RESET 0x1100 + +#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK (0x3 << 11) +#define TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU (0 << 11) + +/******************************************************************************* + * Memory Controller's PCFIFO client configuration registers + ******************************************************************************/ +#define MC_PCFIFO_CLIENT_CONFIG1 0xdd4 +#define MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL 0x20000 +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_UNORDERED (0 << 17) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_AFIW_MASK (1 << 17) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_UNORDERED (0 << 21) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_HDAW_MASK (1 << 21) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_UNORDERED (0 << 29) +#define MC_PCFIFO_CLIENT_CONFIG1_PCFIFO_SATAW_MASK (1 << 29) + +#define MC_PCFIFO_CLIENT_CONFIG2 0xdd8 +#define MC_PCFIFO_CLIENT_CONFIG2_RESET_VAL 0x20000 +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_UNORDERED (0 << 11) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_HOSTW_MASK (1 << 11) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_UNORDERED (0 << 13) +#define MC_PCFIFO_CLIENT_CONFIG2_PCFIFO_XUSB_DEVW_MASK (1 << 13) + +#define MC_PCFIFO_CLIENT_CONFIG3 0xddc +#define MC_PCFIFO_CLIENT_CONFIG3_RESET_VAL 0 +#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_UNORDERED (0 << 7) +#define MC_PCFIFO_CLIENT_CONFIG3_PCFIFO_SDMMCWAB_MASK (1 << 7) + 
+#define MC_PCFIFO_CLIENT_CONFIG4 0xde0 +#define MC_PCFIFO_CLIENT_CONFIG4_RESET_VAL 0 +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_UNORDERED (0 << 1) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SESWR_MASK (1 << 1) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_UNORDERED (0 << 5) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_ETRW_MASK (1 << 5) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_UNORDERED (0 << 13) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AXISW_MASK (1 << 13) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_UNORDERED (0 << 15) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_EQOSW_MASK (1 << 15) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_UNORDERED (0 << 17) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_UFSHCW_MASK (1 << 17) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_UNORDERED (0 << 22) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_BPMPDMAW_MASK (1 << 22) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_UNORDERED (0 << 26) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_AONDMAW_MASK (1 << 26) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_UNORDERED (0 << 30) +#define MC_PCFIFO_CLIENT_CONFIG4_PCFIFO_SCEDMAW_MASK (1 << 30) + +#define MC_PCFIFO_CLIENT_CONFIG5 0xbf4 +#define MC_PCFIFO_CLIENT_CONFIG5_RESET_VAL 0 +#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_UNORDERED (0 << 0) +#define MC_PCFIFO_CLIENT_CONFIG5_PCFIFO_APEDMAW_MASK (1 << 0) + +/******************************************************************************* + * Memory Controller's SMMU client configuration registers + ******************************************************************************/ +#define MC_SMMU_CLIENT_CONFIG1 0x44 +#define MC_SMMU_CLIENT_CONFIG1_RESET_VAL 0x20000 +#define MC_SMMU_CLIENT_CONFIG1_AFIW_UNORDERED (0 << 17) +#define MC_SMMU_CLIENT_CONFIG1_AFIW_MASK (1 << 17) +#define MC_SMMU_CLIENT_CONFIG1_HDAW_UNORDERED (0 << 21) +#define MC_SMMU_CLIENT_CONFIG1_HDAW_MASK (1 << 21) +#define MC_SMMU_CLIENT_CONFIG1_SATAW_UNORDERED (0 << 29) +#define MC_SMMU_CLIENT_CONFIG1_SATAW_MASK (1 << 29) + +#define MC_SMMU_CLIENT_CONFIG2 0x48 +#define MC_SMMU_CLIENT_CONFIG2_RESET_VAL 0x20000 +#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_UNORDERED (0 << 11) +#define MC_SMMU_CLIENT_CONFIG2_XUSB_HOSTW_MASK (1 << 11) +#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_UNORDERED (0 << 13) +#define MC_SMMU_CLIENT_CONFIG2_XUSB_DEVW_MASK (1 << 13) + +#define MC_SMMU_CLIENT_CONFIG3 0x4c +#define MC_SMMU_CLIENT_CONFIG3_RESET_VAL 0 +#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_UNORDERED (0 << 7) +#define MC_SMMU_CLIENT_CONFIG3_SDMMCWAB_MASK (1 << 7) + +#define MC_SMMU_CLIENT_CONFIG4 0xb9c +#define MC_SMMU_CLIENT_CONFIG4_RESET_VAL 0 +#define MC_SMMU_CLIENT_CONFIG4_SESWR_UNORDERED (0 << 1) +#define MC_SMMU_CLIENT_CONFIG4_SESWR_MASK (1 << 1) +#define MC_SMMU_CLIENT_CONFIG4_ETRW_UNORDERED (0 << 5) +#define MC_SMMU_CLIENT_CONFIG4_ETRW_MASK (1 << 5) +#define MC_SMMU_CLIENT_CONFIG4_AXISW_UNORDERED (0 << 13) +#define MC_SMMU_CLIENT_CONFIG4_AXISW_MASK (1 << 13) +#define MC_SMMU_CLIENT_CONFIG4_EQOSW_UNORDERED (0 << 15) +#define MC_SMMU_CLIENT_CONFIG4_EQOSW_MASK (1 << 15) +#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_UNORDERED (0 << 17) +#define MC_SMMU_CLIENT_CONFIG4_UFSHCW_MASK (1 << 17) +#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_UNORDERED (0 << 22) +#define MC_SMMU_CLIENT_CONFIG4_BPMPDMAW_MASK (1 << 22) +#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_UNORDERED (0 << 26) +#define MC_SMMU_CLIENT_CONFIG4_AONDMAW_MASK (1 << 26) +#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_UNORDERED (0 << 30) +#define MC_SMMU_CLIENT_CONFIG4_SCEDMAW_MASK (1 << 30) + +#define MC_SMMU_CLIENT_CONFIG5 0xbac +#define 
MC_SMMU_CLIENT_CONFIG5_RESET_VAL 0
+#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_UNORDERED (0 << 0)
+#define MC_SMMU_CLIENT_CONFIG5_APEDMAW_MASK (1 << 0)
+
+#ifndef __ASSEMBLY__
+
+#include <mmio.h>

 static inline uint32_t tegra_mc_read_32(uint32_t off)
 {
@@ -388,4 +594,42 @@ static inline void tegra_mc_streamid_write_32(uint32_t off, uint32_t val)
 	mmio_write_32(TEGRA_MC_STREAMID_BASE + off, val);
 }

+#define mc_set_pcfifo_unordered_boot_so_mss(id, client) \
+	(~MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_MASK | \
+	MC_PCFIFO_CLIENT_CONFIG##id##_PCFIFO_##client##_UNORDERED)
+
+#define mc_set_smmu_unordered_boot_so_mss(id, client) \
+	(~MC_SMMU_CLIENT_CONFIG##id##_##client##_MASK | \
+	MC_SMMU_CLIENT_CONFIG##id##_##client##_UNORDERED)
+
+#define mc_set_tsa_passthrough(client) \
+	{ \
+	mmio_write_32(TEGRA_TSA_BASE + TSA_CONFIG_STATIC0_CSW_##client, \
+		(TSA_CONFIG_STATIC0_CSW_##client##_RESET & \
+		~TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_MASK) | \
+		TSA_CONFIG_CSW_MEMTYPE_OVERRIDE_PASTHRU); \
+	}
+
+#define mc_set_forced_coherent_cfg(client) \
+	{ \
+	tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+		MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV); \
+	}
+
+#define mc_set_forced_coherent_so_dev_cfg(client) \
+	{ \
+	tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+		MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+		MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+	}
+
+#define mc_set_forced_coherent_axid_so_dev_cfg(client) \
+	{ \
+	tegra_mc_write_32(MC_TXN_OVERRIDE_CONFIG_##client, \
+		MC_TXN_OVERRIDE_CONFIG_COH_PATH_OVERRIDE_SO_DEV | \
+		MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_CGID | \
+		MC_TXN_OVERRIDE_CONFIG_AXID_OVERRIDE_SO_DEV_CGID_SO_DEV_CLIENT); \
+	}
+#endif /* __ASSEMBLY__ */
+
 #endif /* __MEMCTRLV2_H__ */
diff --git a/plat/nvidia/tegra/include/drivers/smmu.h b/plat/nvidia/tegra/include/drivers/smmu.h
index bb08a559..0640846a 100644
--- a/plat/nvidia/tegra/include/drivers/smmu.h
+++ b/plat/nvidia/tegra/include/drivers/smmu.h
@@ -599,9 +599,16 @@
 * SMMU Global Secure Aux. Configuration Register
 ******************************************************************************/
 #define SMMU_GSR0_SECURE_ACR 0x10
+#define SMMU_GNSR_ACR (SMMU_GSR0_SECURE_ACR + 0x400)
 #define SMMU_GSR0_PGSIZE_SHIFT 16
 #define SMMU_GSR0_PGSIZE_4K (0 << SMMU_GSR0_PGSIZE_SHIFT)
 #define SMMU_GSR0_PGSIZE_64K (1 << SMMU_GSR0_PGSIZE_SHIFT)
+#define SMMU_ACR_CACHE_LOCK_ENABLE_BIT (1 << 26)
+
+/*******************************************************************************
+ * SMMU Global Aux.
Control Register + ******************************************************************************/ +#define SMMU_CBn_ACTLR_CPRE_BIT (1 << 1) /******************************************************************************* * SMMU configuration constants @@ -627,6 +634,6 @@ static inline void tegra_smmu_write_32(uint32_t off, uint32_t val) } void tegra_smmu_init(void); -void tegra_smmu_save_context(void); +void tegra_smmu_save_context(uint64_t smmu_ctx_addr); #endif /*__SMMU_H */ diff --git a/plat/nvidia/tegra/include/t186/tegra_def.h b/plat/nvidia/tegra/include/t186/tegra_def.h index f3fbb891..e0eddfd3 100644 --- a/plat/nvidia/tegra/include/t186/tegra_def.h +++ b/plat/nvidia/tegra/include/t186/tegra_def.h @@ -31,8 +31,6 @@ #ifndef __TEGRA_DEF_H__ #define __TEGRA_DEF_H__ -#include <platform_def.h> - /******************************************************************************* * These values are used by the PSCI implementation during the `CPU_SUSPEND` * and `SYSTEM_SUSPEND` calls as the `state-id` field in the 'power state' @@ -78,12 +76,15 @@ ******************************************************************************/ #define TEGRA_MISC_BASE 0x00100000 #define HARDWARE_REVISION_OFFSET 0x4 -#define HARDWARE_MINOR_REVISION_MASK 0xf0000 -#define HARDWARE_MINOR_REVISION_SHIFT 0x10 -#define HARDWARE_REVISION_A01 1 + #define MISCREG_PFCFG 0x200C /******************************************************************************* + * Tegra TSA Controller constants + ******************************************************************************/ +#define TEGRA_TSA_BASE 0x02400000 + +/******************************************************************************* * Tegra Memory Controller constants ******************************************************************************/ #define TEGRA_MC_STREAMID_BASE 0x02C00000 @@ -101,6 +102,13 @@ #define TEGRA_UARTG_BASE 0x0C290000 /******************************************************************************* + * Tegra Fuse Controller related constants + ******************************************************************************/ +#define TEGRA_FUSE_BASE 0x03820000 +#define OPT_SUBREVISION 0x248 +#define SUBREVISION_MASK 0xFF + +/******************************************************************************* * GICv2 & interrupt handling related constants ******************************************************************************/ #define TEGRA_GICD_BASE 0x03881000 @@ -138,6 +146,8 @@ #define SECURE_SCRATCH_RSV6 0x680 #define SECURE_SCRATCH_RSV11_LO 0x6A8 #define SECURE_SCRATCH_RSV11_HI 0x6AC +#define SECURE_SCRATCH_RSV53_LO 0x7F8 +#define SECURE_SCRATCH_RSV53_HI 0x7FC /******************************************************************************* * Tegra Memory Mapped Control Register Access Bus constants @@ -153,6 +163,6 @@ * Tegra TZRAM constants ******************************************************************************/ #define TEGRA_TZRAM_BASE 0x30000000 -#define TEGRA_TZRAM_SIZE 0x50000 +#define TEGRA_TZRAM_SIZE 0x40000 #endif /* __TEGRA_DEF_H__ */ diff --git a/plat/nvidia/tegra/include/tegra_private.h b/plat/nvidia/tegra/include/tegra_private.h index 012bfd77..39006f6f 100644 --- a/plat/nvidia/tegra/include/tegra_private.h +++ b/plat/nvidia/tegra/include/tegra_private.h @@ -119,4 +119,7 @@ void plat_early_platform_setup(void); /* Declarations for tegra_delay_timer.c */ void tegra_delay_timer_init(void); +void tegra_secure_entrypoint(void); +void tegra186_cpu_reset_handler(void); + #endif /* __TEGRA_PRIVATE_H__ */ diff --git 
a/plat/nvidia/tegra/soc/t186/drivers/include/mce.h b/plat/nvidia/tegra/soc/t186/drivers/include/mce.h index 7078b8bb..66e212bf 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/include/mce.h +++ b/plat/nvidia/tegra/soc/t186/drivers/include/mce.h @@ -94,6 +94,8 @@ typedef enum mce_cmd { MCE_CMD_ENUM_WRITE_MCA, MCE_CMD_ROC_FLUSH_CACHE, MCE_CMD_ROC_CLEAN_CACHE, + MCE_CMD_ENABLE_LATIC, + MCE_CMD_UNCORE_PERFMON_REQ, MCE_CMD_IS_CCX_ALLOWED = 0xFE, MCE_CMD_MAX = 0xFF, } mce_cmd_t; @@ -101,6 +103,24 @@ typedef enum mce_cmd { #define MCE_CMD_MASK 0xFF /******************************************************************************* + * Struct to prepare UPDATE_CSTATE_INFO request + ******************************************************************************/ +typedef struct mce_cstate_info { + /* cluster cstate value */ + uint32_t cluster; + /* ccplex cstate value */ + uint32_t ccplex; + /* system cstate value */ + uint32_t system; + /* force system state? */ + uint8_t system_state_force; + /* wake mask value */ + uint32_t wake_mask; + /* update the wake mask? */ + uint8_t update_wake_mask; +} mce_cstate_info_t; + +/******************************************************************************* * Macros to prepare CSTATE info request ******************************************************************************/ /* Description of the parameters for UPDATE_CSTATE_INFO request */ @@ -183,6 +203,54 @@ typedef union mca_arg { } mca_arg_t; /******************************************************************************* + * Uncore PERFMON ARI struct + ******************************************************************************/ +typedef union uncore_perfmon_req { + struct perfmon_command { + /* + * Commands: 0 = READ, 1 = WRITE + */ + uint64_t cmd:8; + /* + * The unit group: L2=0, L3=1, ROC=2, MC=3, IOB=4 + */ + uint64_t grp:4; + /* + * Unit selector: Selects the unit instance, with 0 = Unit + * = (number of units in group) - 1. + */ + uint64_t unit:4; + /* + * Selects the uncore perfmon register to access + */ + uint64_t reg:8; + /* + * Counter number. Selects which counter to use for + * registers NV_PMEVCNTR and NV_PMEVTYPER. 
+ */ + uint64_t counter:8; + } perfmon_command; + struct perfmon_status { + /* + * Resulting command status + */ + uint64_t val:8; + uint64_t unused:24; + } perfmon_status; + uint64_t data; +} uncore_perfmon_req_t; + +#define UNCORE_PERFMON_CMD_READ 0 +#define UNCORE_PERFMON_CMD_WRITE 1 + +#define UNCORE_PERFMON_CMD_MASK 0xFF +#define UNCORE_PERFMON_UNIT_GRP_MASK 0xF +#define UNCORE_PERFMON_SELECTOR_MASK 0xF +#define UNCORE_PERFMON_REG_MASK 0xFF +#define UNCORE_PERFMON_CTR_MASK 0xFF +#define UNCORE_PERFMON_RESP_STATUS_MASK 0xFF + +/******************************************************************************* * Structure populated by arch specific code to export routines which perform * common low level MCE functions ******************************************************************************/ @@ -312,6 +380,12 @@ typedef struct arch_mce_ops { * reset the entire system */ void (*enter_ccplex_state)(uint32_t ari_base, uint32_t state_idx); + /* + * This ARI request reads/writes data from/to Uncore PERFMON + * registers + */ + int (*read_write_uncore_perfmon)(uint32_t ari_base, + uncore_perfmon_req_t req, uint64_t *data); } arch_mce_ops_t; int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1, @@ -321,6 +395,8 @@ int mce_update_gsc_videomem(void); int mce_update_gsc_tzdram(void); int mce_update_gsc_tzram(void); __dead2 void mce_enter_ccplex_state(uint32_t state_idx); +void mce_update_cstate_info(mce_cstate_info_t *cstate); +void mce_verify_firmware_version(void); /* declarations for ARI/NVG handler functions */ int ari_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time); @@ -342,6 +418,8 @@ int ari_roc_clean_cache(uint32_t ari_base); uint64_t ari_read_write_mca(uint32_t ari_base, mca_cmd_t cmd, uint64_t *data); int ari_update_ccplex_gsc(uint32_t ari_base, uint32_t gsc_idx); void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx); +int ari_read_write_uncore_perfmon(uint32_t ari_base, + uncore_perfmon_req_t req, uint64_t *data); int nvg_enter_cstate(uint32_t ari_base, uint32_t state, uint32_t wake_time); int nvg_update_cstate_info(uint32_t ari_base, uint32_t cluster, uint32_t ccplex, diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c index 147a358a..e11d1600 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/ari.c @@ -389,3 +389,41 @@ void ari_enter_ccplex_state(uint32_t ari_base, uint32_t state_idx) */ (void)ari_request_wait(ari_base, 0, TEGRA_ARI_MISC_CCPLEX, state_idx, 0); } + +int ari_read_write_uncore_perfmon(uint32_t ari_base, + uncore_perfmon_req_t req, uint64_t *data) +{ + int ret; + uint32_t val; + + /* sanity check input parameters */ + if (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_READ && !data) { + ERROR("invalid parameters\n"); + return EINVAL; + } + + /* + * For "write" commands get the value that has to be written + * to the uncore perfmon registers + */ + val = (req.perfmon_command.cmd == UNCORE_PERFMON_CMD_WRITE) ? 
+ *data : 0; + + ret = ari_request_wait(ari_base, 0, TEGRA_ARI_PERFMON, val, req.data); + if (ret) + return ret; + + /* read the command status value */ + req.perfmon_status.val = ari_get_response_high(ari_base) & + UNCORE_PERFMON_RESP_STATUS_MASK; + + /* + * For "read" commands get the data from the uncore + * perfmon registers + */ + if ((req.perfmon_status.val == 0) && (req.perfmon_command.cmd == + UNCORE_PERFMON_CMD_READ)) + *data = ari_get_response_low(ari_base); + + return (int)req.perfmon_status.val; +} diff --git a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c index 745b6f4e..f87dfa4d 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c +++ b/plat/nvidia/tegra/soc/t186/drivers/mce/mce.c @@ -42,6 +42,7 @@ #include <sys/errno.h> #include <t18x_ari.h> #include <tegra_def.h> +#include <tegra_platform.h> /* NVG functions handlers */ static arch_mce_ops_t nvg_mce_ops = { @@ -61,7 +62,8 @@ static arch_mce_ops_t nvg_mce_ops = { .roc_clean_cache = ari_roc_clean_cache, .read_write_mca = ari_read_write_mca, .update_ccplex_gsc = ari_update_ccplex_gsc, - .enter_ccplex_state = ari_enter_ccplex_state + .enter_ccplex_state = ari_enter_ccplex_state, + .read_write_uncore_perfmon = ari_read_write_uncore_perfmon }; /* ARI functions handlers */ @@ -82,7 +84,8 @@ static arch_mce_ops_t ari_mce_ops = { .roc_clean_cache = ari_roc_clean_cache, .read_write_mca = ari_read_write_mca, .update_ccplex_gsc = ari_update_ccplex_gsc, - .enter_ccplex_state = ari_enter_ccplex_state + .enter_ccplex_state = ari_enter_ccplex_state, + .read_write_uncore_perfmon = ari_read_write_uncore_perfmon }; typedef struct mce_config { @@ -173,6 +176,7 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1, uint64_t ret64 = 0, arg3, arg4, arg5; int ret = 0; mca_cmd_t mca_cmd; + uncore_perfmon_req_t req; cpu_context_t *ctx = cm_get_context(NON_SECURE); gp_regs_t *gp_regs = get_gpregs_ctx(ctx); @@ -356,6 +360,33 @@ int mce_command_handler(mce_cmd_t cmd, uint64_t arg0, uint64_t arg1, break; +#if ENABLE_CHIP_VERIFICATION_HARNESS + case MCE_CMD_ENABLE_LATIC: + /* + * This call is not for production use. The constant value, + * 0xFFFF0000, is specific to allowing for enabling LATIC on + * pre-production parts for the chip verification harness. + * + * Enabling LATIC allows S/W to read the MINI ISPs in the + * CCPLEX. The ISMs are used for various measurements relevant + * to particular locations in the Silicon. They are small + * counters which can be polled to determine how fast a + * particular location in the Silicon is. 
+ */ + ops->enter_ccplex_state(mce_get_curr_cpu_ari_base(), + 0xFFFF0000); + + break; +#endif + + case MCE_CMD_UNCORE_PERFMON_REQ: + memcpy(&req, &arg0, sizeof(arg0)); + ret = ops->read_write_uncore_perfmon(cpu_ari_base, req, &arg1); + + /* update context to return data */ + write_ctx_reg(gp_regs, CTX_GPREG_X1, arg1); + break; + default: ERROR("unknown MCE command (%d)\n", cmd); return EINVAL; @@ -429,3 +460,65 @@ __dead2 void mce_enter_ccplex_state(uint32_t state_idx) panic(); } + +/******************************************************************************* + * Handler to issue the UPDATE_CSTATE_INFO request + ******************************************************************************/ +void mce_update_cstate_info(mce_cstate_info_t *cstate) +{ + arch_mce_ops_t *ops = mce_get_curr_cpu_ops(); + + /* issue the UPDATE_CSTATE_INFO request */ + ops->update_cstate_info(mce_get_curr_cpu_ari_base(), cstate->cluster, + cstate->ccplex, cstate->system, cstate->system_state_force, + cstate->wake_mask, cstate->update_wake_mask); +} + +/******************************************************************************* + * Handler to read the MCE firmware version and check if it is compatible + * with interface header the BL3-1 was compiled against + ******************************************************************************/ +void mce_verify_firmware_version(void) +{ + arch_mce_ops_t *ops; + uint32_t cpu_ari_base; + uint64_t version; + uint32_t major, minor; + + /* + * MCE firmware is not running on simulation platforms. + */ + if (tegra_platform_is_emulation()) + return; + + /* get a pointer to the CPU's arch_mce_ops_t struct */ + ops = mce_get_curr_cpu_ops(); + + /* get the CPU's ARI base address */ + cpu_ari_base = mce_get_curr_cpu_ari_base(); + + /* + * Read the MCE firmware version and extract the major and minor + * version fields + */ + version = ops->call_enum_misc(cpu_ari_base, TEGRA_ARI_MISC_VERSION, 0); + major = (uint32_t)version; + minor = (uint32_t)(version >> 32); + + INFO("MCE Version - HW=%d:%d, SW=%d:%d\n", major, minor, + TEGRA_ARI_VERSION_MAJOR, TEGRA_ARI_VERSION_MINOR); + + /* + * Verify that the MCE firmware version and the interface header + * match + */ + if (major != TEGRA_ARI_VERSION_MAJOR) { + ERROR("ARI major version mismatch\n"); + panic(); + } + + if (minor < TEGRA_ARI_VERSION_MINOR) { + ERROR("ARI minor version mismatch\n"); + panic(); + } +} diff --git a/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c b/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c index 2940f583..bca6f2ec 100644 --- a/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c +++ b/plat/nvidia/tegra/soc/t186/drivers/smmu/smmu.c @@ -29,9 +29,13 @@ */ #include <assert.h> +#include <bl_common.h> #include <debug.h> #include <memctrl_v2.h> +#include <platform_def.h> #include <smmu.h> +#include <string.h> +#include <tegra_private.h> typedef struct smmu_regs { uint32_t reg; @@ -133,7 +137,7 @@ typedef struct smmu_regs { .val = 0xFFFFFFFF, \ } -static smmu_regs_t smmu_ctx_regs[] = { +static __attribute__((aligned(16))) smmu_regs_t smmu_ctx_regs[] = { _START_OF_TABLE_, mc_make_sid_security_cfg(SCEW), mc_make_sid_security_cfg(AFIR), @@ -421,12 +425,15 @@ static smmu_regs_t smmu_ctx_regs[] = { }; /* - * Save SMMU settings before "System Suspend" + * Save SMMU settings before "System Suspend" to TZDRAM */ -void tegra_smmu_save_context(void) +void tegra_smmu_save_context(uint64_t smmu_ctx_addr) { uint32_t i; #if DEBUG + plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + uint64_t tzdram_base = 
params_from_bl2->tzdram_base; + uint64_t tzdram_end = tzdram_base + params_from_bl2->tzdram_size; uint32_t reg_id1, pgshift, cb_size; /* sanity check SMMU settings c*/ @@ -438,6 +445,8 @@ void tegra_smmu_save_context(void) assert(!((pgshift != PGSHIFT) || (cb_size != CB_SIZE))); #endif + assert((smmu_ctx_addr >= tzdram_base) && (smmu_ctx_addr <= tzdram_end)); + /* index of _END_OF_TABLE_ */ smmu_ctx_regs[0].val = ARRAY_SIZE(smmu_ctx_regs) - 1; @@ -445,22 +454,53 @@ void tegra_smmu_save_context(void) for (i = 1; i < ARRAY_SIZE(smmu_ctx_regs) - 1; i++) smmu_ctx_regs[i].val = mmio_read_32(smmu_ctx_regs[i].reg); + /* Save SMMU config settings */ + memcpy16((void *)(uintptr_t)smmu_ctx_addr, (void *)smmu_ctx_regs, + sizeof(smmu_ctx_regs)); + /* save the SMMU table address */ mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_LO, - (uint32_t)(unsigned long)smmu_ctx_regs); + (uint32_t)smmu_ctx_addr); mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV11_HI, - (uint32_t)(((unsigned long)smmu_ctx_regs) >> 32)); + (uint32_t)(smmu_ctx_addr >> 32)); } +#define SMMU_NUM_CONTEXTS 64 +#define SMMU_CONTEXT_BANK_MAX_IDX 64 + /* * Init SMMU during boot or "System Suspend" exit */ void tegra_smmu_init(void) { - uint32_t val; + uint32_t val, i, ctx_base; - /* Program the SMMU pagesize */ + /* Program the SMMU pagesize and reset CACHE_LOCK bit */ val = tegra_smmu_read_32(SMMU_GSR0_SECURE_ACR); val |= SMMU_GSR0_PGSIZE_64K; + val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(SMMU_GSR0_SECURE_ACR, val); + + /* reset CACHE LOCK bit for NS Aux. Config. Register */ + val = tegra_smmu_read_32(SMMU_GNSR_ACR); + val &= ~SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(SMMU_GNSR_ACR, val); + + /* disable TCU prefetch for all contexts */ + ctx_base = (SMMU_GSR0_PGSIZE_64K * SMMU_NUM_CONTEXTS) + SMMU_CBn_ACTLR; + for (i = 0; i < SMMU_CONTEXT_BANK_MAX_IDX; i++) { + val = tegra_smmu_read_32(ctx_base + (SMMU_GSR0_PGSIZE_64K * i)); + val &= ~SMMU_CBn_ACTLR_CPRE_BIT; + tegra_smmu_write_32(ctx_base + (SMMU_GSR0_PGSIZE_64K * i), val); + } + + /* set CACHE LOCK bit for NS Aux. Config. Register */ + val = tegra_smmu_read_32(SMMU_GNSR_ACR); + val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; + tegra_smmu_write_32(SMMU_GNSR_ACR, val); + + /* set CACHE LOCK bit for S Aux. Config. 
Register */ + val = tegra_smmu_read_32(SMMU_GSR0_SECURE_ACR); + val |= SMMU_ACR_CACHE_LOCK_ENABLE_BIT; tegra_smmu_write_32(SMMU_GSR0_SECURE_ACR, val); } diff --git a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c index 7e35cc6b..35828787 100644 --- a/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c +++ b/plat/nvidia/tegra/soc/t186/plat_psci_handlers.c @@ -37,20 +37,30 @@ #include <debug.h> #include <denver.h> #include <mce.h> +#include <platform.h> #include <psci.h> #include <smmu.h> +#include <string.h> #include <t18x_ari.h> #include <tegra_private.h> extern void prepare_cpu_pwr_dwn(void); +extern void tegra186_cpu_reset_handler(void); +extern uint32_t __tegra186_cpu_reset_handler_data, + __tegra186_cpu_reset_handler_end; + +/* TZDRAM offset for saving SMMU context */ +#define TEGRA186_SMMU_CTX_OFFSET 16 /* state id mask */ #define TEGRA186_STATE_ID_MASK 0xF /* constants to get power state's wake time */ #define TEGRA186_WAKE_TIME_MASK 0xFFFFFF #define TEGRA186_WAKE_TIME_SHIFT 4 +/* default core wake mask for CPU_SUSPEND */ +#define TEGRA186_CORE_WAKE_MASK 0x180c /* context size to save during system suspend */ -#define TEGRA186_SE_CONTEXT_SIZE 3 +#define TEGRA186_SE_CONTEXT_SIZE 3 static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE]; static unsigned int wake_time[PLATFORM_CORE_COUNT]; @@ -62,12 +72,9 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state, psci_power_state_t *req_state) { int state_id = psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK; - int cpu = read_mpidr() & MPIDR_CPU_MASK; - int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; - - if (impl == DENVER_IMPL) - cpu |= 0x4; + int cpu = plat_my_core_pos(); + /* save the core wake time (us) */ wake_time[cpu] = (power_state >> TEGRA186_WAKE_TIME_SHIFT) & TEGRA186_WAKE_TIME_MASK; @@ -75,10 +82,10 @@ int32_t tegra_soc_validate_power_state(unsigned int power_state, switch (state_id) { case PSTATE_ID_CORE_IDLE: case PSTATE_ID_CORE_POWERDN: - /* - * Core powerdown request only for afflvl 0 - */ + + /* Core powerdown request */ req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id; + req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id; break; @@ -94,18 +101,12 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) { const plat_local_state_t *pwr_domain_state; unsigned int stateid_afflvl0, stateid_afflvl2; - int cpu = read_mpidr() & MPIDR_CPU_MASK; - int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; - cpu_context_t *ctx = cm_get_context(NON_SECURE); - gp_regs_t *gp_regs = get_gpregs_ctx(ctx); + int cpu = plat_my_core_pos(); + plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + mce_cstate_info_t cstate_info = { 0 }; + uint64_t smmu_ctx_base; uint32_t val; - assert(ctx); - assert(gp_regs); - - if (impl == DENVER_IMPL) - cpu |= 0x4; - /* get the state ID */ pwr_domain_state = target_state->pwr_domain_state; stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] & @@ -113,28 +114,17 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & TEGRA186_STATE_ID_MASK; - if (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) { - - /* Prepare for cpu idle */ - (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, - TEGRA_ARI_CORE_C6, wake_time[cpu], 0); - - } else if (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN) { + if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) || + (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) { - /* Prepare for cpu powerdn */ - 
(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, - TEGRA_ARI_CORE_C7, wake_time[cpu], 0); + /* Enter CPU idle/powerdown */ + val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ? + TEGRA_ARI_CORE_C6 : TEGRA_ARI_CORE_C7; + (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, val, + wake_time[cpu], 0); } else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { - /* loop until SC7 is allowed */ - do { - val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, - TEGRA_ARI_CORE_C7, - MCE_CORE_SLEEP_TIME_INFINITE, - 0); - } while (val == 0); - /* save SE registers */ se_regs[0] = mmio_read_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT); @@ -147,23 +137,129 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state) val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG); mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV6, val); - /* save SMMU context */ - tegra_smmu_save_context(); + /* save SMMU context to TZDRAM */ + smmu_ctx_base = params_from_bl2->tzdram_base + + ((uintptr_t)&__tegra186_cpu_reset_handler_data - + (uintptr_t)tegra186_cpu_reset_handler) + + TEGRA186_SMMU_CTX_OFFSET; + tegra_smmu_save_context((uintptr_t)smmu_ctx_base); /* Prepare for system suspend */ - write_ctx_reg(gp_regs, CTX_GPREG_X4, 1); - write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); - write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); - (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, - TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC7); + cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; + cstate_info.system = TEGRA_ARI_SYSTEM_SC7; + cstate_info.system_state_force = 1; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); - /* Enter system suspend state */ + /* Loop until system suspend is allowed */ + do { + val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, + TEGRA_ARI_CORE_C7, + MCE_CORE_SLEEP_TIME_INFINITE, + 0); + } while (val == 0); + + /* Instruct the MCE to enter system suspend state */ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); + } - } else { - ERROR("%s: Unknown state id\n", __func__); - return PSCI_E_NOT_SUPPORTED; + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * Platform handler to calculate the proper target power level at the + * specified affinity level + ******************************************************************************/ +plat_local_state_t tegra_soc_get_target_pwr_state(unsigned int lvl, + const plat_local_state_t *states, + unsigned int ncpu) +{ + plat_local_state_t target = *states; + int cpu = plat_my_core_pos(), ret, cluster_powerdn = 1; + int core_pos = read_mpidr() & MPIDR_CPU_MASK; + mce_cstate_info_t cstate_info = { 0 }; + + /* get the current core's power state */ + target = *(states + core_pos); + + /* CPU suspend */ + if (lvl == MPIDR_AFFLVL1 && target == PSTATE_ID_CORE_POWERDN) { + + /* Program default wake mask */ + cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); + + /* Check if CCx state is allowed. 
*/ + ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED, + TEGRA_ARI_CORE_C7, wake_time[cpu], 0); + if (ret) + return PSTATE_ID_CORE_POWERDN; + } + + /* CPU off */ + if (lvl == MPIDR_AFFLVL1 && target == PLAT_MAX_OFF_STATE) { + + /* find out the number of ON cpus in the cluster */ + do { + target = *states++; + if (target != PLAT_MAX_OFF_STATE) + cluster_powerdn = 0; + } while (--ncpu); + + /* Enable cluster powerdn from last CPU in the cluster */ + if (cluster_powerdn) { + + /* Enable CC7 state and turn off wake mask */ + cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); + + /* Check if CCx state is allowed. */ + ret = mce_command_handler(MCE_CMD_IS_CCX_ALLOWED, + TEGRA_ARI_CORE_C7, + MCE_CORE_SLEEP_TIME_INFINITE, + 0); + if (ret) + return PSTATE_ID_CORE_POWERDN; + + } else { + + /* Turn off wake_mask */ + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); + } + } + + /* System Suspend */ + if ((lvl == MPIDR_AFFLVL2) || (target == PSTATE_ID_SOC_POWERDN)) + return PSTATE_ID_SOC_POWERDN; + + /* default state */ + return PSCI_LOCAL_STATE_RUN; +} + +int tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state) +{ + const plat_local_state_t *pwr_domain_state = + target_state->pwr_domain_state; + plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + unsigned int stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] & + TEGRA186_STATE_ID_MASK; + uint32_t val; + + if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { + /* + * The TZRAM loses power when we enter system suspend. To + * allow graceful exit from system suspend, we need to copy + * BL3-1 over to TZDRAM. + */ + val = params_from_bl2->tzdram_base + + ((uintptr_t)&__tegra186_cpu_reset_handler_end - + (uintptr_t)tegra186_cpu_reset_handler); + memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE, + (uintptr_t)&__BL31_END__ - (uintptr_t)BL31_BASE); } return PSCI_E_SUCCESS; @@ -190,13 +286,30 @@ int tegra_soc_pwr_domain_on(u_register_t mpidr) int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) { - int state_id = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; + int stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL]; + int stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0]; + mce_cstate_info_t cstate_info = { 0 }; + + /* + * Reset power state info for CPUs when onlining, we set + * deepest power when offlining a core but that may not be + * requested by non-secure sw which controls idle states. It + * will re-init this info from non-secure software when the + * core come online. + */ + if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) { + + cstate_info.cluster = TEGRA_ARI_CLUSTER_CC1; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); + } /* * Check if we are exiting from deep sleep and restore SE * context if we are. */ - if (state_id == PSTATE_ID_SOC_POWERDN) { + if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) { + mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT, se_regs[0]); mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT, @@ -206,6 +319,17 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) /* Init SMMU */ tegra_smmu_init(); + + /* + * Reset power state info for the last core doing SC7 + * entry and exit, we set deepest power state as CC7 + * and SC7 for SC7 entry which may not be requested by + * non-secure SW which controls idle states. 
+ */ + cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; + cstate_info.system = TEGRA_ARI_SYSTEM_SC1; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); } return PSCI_E_SUCCESS; @@ -213,33 +337,22 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state) int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state) { - cpu_context_t *ctx = cm_get_context(NON_SECURE); - gp_regs_t *gp_regs = get_gpregs_ctx(ctx); int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; - assert(ctx); - assert(gp_regs); - - /* Turn off wake_mask */ - write_ctx_reg(gp_regs, CTX_GPREG_X4, 0); - write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); - write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); - mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, TEGRA_ARI_CLUSTER_CC7, - 0, TEGRA_ARI_SYSTEM_SC7); - /* Disable Denver's DCO operations */ if (impl == DENVER_IMPL) denver_disable_dco(); /* Turn off CPU */ - return mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, + (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); + + return PSCI_E_SUCCESS; } __dead2 void tegra_soc_prepare_system_off(void) { - cpu_context_t *ctx = cm_get_context(NON_SECURE); - gp_regs_t *gp_regs = get_gpregs_ctx(ctx); + mce_cstate_info_t cstate_info = { 0 }; uint32_t val; if (tegra186_system_powerdn_state == TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF) { @@ -249,6 +362,13 @@ __dead2 void tegra_soc_prepare_system_off(void) } else if (tegra186_system_powerdn_state == TEGRA_ARI_SYSTEM_SC8) { + /* Prepare for quasi power down */ + cstate_info.cluster = TEGRA_ARI_CLUSTER_CC7; + cstate_info.system = TEGRA_ARI_SYSTEM_SC8; + cstate_info.system_state_force = 1; + cstate_info.update_wake_mask = 1; + mce_update_cstate_info(&cstate_info); + /* loop until other CPUs power down */ do { val = mce_command_handler(MCE_CMD_IS_SC7_ALLOWED, @@ -257,13 +377,6 @@ __dead2 void tegra_soc_prepare_system_off(void) 0); } while (val == 0); - /* Prepare for quasi power down */ - write_ctx_reg(gp_regs, CTX_GPREG_X4, 1); - write_ctx_reg(gp_regs, CTX_GPREG_X5, 0); - write_ctx_reg(gp_regs, CTX_GPREG_X6, 1); - (void)mce_command_handler(MCE_CMD_UPDATE_CSTATE_INFO, - TEGRA_ARI_CLUSTER_CC7, 0, TEGRA_ARI_SYSTEM_SC8); - /* Enter quasi power down state */ (void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0); @@ -274,6 +387,9 @@ __dead2 void tegra_soc_prepare_system_off(void) /* power down core */ prepare_cpu_pwr_dwn(); + /* flush L1/L2 data caches */ + dcsw_op_all(DCCISW); + } else { ERROR("%s: unsupported power down state (%d)\n", __func__, tegra186_system_powerdn_state); diff --git a/plat/nvidia/tegra/soc/t186/plat_secondary.c b/plat/nvidia/tegra/soc/t186/plat_secondary.c index df802891..406c1e08 100644 --- a/plat/nvidia/tegra/soc/t186/plat_secondary.c +++ b/plat/nvidia/tegra/soc/t186/plat_secondary.c @@ -28,10 +28,13 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#include <arch_helpers.h> #include <debug.h> #include <mce.h> #include <mmio.h> +#include <string.h> #include <tegra_def.h> +#include <tegra_private.h> #define MISCREG_CPU_RESET_VECTOR 0x2000 #define MISCREG_AA64_RST_LOW 0x2004 @@ -42,7 +45,8 @@ #define CPU_RESET_MODE_AA64 1 -extern void tegra_secure_entrypoint(void); +extern uint64_t tegra_bl31_phys_base; +extern uint64_t __tegra186_cpu_reset_handler_end; /******************************************************************************* * Setup secondary CPU vectors @@ -50,12 +54,31 @@ extern void tegra_secure_entrypoint(void); void plat_secondary_setup(void) { uint32_t addr_low, addr_high; - uint64_t reset_addr = (uint64_t)tegra_secure_entrypoint; + plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params(); + uint64_t cpu_reset_handler_base; INFO("Setting up secondary CPU boot\n"); - addr_low = (uint32_t)reset_addr | CPU_RESET_MODE_AA64; - addr_high = (uint32_t)((reset_addr >> 32) & 0x7ff); + if ((tegra_bl31_phys_base >= TEGRA_TZRAM_BASE) && + (tegra_bl31_phys_base <= (TEGRA_TZRAM_BASE + TEGRA_TZRAM_SIZE))) { + + /* + * The BL31 code resides in the TZSRAM which loses state + * when we enter System Suspend. Copy the wakeup trampoline + * code to TZDRAM to help us exit from System Suspend. + */ + cpu_reset_handler_base = params_from_bl2->tzdram_base; + memcpy16((void *)((uintptr_t)cpu_reset_handler_base), + (void *)(uintptr_t)tegra186_cpu_reset_handler, + (uintptr_t)&__tegra186_cpu_reset_handler_end - + (uintptr_t)tegra186_cpu_reset_handler); + + } else { + cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint; + } + + addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64; + addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff); /* write lower 32 bits first, then the upper 11 bits */ mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low); diff --git a/plat/nvidia/tegra/soc/t186/plat_setup.c b/plat/nvidia/tegra/soc/t186/plat_setup.c index d6b8bc3f..e848eabb 100644 --- a/plat/nvidia/tegra/soc/t186/plat_setup.c +++ b/plat/nvidia/tegra/soc/t186/plat_setup.c @@ -30,18 +30,25 @@ #include <arch_helpers.h> #include <assert.h> +#include <bl31.h> #include <bl_common.h> #include <console.h> #include <context.h> #include <context_mgmt.h> +#include <cortex_a57.h> #include <debug.h> #include <denver.h> #include <interrupt_mgmt.h> +#include <mce.h> #include <platform.h> #include <tegra_def.h> +#include <tegra_platform.h> #include <tegra_private.h> #include <xlat_tables.h> +DEFINE_RENAME_SYSREG_RW_FUNCS(l2ctlr_el1, L2CTLR_EL1) +extern uint64_t tegra_enable_l2_ecc_parity_prot; + /******************************************************************************* * The Tegra power domain tree has a single system level power domain i.e. a * single root node. 
The first entry in the power domain descriptor specifies @@ -65,11 +72,19 @@ const unsigned char tegra_power_domain_tree_desc[] = { static const mmap_region_t tegra_mmap[] = { MAP_REGION_FLAT(TEGRA_MISC_BASE, 0x10000, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TEGRA_TSA_BASE, 0x20000, /* 128KB */ + MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(TEGRA_MC_STREAMID_BASE, 0x10000, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(TEGRA_MC_BASE, 0x10000, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), - MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB */ + MAP_REGION_FLAT(TEGRA_UARTA_BASE, 0x20000, /* 128KB - UART A, B*/ + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TEGRA_UARTC_BASE, 0x20000, /* 128KB - UART C, G */ + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TEGRA_UARTD_BASE, 0x30000, /* 192KB - UART D, E, F */ + MT_DEVICE | MT_RW | MT_SECURE), + MAP_REGION_FLAT(TEGRA_FUSE_BASE, 0x10000, /* 64KB */ MT_DEVICE | MT_RW | MT_SECURE), MAP_REGION_FLAT(TEGRA_GICD_BASE, 0x20000, /* 128KB */ MT_DEVICE | MT_RW | MT_SECURE), @@ -139,6 +154,51 @@ uint32_t plat_get_console_from_id(int id) return tegra186_uart_addresses[id]; } +/* represent chip-version as concatenation of major (15:12), minor (11:8) and subrev (7:0) */ +#define TEGRA186_VER_A02P 0x1201 + +/******************************************************************************* + * Handler for early platform setup + ******************************************************************************/ +void plat_early_platform_setup(void) +{ + int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; + uint32_t chip_subrev, val; + + /* sanity check MCE firmware compatibility */ + mce_verify_firmware_version(); + + /* + * Enable ECC and Parity Protection for Cortex-A57 CPUs + * for Tegra A02p SKUs + */ + if (impl != DENVER_IMPL) { + + /* get the major, minor and sub-version values */ + chip_subrev = mmio_read_32(TEGRA_FUSE_BASE + OPT_SUBREVISION) & + SUBREVISION_MASK; + + /* prepare chip version number */ + val = (tegra_get_chipid_major() << 12) | + (tegra_get_chipid_minor() << 8) | + chip_subrev; + + /* enable L2 ECC for Tegra186 A02P and beyond */ + if (val >= TEGRA186_VER_A02P) { + + val = read_l2ctlr_el1(); + val |= L2_ECC_PARITY_PROTECTION_BIT; + write_l2ctlr_el1(val); + + /* + * Set the flag to enable ECC/Parity Protection + * when we exit System Suspend or Cluster Powerdn + */ + tegra_enable_l2_ecc_parity_prot = 1; + } + } +} + /* Secure IRQs for Tegra186 */ static const irq_sec_cfg_t tegra186_sec_irqs[] = { { @@ -168,3 +228,27 @@ void plat_gic_setup(void) if (sizeof(tegra186_sec_irqs) > 0) tegra_fiq_handler_setup(); } + +/******************************************************************************* + * Return pointer to the BL31 params from previous bootloader + ******************************************************************************/ +bl31_params_t *plat_get_bl31_params(void) +{ + uint32_t val; + + val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_LO); + + return (bl31_params_t *)(uintptr_t)val; +} + +/******************************************************************************* + * Return pointer to the BL31 platform params from previous bootloader + ******************************************************************************/ +plat_params_from_bl2_t *plat_get_bl31_plat_params(void) +{ + uint32_t val; + + val = mmio_read_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV53_HI); + + return (plat_params_from_bl2_t *)(uintptr_t)val; +} diff --git a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c 
b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c index fabab018..8e337184 100644 --- a/plat/nvidia/tegra/soc/t186/plat_sip_calls.c +++ b/plat/nvidia/tegra/soc/t186/plat_sip_calls.c @@ -64,6 +64,8 @@ extern uint32_t tegra186_system_powerdn_state; #define TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA 0x82FFFF0D #define TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE 0x82FFFF0E #define TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE 0x82FFFF0F +#define TEGRA_SIP_MCE_CMD_ENABLE_LATIC 0x82FFFF10 +#define TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ 0x82FFFF11 /******************************************************************************* * This function is responsible for handling all T186 SiP calls @@ -100,6 +102,8 @@ int plat_sip_handler(uint32_t smc_fid, case TEGRA_SIP_MCE_CMD_ENUM_WRITE_MCA: case TEGRA_SIP_MCE_CMD_ROC_FLUSH_CACHE: case TEGRA_SIP_MCE_CMD_ROC_CLEAN_CACHE: + case TEGRA_SIP_MCE_CMD_ENABLE_LATIC: + case TEGRA_SIP_MCE_CMD_UNCORE_PERFMON_REQ: /* clean up the high bits */ smc_fid &= MCE_CMD_MASK; @@ -110,32 +114,6 @@ int plat_sip_handler(uint32_t smc_fid, return 0; - case TEGRA_SIP_NEW_VIDEOMEM_REGION: - /* clean up the high bits */ - x1 = (uint32_t)x1; - x2 = (uint32_t)x2; - - /* - * Check if Video Memory overlaps TZDRAM (contains bl31/bl32) - * or falls outside of the valid DRAM range - */ - mce_ret = bl31_check_ns_address(x1, x2); - if (mce_ret) - return -ENOTSUP; - - /* - * Check if Video Memory is aligned to 1MB. - */ - if ((x1 & 0xFFFFF) || (x2 & 0xFFFFF)) { - ERROR("Unaligned Video Memory base address!\n"); - return -ENOTSUP; - } - - /* new video memory carveout settings */ - tegra_memctrl_videomem_setup(x1, x2); - - return 0; - case TEGRA_SIP_SYSTEM_SHUTDOWN_STATE: /* clean up the high bits */ diff --git a/plat/nvidia/tegra/soc/t186/plat_trampoline.S b/plat/nvidia/tegra/soc/t186/plat_trampoline.S new file mode 100644 index 00000000..7619ed0c --- /dev/null +++ b/plat/nvidia/tegra/soc/t186/plat_trampoline.S @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <arch.h> +#include <asm_macros.S> +#include <common_def.h> +#include <memctrl_v2.h> +#include <tegra_def.h> + +#define TEGRA186_SMMU_CTX_SIZE 0x420 + + .align 4 + .globl tegra186_cpu_reset_handler + +/* CPU reset handler routine */ +func tegra186_cpu_reset_handler + /* + * The Memory Controller loses state during System Suspend. We + * use this information to decide if the reset handler is running + * after a System Suspend. Resume from system suspend requires + * restoring the entire state from TZDRAM to TZRAM. + */ + mov x1, #TEGRA_MC_BASE + ldr w0, [x1, #MC_SECURITY_CFG3_0] + lsl x0, x0, #32 + ldr w0, [x1, #MC_SECURITY_CFG0_0] + adr x1, tegra186_cpu_reset_handler + cmp x0, x1 + beq boot_cpu + + /* resume from system suspend */ + mov x0, #BL31_BASE + adr x1, __tegra186_cpu_reset_handler_end + adr x2, __tegra186_cpu_reset_handler_data + ldr x2, [x2, #8] + + /* memcpy16 */ +m_loop16: + cmp x2, #16 + b.lt m_loop1 + ldp x3, x4, [x1], #16 + stp x3, x4, [x0], #16 + sub x2, x2, #16 + b m_loop16 + /* copy byte per byte */ +m_loop1: + cbz x2, boot_cpu + ldrb w3, [x1], #1 + strb w3, [x0], #1 + subs x2, x2, #1 + b.ne m_loop1 + +boot_cpu: + adr x0, __tegra186_cpu_reset_handler_data + ldr x0, [x0] + br x0 +endfunc tegra186_cpu_reset_handler + + /* + * Tegra186 reset data (offset 0x0 - 0x430) + * + * 0x000: secure world's entrypoint + * 0x008: BL31 size (RO + RW) + * 0x00C: SMMU context start + * 0x42C: SMMU context end + */ + + .align 4 + .type __tegra186_cpu_reset_handler_data, %object + .globl __tegra186_cpu_reset_handler_data +__tegra186_cpu_reset_handler_data: + .quad tegra_secure_entrypoint + .quad __BL31_END__ - BL31_BASE + .rept TEGRA186_SMMU_CTX_SIZE + .quad 0 + .endr + .size __tegra186_cpu_reset_handler_data, \ + . - __tegra186_cpu_reset_handler_data + + .align 4 + .globl __tegra186_cpu_reset_handler_end +__tegra186_cpu_reset_handler_end: diff --git a/plat/nvidia/tegra/soc/t186/platform_t186.mk b/plat/nvidia/tegra/soc/t186/platform_t186.mk index adc4a9ee..b62d47da 100644 --- a/plat/nvidia/tegra/soc/t186/platform_t186.mk +++ b/plat/nvidia/tegra/soc/t186/platform_t186.mk @@ -29,8 +29,20 @@ # # platform configs -ENABLE_NS_L2_CPUECTRL_RW_ACCESS := 1 -$(eval $(call add_define,ENABLE_NS_L2_CPUECTRL_RW_ACCESS)) +ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS := 1 +$(eval $(call add_define,ENABLE_ROC_FOR_ORDERING_CLIENT_REQUESTS)) + +RELOCATE_TO_BL31_BASE := 1 +$(eval $(call add_define,RELOCATE_TO_BL31_BASE)) + +ENABLE_CHIP_VERIFICATION_HARNESS := 0 +$(eval $(call add_define,ENABLE_CHIP_VERIFICATION_HARNESS)) + +RESET_TO_BL31 := 1 + +PROGRAMMABLE_RESET_ADDRESS := 1 + +COLD_BOOT_SINGLE_CPU := 1 # platform settings TZDRAM_BASE := 0x30000000 @@ -42,10 +54,10 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT)) PLATFORM_MAX_CPUS_PER_CLUSTER := 4 $(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER)) -MAX_XLAT_TABLES := 15 +MAX_XLAT_TABLES := 20 $(eval $(call add_define,MAX_XLAT_TABLES)) -MAX_MMAP_REGIONS := 15 +MAX_MMAP_REGIONS := 20 $(eval $(call add_define,MAX_MMAP_REGIONS)) # platform files @@ -62,4 +74,5 @@ BL31_SOURCES += lib/cpus/aarch64/denver.S \ ${SOC_DIR}/plat_psci_handlers.c \ ${SOC_DIR}/plat_setup.c \ ${SOC_DIR}/plat_secondary.c \ - ${SOC_DIR}/plat_sip_calls.c + ${SOC_DIR}/plat_sip_calls.c \ + ${SOC_DIR}/plat_trampoline.S |
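
The tegra_memctrl_tzram_setup() hunk earlier in this patch programs MC_TZRAM_SIZE from two values, total_128kb_blocks and residual_4kb_blocks, which are computed before the visible part of the hunk. A minimal sketch of how they could be derived from the carveout size, assuming the size is a multiple of 4KB; only TZRAM_SIZE_RANGE_4KB_SHIFT comes from the patch, the other names are illustrative.

#include <assert.h>
#include <stdint.h>

#define TZRAM_SIZE_RANGE_4KB_SHIFT	27	/* from the new memctrl_v2.h */
#define SIZE_128KB			(128U * 1024U)
#define SIZE_4KB			(4U * 1024U)

static uint32_t tzram_size_encoding(uint64_t size_in_bytes)
{
	uint32_t total_128kb_blocks, residual_4kb_blocks;

	/* the TZRAM carveout size is expected to be a multiple of 4KB */
	assert((size_in_bytes & (SIZE_4KB - 1U)) == 0U);

	/* number of whole 128KB blocks */
	total_128kb_blocks = (uint32_t)(size_in_bytes / SIZE_128KB);

	/* remainder expressed as a count of 4KB blocks */
	residual_4kb_blocks = (uint32_t)((size_in_bytes % SIZE_128KB) /
					 SIZE_4KB);

	/* value written to MC_TZRAM_SIZE */
	return (residual_4kb_blocks << TZRAM_SIZE_RANGE_4KB_SHIFT) |
	       total_128kb_blocks;
}

With the TEGRA_TZRAM_SIZE used by this patch (0x40000, i.e. 256KB) this evaluates to two 128KB blocks and no residual 4KB blocks, so MC_TZRAM_SIZE would be programmed with 2.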
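
The mc_set_pcfifo_unordered_boot_so_mss()/mc_set_smmu_unordered_boot_so_mss() helpers added to memctrl_v2.h evaluate to a mask with the selected client's ordering bit cleared, so a caller presumably ANDs the result into the register value; the call sites are not part of this excerpt. A minimal sketch under that assumption, with the AFIW client and the function name chosen purely for illustration.

#include <memctrl_v2.h>

/* illustrative only: mark the PCFIFO AFIW client as unordered */
static void pcfifo_mark_afiw_unordered(void)
{
	uint32_t val;

	/* start from the documented reset value and clear the AFIW ordering bit */
	val = MC_PCFIFO_CLIENT_CONFIG1_RESET_VAL &
	      mc_set_pcfifo_unordered_boot_so_mss(1, AFIW);
	tegra_mc_write_32(MC_PCFIFO_CLIENT_CONFIG1, val);
}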
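
The new MCE_CMD_UNCORE_PERFMON_REQ path treats the whole uncore_perfmon_req_t as a single 64-bit word: mce_command_handler() copies arg0 into the request and returns the read data in X1. A minimal sketch of how a caller in BL31 might fill in a read request, assuming placeholder group/unit/register/counter selections; the wrapper name is illustrative and mce.h refers to the header extended by this patch.

#include <mce.h>

static int read_uncore_perfmon_counter(uint32_t ari_base, uint64_t *data)
{
	uncore_perfmon_req_t req = { .data = 0ULL };

	req.perfmon_command.cmd = UNCORE_PERFMON_CMD_READ;
	req.perfmon_command.grp = 1U;		/* L3, per the field comments */
	req.perfmon_command.unit = 0U;		/* first unit instance */
	req.perfmon_command.reg = 0U;		/* uncore perfmon register selector */
	req.perfmon_command.counter = 0U;	/* counter for NV_PMEVCNTR/NV_PMEVTYPER */

	/*
	 * ari_read_write_uncore_perfmon() returns the PERFMON status byte;
	 * zero means success and *data then holds the value that was read.
	 */
	return ari_read_write_uncore_perfmon(ari_base, req, data);
}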
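
As a worked example of the A02p check in plat_early_platform_setup(): the chip version is composed as (major << 12) | (minor << 8) | subrevision, so a part with major revision 1, minor revision 2 and fuse subrevision 0x01 encodes as 0x1201, which is exactly TEGRA186_VER_A02P; that part and anything newer therefore gets L2 ECC/parity protection enabled and tegra_enable_l2_ecc_parity_prot set for the resume path.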