author     Douglas Raillard <douglas.raillard@arm.com>    2016-11-24 15:43:19 +0000
committer  Douglas Raillard <douglas.raillard@arm.com>    2016-12-23 10:46:32 +0000
commit     3df6012a3eff73d75d747187d7cfac1fd6d7819f (patch)
tree       e46500a3463b4b45b38e5815314aea100634b177
parent     153e5eb8f155ec001027ee28bfc229b67ccceee0 (diff)
Abort preempted TSP STD SMC after PSCI CPU suspend
Standard SMC requests that are handled in the secure world by the Secure
Payload can be preempted by interrupts that must be handled in the normal
world. When the TSP is preempted, the secure context is stored and control
is passed to the normal world to handle the non-secure interrupt. Once that
is complete, the preempted secure context is restored. When restoring the
preempted context, the dispatcher assumes that the TSP preempted context is
still stored as the SECURE context by the context management library.

However, PSCI power management operations cause a synchronous entry into
the TSP, which overwrites the preempted SECURE context in the context
management library. When the SECURE context is later restored, the Secure
Payload crashes because it is no longer the preempted context.

This patch avoids corruption of the preempted SECURE context by aborting
any preempted SMC during PSCI power management calls. The abort_std_smc_entry
hook of the TSP is called when aborting the SMC request.

It also exposes this feature to the normal world as a fast SMC with FID
TSP_FID_ABORT, which aborts a preempted SMC.

Change-Id: I7a70347e9293f47d87b5de20484b4ffefb56b770
Signed-off-by: Douglas Raillard <douglas.raillard@arm.com>
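For context, a normal-world client can trigger the new abort path with a single fast SMC, roughly as sketched below. This is a minimal illustration under stated assumptions: smc64() is a hypothetical wrapper around the SMC instruction (function ID in x0, result read back from x0), and the TSP_FID_ABORT value shown is the assumed expansion of TSP_FAST_FID(0x3001) from include/bl32/tsp/tsp.h. No normal-world code is part of this patch.

    #include <stdint.h>

    /*
     * Assumed expansion of TSP_FID_ABORT, i.e. TSP_FAST_FID(0x3001) from
     * include/bl32/tsp/tsp.h. A real client should take the definition
     * from that header rather than hard-coding it.
     */
    #define TSP_FID_ABORT  0xf2003001u

    /*
     * Hypothetical wrapper that issues the SMC instruction with the
     * function ID in x0 and arguments in x1-x3, returning the new x0.
     */
    extern uint64_t smc64(uint64_t fid, uint64_t a1, uint64_t a2, uint64_t a3);

    /*
     * Ask the TSPD to abort any standard SMC left preempted on this CPU.
     * Per the dispatcher change below, SMC_UNK in the return value
     * indicates that there was no preempted standard SMC to abort.
     */
    static uint64_t tsp_abort_preempted_std_smc(void)
    {
            return smc64(TSP_FID_ABORT, 0, 0, 0);
    }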
-rw-r--r--   bl32/tsp/aarch64/tsp_entrypoint.S   28
-rw-r--r--   bl32/tsp/tsp_main.c                 19
-rw-r--r--   include/bl32/tsp/tsp.h              13
-rw-r--r--   services/spd/tspd/tspd_common.c     32
-rw-r--r--   services/spd/tspd/tspd_main.c       27
-rw-r--r--   services/spd/tspd/tspd_pm.c         38
-rw-r--r--   services/spd/tspd/tspd_private.h     3
7 files changed, 148 insertions, 12 deletions
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 25385caa..4c296d4a 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -180,6 +180,7 @@ func tsp_vector_table
b tsp_sel1_intr_entry
b tsp_system_off_entry
b tsp_system_reset_entry
+ b tsp_abort_std_smc_entry
endfunc tsp_vector_table
/*---------------------------------------------
@@ -441,3 +442,30 @@ func tsp_std_smc_entry
/* Should never reach here */
no_ret plat_panic_handler
endfunc tsp_std_smc_entry
+
+ /*---------------------------------------------------------------------
+ * This entrypoint is used by the TSPD to abort a preempted Standard
+ * SMC. It could be on behalf of the non-secure world, or because a CPU
+ * suspend/CPU off request needs to abort the preempted SMC.
+ * --------------------------------------------------------------------
+ */
+func tsp_abort_std_smc_entry
+
+ /*
+ * Exception masking is already done by the TSPD when entering this
+ * hook so there is no need to do it here.
+ */
+
+ /* Reset the stack used by the preempted SMC */
+ bl plat_set_my_stack
+
+ /*
+ * Allow some cleanup such as releasing locks.
+ */
+ bl tsp_abort_smc_handler
+
+ restore_args_call_smc
+
+ /* Should never reach here */
+ bl plat_panic_handler
+endfunc tsp_abort_std_smc_entry
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index d03f7e22..2b365320 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -416,3 +416,20 @@ tsp_args_t *tsp_smc_handler(uint64_t func,
0, 0, 0, 0);
}
+/*******************************************************************************
+ * TSP SMC abort handler. This function is called when aborting a preempted
+ * standard SMC request. It should clean up all resources owned by the SMC
+ * handler, such as locks or dynamically allocated memory, so that following
+ * SMC requests are executed in a clean environment.
+ ******************************************************************************/
+tsp_args_t *tsp_abort_smc_handler(uint64_t func,
+ uint64_t arg1,
+ uint64_t arg2,
+ uint64_t arg3,
+ uint64_t arg4,
+ uint64_t arg5,
+ uint64_t arg6,
+ uint64_t arg7)
+{
+ return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
+}
diff --git a/include/bl32/tsp/tsp.h b/include/bl32/tsp/tsp.h
index 4d7bc232..1e357884 100644
--- a/include/bl32/tsp/tsp.h
+++ b/include/bl32/tsp/tsp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@
#define TSP_SUSPEND_DONE 0xf2000003
#define TSP_RESUME_DONE 0xf2000004
#define TSP_PREEMPTED 0xf2000005
+#define TSP_ABORT_DONE 0xf2000007
#define TSP_SYSTEM_OFF_DONE 0xf2000008
#define TSP_SYSTEM_RESET_DONE 0xf2000009
@@ -81,10 +82,17 @@
/* SMC function ID to request a previously preempted std smc */
#define TSP_FID_RESUME TSP_STD_FID(0x3000)
/*
+ * SMC function ID to request the abort of a previously preempted std smc. A
+ * fast SMC is used so that the TSP abort handler does not have to be
+ * reentrant.
+ */
+#define TSP_FID_ABORT TSP_FAST_FID(0x3001)
+
+/*
* Total number of function IDs implemented for services offered to NS clients.
* The function IDs are defined above
*/
-#define TSP_NUM_FID 0x4
+#define TSP_NUM_FID 0x5
/* TSP implementation version numbers */
#define TSP_VERSION_MAJOR 0x0 /* Major version */
@@ -117,6 +125,7 @@ typedef struct tsp_vectors {
tsp_vector_isn_t sel1_intr_entry;
tsp_vector_isn_t system_off_entry;
tsp_vector_isn_t system_reset_entry;
+ tsp_vector_isn_t abort_std_smc_entry;
} tsp_vectors_t;
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 322413c9..3dcefea9 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +32,9 @@
#include <assert.h>
#include <bl_common.h>
#include <context_mgmt.h>
+#include <debug.h>
#include <string.h>
+#include <tsp.h>
#include "tspd_private.h"
/*******************************************************************************
@@ -129,3 +131,31 @@ void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
/* Should never reach here */
assert(0);
}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and aborts any preempted SMC
+ * request.
+ * It returns 1 if there was a preempted SMC request, 0 otherwise.
+ ******************************************************************************/
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
+{
+ if (!get_std_smc_active_flag(tsp_ctx->state))
+ return 0;
+
+ /* Abort any preempted SMC request */
+ clr_std_smc_active_flag(tsp_ctx->state);
+
+ /*
+ * Arrange for an entry into the test secure payload. It will
+ * be returned via TSP_ABORT_DONE case in tspd_smc_handler.
+ */
+ cm_set_elr_el3(SECURE,
+ (uint64_t) &tsp_vectors->abort_std_smc_entry);
+ uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
+
+ if (rc != 0)
+ panic();
+
+ return 1;
+}
+
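tspd_abort_preempted_smc() relies on the "standard SMC active" flag tracked in tsp_ctx->state. The helpers get_std_smc_active_flag() and clr_std_smc_active_flag() are not part of this diff; they are defined in services/spd/tspd/tspd_private.h and behave roughly like the sketch below, where the exact bit position is an assumption made purely for illustration.

    /*
     * Illustrative only: plausible shape of the STD SMC active-flag helpers
     * referenced above. The real definitions, including the actual bit
     * position within tsp_ctx->state, live in tspd_private.h.
     */
    #define STD_SMC_ACTIVE_FLAG_SHIFT   2   /* assumed bit position */
    #define STD_SMC_ACTIVE_FLAG_MASK    1

    #define get_std_smc_active_flag(state)                          \
            (((state) >> STD_SMC_ACTIVE_FLAG_SHIFT) &               \
             STD_SMC_ACTIVE_FLAG_MASK)
    #define set_std_smc_active_flag(state)                          \
            ((state) |= (1 << STD_SMC_ACTIVE_FLAG_SHIFT))
    #define clr_std_smc_active_flag(state)                          \
            ((state) &= ~(STD_SMC_ACTIVE_FLAG_MASK <<               \
                          STD_SMC_ACTIVE_FLAG_SHIFT))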
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 1a064594..2850e703 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -459,6 +459,11 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif
+ /*
+ * This function ID is used only by the SP to indicate it has finished
+ * aborting a preempted Standard SMC request.
+ */
+ case TSP_ABORT_DONE:
/*
* These function IDs are used only by the SP to indicate it has
@@ -596,6 +601,26 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
}
break;
+ /*
+ * Request from the non-secure world to abort a preempted Standard SMC
+ * call.
+ */
+ case TSP_FID_ABORT:
+ /* ABORT should only be invoked by normal world */
+ if (!ns) {
+ assert(0);
+ break;
+ }
+
+ /* Abort the preempted SMC request */
+ if (!tspd_abort_preempted_smc(tsp_ctx))
+ /*
+ * If there was no preempted SMC to abort, return
+ * SMC_UNK.
+ */
+ SMC_RET1(handle, SMC_UNK);
+
+ break;
/*
* Request from non secure world to resume the preempted
diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c
index 55562ba4..bc5435a8 100644
--- a/services/spd/tspd/tspd_pm.c
+++ b/services/spd/tspd/tspd_pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -58,6 +58,12 @@ static int32_t tspd_cpu_off_handler(uint64_t unused)
assert(tsp_vectors);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
/* Program the entry point and enter the TSP */
cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_off_entry);
rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -75,7 +81,7 @@ static int32_t tspd_cpu_off_handler(uint64_t unused)
*/
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
- return 0;
+ return 0;
}
/*******************************************************************************
@@ -91,6 +97,12 @@ static void tspd_cpu_suspend_handler(uint64_t max_off_pwrlvl)
assert(tsp_vectors);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
/* Program the entry point and enter the TSP */
cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_suspend_entry);
rc = tspd_synchronous_sp_entry(tsp_ctx);
@@ -99,7 +111,7 @@ static void tspd_cpu_suspend_handler(uint64_t max_off_pwrlvl)
* Read the response from the TSP. A non-zero return means that
* something went wrong while communicating with the TSP.
*/
- if (rc != 0)
+ if (rc)
panic();
/* Update its context to reflect the state the TSP is in */
@@ -108,7 +120,7 @@ static void tspd_cpu_suspend_handler(uint64_t max_off_pwrlvl)
/*******************************************************************************
* This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits
- * before passing control back to the Secure Monitor. Entry in S-El1 is done
+ * before passing control back to the Secure Monitor. Entry in S-EL1 is done
* after initialising minimal architectural state that guarantees safe
* execution.
******************************************************************************/
@@ -205,6 +217,12 @@ static void tspd_system_off(void)
assert(tsp_vectors);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
/* Program the entry point */
cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_off_entry);
@@ -225,11 +243,19 @@ static void tspd_system_reset(void)
assert(tsp_vectors);
assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON);
+ /*
+ * Abort any preempted SMC request before overwriting the SECURE
+ * context.
+ */
+ tspd_abort_preempted_smc(tsp_ctx);
+
/* Program the entry point */
cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->system_reset_entry);
- /* Enter the TSP. We do not care about the return value because we
- * must continue the reset anyway */
+ /*
+ * Enter the TSP. We do not care about the return value because we
+ * must continue the reset anyway
+ */
tspd_synchronous_sp_entry(tsp_ctx);
}
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index cadc6aaa..82039a42 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -242,6 +242,7 @@ void tspd_init_tsp_ep_state(struct entry_point_info *tsp_ep,
uint32_t rw,
uint64_t pc,
tsp_context_t *tsp_ctx);
+int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);
extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
extern struct tsp_vectors *tsp_vectors;