author     David Cunado <david.cunado@arm.com>   2017-04-05 11:34:03 +0100
committer  David Cunado <david.cunado@arm.com>   2017-04-26 12:58:52 +0100
commit     16292f54811f27bb7de28512cda74db83686cb63 (patch)
tree       2e88a51f4e01efaf990f73bcaf248f65ce1cc57d /services/spd
parent     ec54a87184d53e88c3666a30738ef506ddc1acc2 (diff)
Update terminology: standard SMC to yielding SMC
Since Issue B (November 2016) of the SMC Calling Convention document,
standard SMC calls are renamed to yielding SMC calls to help avoid
confusion with the standard service SMC range, which remains unchanged.

http://infocenter.arm.com/help/topic/com.arm.doc.den0028b/ARM_DEN0028B_SMC_Calling_Convention.pdf

This patch adds a new define for the yielding SMC call type and
deprecates the current standard SMC call type. The TSP is migrated to
use this new terminology and, additionally, the documentation and code
comments are updated to use this new terminology.

Change-Id: I0d7cc0224667ee6c050af976745f18c55906a793
Signed-off-by: David Cunado <david.cunado@arm.com>
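For orientation, the distinction being renamed is carried in bit 31 of the SMC function ID: 1 marks a Fast Call, 0 a Yielding Call (previously a Standard Call). A minimal sketch of the defines involved, assuming the bit-31 encoding from SMCCC Issue B; only SMC_TYPE_YIELD, SMC_TYPE_STD and GET_SMC_TYPE appear in the diff below, the remaining names are illustrative:

    /* Sketch only: bit 31 of the function ID selects the call type (SMCCC Issue B). */
    #define FUNCID_TYPE_SHIFT   31              /* illustrative name */
    #define FUNCID_TYPE_MASK    0x1             /* illustrative name */

    #define SMC_TYPE_FAST       1               /* atomic, runs to completion */
    #define SMC_TYPE_YIELD      0               /* preemptible; new name added by this patch */
    #define SMC_TYPE_STD        SMC_TYPE_YIELD  /* deprecated spelling, per the commit message */

    #define GET_SMC_TYPE(fid)   (((fid) >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)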
Diffstat (limited to 'services/spd')
-rw-r--r--  services/spd/tspd/tspd_common.c   |  8
-rw-r--r--  services/spd/tspd/tspd_main.c     | 49
-rw-r--r--  services/spd/tspd/tspd_private.h  | 23
3 files changed, 41 insertions(+), 39 deletions(-)
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 70959d75..3c6fb28b 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -65,7 +65,7 @@ void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
tsp_ctx->mpidr = read_mpidr_el1();
tsp_ctx->state = 0;
set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
- clr_std_smc_active_flag(tsp_ctx->state);
+ clr_yield_smc_active_flag(tsp_ctx->state);
cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
@@ -140,18 +140,18 @@ void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
******************************************************************************/
int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx)
{
- if (!get_std_smc_active_flag(tsp_ctx->state))
+ if (!get_yield_smc_active_flag(tsp_ctx->state))
return 0;
/* Abort any preempted SMC request */
- clr_std_smc_active_flag(tsp_ctx->state);
+ clr_yield_smc_active_flag(tsp_ctx->state);
/*
* Arrange for an entry into the test secure payload. It will
* be returned via TSP_ABORT_DONE case in tspd_smc_handler.
*/
cm_set_elr_el3(SECURE,
- (uint64_t) &tsp_vectors->abort_std_smc_entry);
+ (uint64_t) &tsp_vectors->abort_yield_smc_entry);
uint64_t rc = tspd_synchronous_sp_entry(tsp_ctx);
if (rc != 0)
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index ff515cca..f5384223 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -102,7 +102,7 @@ uint64_t tspd_handle_sp_preemption(void *handle)
cm_set_next_eret_context(NON_SECURE);
/*
- * The TSP was preempted during STD SMC execution.
+ * The TSP was preempted during execution of a Yielding SMC Call.
* Return back to the normal world with SMC_PREEMPTED as error
* code in x0.
*/
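SMC_PREEMPTED implies a small protocol on the normal-world side: the client either re-issues the call as TSP_FID_RESUME to continue where the TSP was interrupted, or issues TSP_FID_ABORT to discard the preempted call (both cases are handled further down in this file). A hedged caller-side sketch, assuming a hypothetical tsp_smc() helper that issues the SMC and returns x0:

    /* tsp_smc() is assumed for illustration; it is not part of this patch. */
    uint64_t ret = tsp_smc(TSP_YIELD_FID(TSP_ADD), arg0, arg1);
    while (ret == SMC_PREEMPTED) {
            /* The TSP was preempted by a non-secure interrupt: either resume it... */
            ret = tsp_smc(TSP_FID_RESUME, 0, 0);
            /*
             * ...or give up and abort the preempted call instead:
             * ret = tsp_smc(TSP_FID_ABORT, 0, 0);
             */
    }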
@@ -146,7 +146,7 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
* context since the TSP is supposed to preserve it during S-EL1
* interrupt handling.
*/
- if (get_std_smc_active_flag(tsp_ctx->state)) {
+ if (get_yield_smc_active_flag(tsp_ctx->state)) {
tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3);
tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
@@ -345,7 +345,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* Restore the relevant EL3 state which saved to service
* this SMC.
*/
- if (get_std_smc_active_flag(tsp_ctx->state)) {
+ if (get_yield_smc_active_flag(tsp_ctx->state)) {
SMC_SET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3,
tsp_ctx->saved_spsr_el3);
@@ -461,7 +461,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
#endif
/*
* This function ID is used only by the SP to indicate it has finished
- * aborting a preempted Standard SMC request.
+ * aborting a preempted Yielding SMC Call.
*/
case TSP_ABORT_DONE:
@@ -509,10 +509,10 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
case TSP_FAST_FID(TSP_MUL):
case TSP_FAST_FID(TSP_DIV):
- case TSP_STD_FID(TSP_ADD):
- case TSP_STD_FID(TSP_SUB):
- case TSP_STD_FID(TSP_MUL):
- case TSP_STD_FID(TSP_DIV):
+ case TSP_YIELD_FID(TSP_ADD):
+ case TSP_YIELD_FID(TSP_SUB):
+ case TSP_YIELD_FID(TSP_MUL):
+ case TSP_YIELD_FID(TSP_DIV):
if (ns) {
/*
* This is a fresh request from the non-secure client.
@@ -523,7 +523,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
assert(handle == cm_get_context(NON_SECURE));
/* Check if we are already preempted */
- if (get_std_smc_active_flag(tsp_ctx->state))
+ if (get_yield_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
@@ -553,13 +553,14 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
cm_set_elr_el3(SECURE, (uint64_t)
&tsp_vectors->fast_smc_entry);
} else {
- set_std_smc_active_flag(tsp_ctx->state);
+ set_yield_smc_active_flag(tsp_ctx->state);
cm_set_elr_el3(SECURE, (uint64_t)
- &tsp_vectors->std_smc_entry);
+ &tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
/*
* Enable the routing of NS interrupts to EL3
- * during STD SMC processing on this core.
+ * during processing of a Yielding SMC Call on
+ * this core.
*/
enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
@@ -585,13 +586,13 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
- if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) {
- clr_std_smc_active_flag(tsp_ctx->state);
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
+ clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
/*
* Disable the routing of NS interrupts to EL3
- * after STD SMC processing is finished on this
- * core.
+ * after processing of a Yielding SMC Call on
+ * this core is finished.
*/
disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
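Taken together, the two TSP_NS_INTR_ASYNC_PREEMPT hunks above form a symmetric pair: NS interrupts are routed to EL3 only while a Yielding SMC Call is active in the TSP on this core. A condensed, illustrative view of that pairing (no new code path is proposed here; both calls already appear in this file):

    set_yield_smc_active_flag(tsp_ctx->state);
    enable_intr_rm_local(INTR_TYPE_NS, SECURE);    /* entering (or resuming) a yielding call */
    /* ... TSP runs; an NS interrupt now routes to EL3 and can preempt it ... */
    clr_yield_smc_active_flag(tsp_ctx->state);
    disable_intr_rm_local(INTR_TYPE_NS, SECURE);   /* the yielding call has finished on this core */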
@@ -602,8 +603,8 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
break;
/*
- * Request from the non-secure world to abort a preempted Standard SMC
- * call.
+ * Request from the non-secure world to abort a preempted Yielding SMC
+ * Call.
*/
case TSP_FID_ABORT:
/* ABORT should only be invoked by normal world */
@@ -635,7 +636,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/*
* Request from non secure world to resume the preempted
- * Standard SMC call.
+ * Yielding SMC Call.
*/
case TSP_FID_RESUME:
/* RESUME should be invoked only by normal world */
@@ -652,7 +653,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
assert(handle == cm_get_context(NON_SECURE));
/* Check if we are already preempted before resume */
- if (!get_std_smc_active_flag(tsp_ctx->state))
+ if (!get_yield_smc_active_flag(tsp_ctx->state))
SMC_RET1(handle, SMC_UNK);
cm_el1_sysregs_context_save(NON_SECURE);
@@ -664,7 +665,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
#if TSP_NS_INTR_ASYNC_PREEMPT
/*
* Enable the routing of NS interrupts to EL3 during resumption
- * of STD SMC call on this core.
+ * of a Yielding SMC Call on this core.
*/
enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
@@ -724,13 +725,13 @@ DECLARE_RT_SVC(
tspd_smc_handler
);
-/* Define a SPD runtime service descriptor for standard SMC calls */
+/* Define a SPD runtime service descriptor for Yielding SMC Calls */
DECLARE_RT_SVC(
tspd_std,
OEN_TOS_START,
OEN_TOS_END,
- SMC_TYPE_STD,
+ SMC_TYPE_YIELD,
NULL,
tspd_smc_handler
);
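The descriptor keeps its tspd_std name but is now registered for SMC_TYPE_YIELD, so tspd_smc_handler serves both the fast and the yielding half of the Trusted OS function ID range. As a hedged sketch, a hypothetical new SPD would register its yielding-call service the same way (my_spd_yield and my_spd_smc_handler are made-up names):

    DECLARE_RT_SVC(
            my_spd_yield,           /* hypothetical descriptor name */
            OEN_TOS_START,          /* first owned OEN */
            OEN_TOS_END,            /* last owned OEN */
            SMC_TYPE_YIELD,         /* handle Yielding SMC Calls */
            NULL,                   /* init already done by the fast-call descriptor */
            my_spd_smc_handler      /* hypothetical common handler */
    );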
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index 82039a42..ebed7e50 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,7 +57,7 @@
/*
- * This flag is used by the TSPD to determine if the TSP is servicing a standard
+ * This flag is used by the TSPD to determine if the TSP is servicing a yielding
* SMC request prior to programming the next entry into the TSP e.g. if TSP
* execution is preempted by a non-secure interrupt and handed control to the
* normal world. If another request which is distinct from what the TSP was
@@ -65,15 +65,16 @@
* reject the new request or service it while ensuring that the previous context
* is not corrupted.
*/
-#define STD_SMC_ACTIVE_FLAG_SHIFT 2
-#define STD_SMC_ACTIVE_FLAG_MASK 1
-#define get_std_smc_active_flag(state) ((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \
- & STD_SMC_ACTIVE_FLAG_MASK)
-#define set_std_smc_active_flag(state) (state |= \
- 1 << STD_SMC_ACTIVE_FLAG_SHIFT)
-#define clr_std_smc_active_flag(state) (state &= \
- ~(STD_SMC_ACTIVE_FLAG_MASK \
- << STD_SMC_ACTIVE_FLAG_SHIFT))
+#define YIELD_SMC_ACTIVE_FLAG_SHIFT 2
+#define YIELD_SMC_ACTIVE_FLAG_MASK 1
+#define get_yield_smc_active_flag(state) \
+ ((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT) \
+ & YIELD_SMC_ACTIVE_FLAG_MASK)
+#define set_yield_smc_active_flag(state) (state |= \
+ 1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
+#define clr_yield_smc_active_flag(state) (state &= \
+ ~(YIELD_SMC_ACTIVE_FLAG_MASK \
+ << YIELD_SMC_ACTIVE_FLAG_SHIFT))
/*******************************************************************************
* Secure Payload execution state information i.e. aarch32 or aarch64
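The renamed accessors behave exactly like the ones they replace: bit 2 of the per-CPU state word records whether a yielding call is in flight on that core. A small usage sketch with an illustrative local variable (the real code operates on tsp_ctx->state):

    uint32_t state = 0;                     /* illustrative stand-in for tsp_ctx->state */

    set_yield_smc_active_flag(state);       /* state now has bit 2 set (value 0x4) */
    /* get_yield_smc_active_flag(state) == 1 while the yielding call is in progress */
    clr_yield_smc_active_flag(state);       /* cleared again on completion or abort */
    /* get_yield_smc_active_flag(state) == 0 */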