Diffstat (limited to 'services')
-rw-r--r--  services/spd/opteed/opteed_common.c                 |    2
-rw-r--r--  services/spd/tlkd/tlkd_common.c                     |    2
-rw-r--r--  services/spd/tspd/tspd_common.c                     |    2
-rw-r--r--  services/std_svc/sdei/sdei_event.c                  |   98
-rw-r--r--  services/std_svc/sdei/sdei_intr_mgmt.c              |  678
-rw-r--r--  services/std_svc/sdei/sdei_main.c                   | 1066
-rw-r--r--  services/std_svc/sdei/sdei_private.h                |  234
-rw-r--r--  services/std_svc/sdei/sdei_state.c                  |  150
-rw-r--r--  services/std_svc/spm/aarch64/spm_helpers.S          |   74
-rw-r--r--  services/std_svc/spm/aarch64/spm_shim_exceptions.S  |  128
-rw-r--r--  services/std_svc/spm/secure_partition_setup.c       |  310
-rw-r--r--  services/std_svc/spm/spm.mk                         |   25
-rw-r--r--  services/std_svc/spm/spm_main.c                     |  452
-rw-r--r--  services/std_svc/spm/spm_private.h                  |   55
-rw-r--r--  services/std_svc/spm/spm_shim_private.h             |   29
-rw-r--r--  services/std_svc/std_svc_setup.c                    |   42
16 files changed, 3341 insertions, 6 deletions
diff --git a/services/spd/opteed/opteed_common.c b/services/spd/opteed/opteed_common.c
index 2693e7d1..e5e2be77 100644
--- a/services/spd/opteed/opteed_common.c
+++ b/services/spd/opteed/opteed_common.c
@@ -78,7 +78,7 @@ uint64_t opteed_synchronous_sp_entry(optee_context_t *optee_ctx)
cm_set_next_eret_context(SECURE);
rc = opteed_enter_sp(&optee_ctx->c_rt_ctx);
-#if DEBUG
+#if ENABLE_ASSERTIONS
optee_ctx->c_rt_ctx = 0;
#endif
diff --git a/services/spd/tlkd/tlkd_common.c b/services/spd/tlkd/tlkd_common.c
index 599d7a30..483d45b6 100644
--- a/services/spd/tlkd/tlkd_common.c
+++ b/services/spd/tlkd/tlkd_common.c
@@ -131,7 +131,7 @@ uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx)
cm_set_next_eret_context(SECURE);
rc = tlkd_enter_sp(&tlk_ctx->c_rt_ctx);
-#if DEBUG
+#if ENABLE_ASSERTIONS
tlk_ctx->c_rt_ctx = 0;
#endif
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 1538232f..de54dbe9 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -79,7 +79,7 @@ uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx)
cm_set_next_eret_context(SECURE);
rc = tspd_enter_sp(&tsp_ctx->c_rt_ctx);
-#if DEBUG
+#if ENABLE_ASSERTIONS
tsp_ctx->c_rt_ctx = 0;
#endif
diff --git a/services/std_svc/sdei/sdei_event.c b/services/std_svc/sdei/sdei_event.c
new file mode 100644
index 00000000..bf0e779d
--- /dev/null
+++ b/services/std_svc/sdei/sdei_event.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <utils.h>
+#include "sdei_private.h"
+
+#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
+
+/*
+ * Get the SDEI entry for the given mapping; returns a pointer to the SDEI
+ * entry. This always succeeds for a valid mapping.
+ *
+ * Both shared and private maps are stored in a single-dimensional array.
+ * Private event entries are kept for each PE, forming a logical 2D array.
+ */
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
+{
+ const sdei_mapping_t *mapping;
+ sdei_entry_t *cpu_priv_base;
+ unsigned int idx, base_idx;
+
+ if (is_event_private(map)) {
+ /*
+ * For a private map, find the index of the mapping in the
+ * array.
+ */
+ mapping = SDEI_PRIVATE_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ /* Base of private mappings for this CPU */
+ base_idx = plat_my_core_pos() * mapping->num_maps;
+ cpu_priv_base = &sdei_private_event_table[base_idx];
+
+ /*
+ * Return the address of the entry at the same index in the
+ * per-CPU event entry.
+ */
+ return &cpu_priv_base[idx];
+ } else {
+ mapping = SDEI_SHARED_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ return &sdei_shared_event_table[idx];
+ }
+}
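
The private table is therefore a flat array of PLATFORM_CORE_COUNT * num_maps entries. A minimal sketch of the effective index computed above; the helper name is illustrative and not part of the change:

static unsigned int priv_entry_index(unsigned int core_pos,
		unsigned int num_priv_maps, unsigned int map_idx)
{
	/* Row = CPU position, column = index within the private mapping */
	return (core_pos * num_priv_maps) + map_idx;
}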
+
+/*
+ * Find event mapping for a given interrupt number: On success, returns pointer
+ * to the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i;
+
+ /*
+ * Look for a match in private and shared mappings, as requested. This
+ * is a linear search. However, if the mappings are required to be
+ * sorted, for large maps, we could consider binary search.
+ */
+ mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
+ iterate_mapping(mapping, i, map) {
+ if (map->intr == intr_num)
+ return map;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find event mapping for a given event number: On success returns pointer to
+ * the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map(int ev_num)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i, j;
+
+ /*
+ * Iterate through mappings to find a match. This is a linear search.
+ * However, if the mappings are required to be sorted, for large maps,
+ * we could consider binary search.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ if (map->ev_num == ev_num)
+ return map;
+ }
+ }
+
+ return NULL;
+}
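
Since sdei_class_init() in sdei_main.c asserts that each mapping is sorted by event number, the linear searches above could be swapped for a binary search on large maps. A hedged sketch against the same types, not part of the change:

static sdei_ev_map_t *find_event_map_bsearch(const sdei_mapping_t *mapping,
		int ev_num)
{
	int lo = 0, hi = (int) mapping->num_maps - 1;

	while (lo <= hi) {
		int mid = lo + ((hi - lo) / 2);
		sdei_ev_map_t *map = &mapping->map[mid];

		if (map->ev_num == ev_num)
			return map;
		if (map->ev_num < ev_num)
			lo = mid + 1;
		else
			hi = mid - 1;
	}

	return NULL;
}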
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
new file mode 100644
index 00000000..42bf46d0
--- /dev/null
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <interrupt_mgmt.h>
+#include <runtime_svc.h>
+#include <sdei.h>
+#include <string.h>
+#include "sdei_private.h"
+
+#define PE_MASKED 1
+#define PE_NOT_MASKED 0
+
+/* x0-x17 GPREGS context */
+#define SDEI_SAVED_GPREGS 18
+
+/* Maximum preemption nesting levels: Critical priority and Normal priority */
+#define MAX_EVENT_NESTING 2
+
+/* Per-CPU SDEI state access macro */
+#define sdei_get_this_pe_state() (&sdei_cpu_state[plat_my_core_pos()])
+
+/* Structure to store information about an outstanding dispatch */
+typedef struct sdei_dispatch_context {
+ sdei_ev_map_t *map;
+ unsigned int sec_state;
+ unsigned int intr_raw;
+ uint64_t x[SDEI_SAVED_GPREGS];
+
+ /* Exception state registers */
+ uint64_t elr_el3;
+ uint64_t spsr_el3;
+} sdei_dispatch_context_t;
+
+/* Per-CPU SDEI state data */
+typedef struct sdei_cpu_state {
+ sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
+ unsigned short stack_top; /* Empty ascending */
+ unsigned int pe_masked:1;
+ unsigned int pending_enables:1;
+} sdei_cpu_state_t;
+
+/* SDEI states for all cores in the system */
+static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
+
+unsigned int sdei_pe_mask(void)
+{
+ unsigned int ret;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ /*
+	 * The return value indicates whether this call had any effect on the
+	 * mask status of this PE.
+ */
+ ret = (state->pe_masked ^ PE_MASKED);
+ state->pe_masked = PE_MASKED;
+
+ return ret;
+}
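
For illustration: two back-to-back PE_MASK calls on an unmasked PE return 1 (mask status changed) and then 0 (the PE was already masked), which is exactly what the XOR above encodes.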
+
+void sdei_pe_unmask(void)
+{
+ int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ /*
+ * If there are pending enables, iterate through the private mappings
+ * and enable those bound maps that are in enabled state. Also, iterate
+ * through shared mappings and enable interrupts of events that are
+ * targeted to this PE.
+ */
+ if (state->pending_enables) {
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
+ plat_ic_enable_interrupt(map->intr);
+ }
+
+ for_each_shared_map(i, map) {
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+ if (is_map_bound(map) &&
+ GET_EV_STATE(se, ENABLED) &&
+ (se->reg_flags == SDEI_REGF_RM_PE) &&
+ (se->affinity == my_mpidr)) {
+ plat_ic_enable_interrupt(map->intr);
+ }
+ sdei_map_unlock(map);
+ }
+ }
+
+ state->pending_enables = 0;
+ state->pe_masked = PE_NOT_MASKED;
+}
+
+/* Push a dispatch context to the dispatch stack */
+static sdei_dispatch_context_t *push_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ sdei_dispatch_context_t *disp_ctx;
+
+ /* Cannot have more than max events */
+ assert(state->stack_top < MAX_EVENT_NESTING);
+
+ disp_ctx = &state->dispatch_stack[state->stack_top];
+ state->stack_top++;
+
+ return disp_ctx;
+}
+
+/* Pop a dispatch context from the dispatch stack */
+static sdei_dispatch_context_t *pop_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ state->stack_top--;
+
+ return &state->dispatch_stack[state->stack_top];
+}
+
+/* Retrieve the context at the top of dispatch stack */
+static sdei_dispatch_context_t *get_outstanding_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ return &state->dispatch_stack[state->stack_top - 1];
+}
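
Together, these three helpers implement the two-entry, empty-ascending dispatch stack: a Normal dispatch occupies slot 0, and at most one Critical dispatch may preempt it into slot 1. An illustrative trace on an otherwise idle PE:

/*
 * push_dispatch();   stack_top == 1  (Normal event dispatched)
 * push_dispatch();   stack_top == 2  (Critical event preempts it)
 * pop_dispatch();    stack_top == 1  (Critical handler completes)
 * pop_dispatch();    stack_top == 0  (Normal handler completes)
 */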
+
+static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
+ unsigned int intr_raw)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ gp_regs_t *tgt_gpregs;
+ el3_state_t *tgt_el3;
+
+ assert(tgt_ctx);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ disp_ctx = push_dispatch();
+ assert(disp_ctx);
+ disp_ctx->sec_state = sec_state;
+ disp_ctx->map = map;
+ disp_ctx->intr_raw = intr_raw;
+
+ /* Save general purpose and exception registers */
+ memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
+ disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
+ disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
+}
+
+static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+{
+ gp_regs_t *tgt_gpregs;
+ el3_state_t *tgt_el3;
+
+ assert(tgt_ctx);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
+ foo);
+
+ /* Restore general purpose and exception registers */
+ memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
+ write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
+ write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
+}
+
+static void save_secure_context(void)
+{
+ cm_el1_sysregs_context_save(SECURE);
+}
+
+/* Restore Secure context and arrange to resume it at the next ERET */
+static void restore_and_resume_secure_context(void)
+{
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+}
+
+/*
+ * Restore Non-secure context and arrange to resume it at the next ERET. Return
+ * pointer to the Non-secure context.
+ */
+static cpu_context_t *restore_and_resume_ns_context(void)
+{
+ cpu_context_t *ns_ctx;
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ ns_ctx = cm_get_context(NON_SECURE);
+ assert(ns_ctx);
+
+ return ns_ctx;
+}
+
+/*
+ * Populate the Non-secure context so that the next ERET will dispatch to the
+ * SDEI client.
+ */
+static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
+ cpu_context_t *ctx, int sec_state_to_resume,
+ unsigned int intr_raw)
+{
+ el3_state_t *el3_ctx = get_el3state_ctx(ctx);
+
+ /* Push the event and context */
+ save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);
+
+ /*
+ * Setup handler arguments:
+ *
+ * - x0: Event number
+ * - x1: Handler argument supplied at the time of event registration
+ * - x2: Interrupted PC
+ * - x3: Interrupted SPSR
+ */
+ SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
+ SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
+ SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
+ SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));
+
+ /*
+ * Prepare for ERET:
+ *
+ * - Set PC to the registered handler address
+ * - Set SPSR to jump to client EL with exceptions masked
+ */
+ cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
+ SPSR_64(sdei_client_el(), MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS));
+}
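
From the client's perspective, the register assignments above define the handler's initial state. A hedged sketch of a matching client-side (EL1/EL2) entry point follows; the name and C prototype are illustrative, as only the register contents are fixed here:

void client_sdei_handler(uint64_t ev_num,		/* x0 */
		uint64_t registered_arg,		/* x1 */
		uint64_t interrupted_pc,		/* x2 */
		uint64_t interrupted_spsr);		/* x3 */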
+
+/* Handle a triggered SDEI interrupt while events were masked on this PE */
+static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
+ sdei_cpu_state_t *state, unsigned int intr_raw)
+{
+ uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
+ int disable = 0;
+
+ /* Nothing to do for event 0 */
+ if (map->ev_num == SDEI_EVENT_0)
+ return;
+
+ /*
+ * For a private event, or for a shared event specifically routed to
+ * this CPU, we disable the interrupt, leave it pending, and perform
+ * EOI.
+ */
+ if (is_event_private(map)) {
+ disable = 1;
+ } else if (se->reg_flags == SDEI_REGF_RM_PE) {
+ assert(se->affinity == my_mpidr);
+ disable = 1;
+ }
+
+ if (disable) {
+ plat_ic_disable_interrupt(map->intr);
+ plat_ic_set_interrupt_pending(map->intr);
+ plat_ic_end_of_interrupt(intr_raw);
+ state->pending_enables = 1;
+
+ return;
+ }
+
+ /*
+ * We just received a shared event with routing set to ANY PE. The
+ * interrupt can't be delegated on this PE as SDEI events are masked.
+ * However, because its routing mode is ANY, it is possible that the
+ * event can be delegated on any other PE that hasn't masked events.
+ * Therefore, we set the interrupt back pending so as to give other
+ * suitable PEs a chance of handling it.
+ */
+ assert(plat_ic_is_spi(map->intr));
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * Leaving the same interrupt pending also means that the same interrupt
+ * can target this PE again as soon as this PE leaves EL3. Whether and
+ * how often that happens depends on the implementation of GIC.
+ *
+ * We therefore call a platform handler to resolve this situation.
+ */
+ plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
+
+ /* This PE is masked. We EOI the interrupt, as it can't be delegated */
+ plat_ic_end_of_interrupt(intr_raw);
+}
+
+/* SDEI main interrupt handler */
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+ void *cookie)
+{
+ sdei_entry_t *se;
+ cpu_context_t *ctx;
+ sdei_ev_map_t *map;
+ sdei_dispatch_context_t *disp_ctx;
+ unsigned int sec_state;
+ sdei_cpu_state_t *state;
+ uint32_t intr;
+
+ /*
+ * To handle an event, the following conditions must be true:
+ *
+ * 1. Event must be signalled
+ * 2. Event must be enabled
+ * 3. This PE must be a target PE for the event
+ * 4. PE must be unmasked for SDEI
+ * 5. If this is a normal event, no event must be running
+ * 6. If this is a critical event, no critical event must be running
+ *
+ * (1) and (2) are true when this function is running
+ * (3) is enforced in GIC by selecting the appropriate routing option
+ * (4) is satisfied by the client calling PE_UNMASK
+ * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
+ *   - Normal SDEI events belong to the Normal SDE priority class
+ *   - Critical SDEI events belong to the Critical SDE priority class
+ *
+ * The interrupt has already been acknowledged, and therefore is active,
+ * so no other PE can handle this event while we are at it.
+ *
+ * Find out whether this is an SDEI interrupt: there must be an event
+ * mapped to this interrupt.
+ */
+ intr = plat_ic_get_interrupt_id(intr_raw);
+ map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
+ if (!map) {
+ ERROR("No SDEI map for interrupt %u\n", intr);
+ panic();
+ }
+
+ /*
+ * Received interrupt number must either correspond to event 0, or must
+ * be a bound interrupt.
+ */
+ assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
+
+ se = get_event_entry(map);
+ state = sdei_get_this_pe_state();
+
+ if (state->pe_masked == PE_MASKED) {
+ /*
+ * Interrupts received while this PE was masked can't be
+ * dispatched.
+ */
+ SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
+ read_mpidr_el1());
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ handle_masked_trigger(map, se, state, intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ /* Insert load barrier for signalled SDEI event */
+ if (map->ev_num == SDEI_EVENT_0)
+ dmbld();
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* Assert that a shared event routed to this PE was configured as such */
+ if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
+ assert(se->affinity ==
+ (read_mpidr_el1() & MPIDR_AFFINITY_MASK));
+ }
+
+ if (!can_sdei_state_trans(se, DO_DISPATCH)) {
+ SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
+ map->ev_num, se->state);
+
+ /*
+ * If the event is registered, leave the interrupt pending so
+ * that it's delivered when the event is enabled.
+ */
+ if (GET_EV_STATE(se, REGISTERED))
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * The interrupt was disabled or unregistered after the handler
+ * started to execute, which means now the interrupt is already
+ * disabled and we just need to EOI the interrupt.
+ */
+ plat_ic_end_of_interrupt(intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ disp_ctx = get_outstanding_dispatch();
+ if (is_event_critical(map)) {
+ /*
+ * If this event is Critical, and if there's an outstanding
+ * dispatch, assert the latter is a Normal dispatch. Critical
+ * events can preempt an outstanding Normal event dispatch.
+ */
+ if (disp_ctx)
+ assert(is_event_normal(disp_ctx->map));
+ } else {
+ /*
+ * If this event is Normal, assert that there are no outstanding
+ * dispatches. Normal events can't preempt any outstanding event
+ * dispatches.
+ */
+ assert(disp_ctx == NULL);
+ }
+
+ sec_state = get_interrupt_src_ss(flags);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
+ map->ev_num, sec_state, read_spsr_el3(),
+ read_elr_el3());
+
+ ctx = handle;
+
+ /*
+ * Check if we interrupted secure state. Perform a context switch so
+ * that we can delegate to NS.
+ */
+ if (sec_state == SECURE) {
+ save_secure_context();
+ ctx = restore_and_resume_ns_context();
+ }
+
+ setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);
+
+ /*
+ * End of interrupt is done in sdei_event_complete, when the client
+ * signals completion.
+ */
+ return 0;
+}
+
+/* Explicitly dispatch the given SDEI event */
+int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ctx;
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_cpu_state_t *state;
+
+ /* Validate preempted security state */
+ if ((preempted_sec_state != SECURE) &&
+ (preempted_sec_state != NON_SECURE)) {
+ return -1;
+ }
+
+ /* Can't dispatch if events are masked on this PE */
+ state = sdei_get_this_pe_state();
+ if (state->pe_masked == PE_MASKED)
+ return -1;
+
+ /* Event 0 can't be dispatched */
+ if (ev_num == SDEI_EVENT_0)
+ return -1;
+
+ /* Locate mapping corresponding to this event */
+ map = find_event_map(ev_num);
+ if (!map)
+ return -1;
+
+ /*
+ * Statically-bound or dynamic maps are dispatched only as a result of
+ * an interrupt, and not upon explicit request.
+ */
+ if (is_map_dynamic(map) || is_map_bound(map))
+ return -1;
+
+ /* The event must be private */
+ if (is_event_shared(map))
+ return -1;
+
+ /* Examine state of dispatch stack */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx) {
+ /*
+ * There's an outstanding dispatch. If the outstanding dispatch
+ * is critical, no more dispatches are possible.
+ */
+ if (is_event_critical(disp_ctx->map))
+ return -1;
+
+ /*
+ * If the outstanding dispatch is Normal, only critical events
+ * can be dispatched.
+ */
+ if (is_event_normal(map))
+ return -1;
+ }
+
+ se = get_event_entry(map);
+ if (!can_sdei_state_trans(se, DO_DISPATCH))
+ return -1;
+
+ /* Activate the priority corresponding to the event being dispatched */
+ ehf_activate_priority(sdei_event_priority(map));
+
+ /*
+ * We assume the current context is SECURE, and that it's already been
+ * saved.
+ */
+ ctx = restore_and_resume_ns_context();
+
+ /*
+ * The caller has effectively terminated execution. Record the preempted
+ * context so that it is resumed later, when the event completes or
+ * completes-and-resumes.
+ */
+ setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);
+
+ return 0;
+}
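
sdei_dispatch_event() is intended for other EL3 firmware, such as a platform error handler, that has already saved the interrupted Secure context. A hedged usage sketch; the handler name and the PLAT_RAS_EV event number are hypothetical:

/*
 * Sketch: EL3 error handler delegating to a platform-defined,
 * explicitly-dispatched (private, non-dynamic, unbound) event.
 */
static void plat_error_handler_sketch(void)
{
	if (sdei_dispatch_event(PLAT_RAS_EV, SECURE) != 0)
		ERROR("SDEI explicit dispatch failed\n");
}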
+
+int sdei_event_complete(int resume, uint64_t pc)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ctx;
+ sdei_action_t act;
+ unsigned int client_el = sdei_client_el();
+
+ /* Return error if called without an active event */
+ disp_ctx = pop_dispatch();
+ if (!disp_ctx)
+ return SDEI_EDENY;
+
+ /* Validate resumption point */
+ if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
+ return SDEI_EDENY;
+
+ map = disp_ctx->map;
+ assert(map);
+
+ se = get_event_entry(map);
+
+ SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
+ map->ev_num, read_spsr_el3(), read_elr_el3());
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
+ if (!can_sdei_state_trans(se, act)) {
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+ return SDEI_EDENY;
+ }
+
+ /*
+ * Restore Non-secure to how it was originally interrupted. Once done,
+ * it's up-to-date with the saved copy.
+ */
+ ctx = cm_get_context(NON_SECURE);
+ restore_event_ctx(disp_ctx, ctx);
+
+ if (resume) {
+ /*
+ * Complete-and-resume call. Prepare the Non-secure context
+ * (currently active) for complete and resume.
+ */
+ cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
+ MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+ /*
+ * Make it look as if a synchronous exception were taken at the
+ * supplied Non-secure resumption point. Populate SPSR and
+ * ELR_ELx so that an ERET from there works as expected.
+ *
+ * The assumption is that the client, if necessary, would have
+ * saved any live content in these registers before making this
+ * call.
+ */
+ if (client_el == MODE_EL2) {
+ write_elr_el2(disp_ctx->elr_el3);
+ write_spsr_el2(disp_ctx->spsr_el3);
+ } else {
+ /* EL1 */
+ write_elr_el1(disp_ctx->elr_el3);
+ write_spsr_el1(disp_ctx->spsr_el3);
+ }
+ }
+
+ /*
+ * If the cause of dispatch originally interrupted the Secure world, and
+ * if Non-secure world wasn't allowed to preempt Secure execution,
+ * resume Secure.
+ *
+ * No need to save the Non-secure context ahead of a world switch: the
+ * Non-secure context was fully saved before dispatch, and has been
+ * returned to its pre-dispatch state.
+ */
+ if ((disp_ctx->sec_state == SECURE) &&
+ (ehf_is_ns_preemption_allowed() == 0)) {
+ restore_and_resume_secure_context();
+ }
+
+ if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
+ /*
+ * The event was dispatched after receiving SDEI interrupt. With
+ * the event handling completed, EOI the corresponding
+ * interrupt.
+ */
+ plat_ic_end_of_interrupt(disp_ctx->intr_raw);
+ } else {
+ /*
+ * An unbound event must have been dispatched explicitly.
+ * Deactivate the priority level that was activated at the time
+ * of explicit dispatch.
+ */
+ ehf_deactivate_priority(sdei_event_priority(map));
+ }
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+}
+
+int sdei_event_context(void *handle, unsigned int param)
+{
+ sdei_dispatch_context_t *disp_ctx;
+
+ if (param >= SDEI_SAVED_GPREGS)
+ return SDEI_EINVAL;
+
+ /* Get outstanding dispatch on this CPU */
+ disp_ctx = get_outstanding_dispatch();
+ if (!disp_ctx)
+ return SDEI_EDENY;
+
+ assert(disp_ctx->map);
+
+ if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
+ return SDEI_EDENY;
+
+ /*
+ * No locking is required for the Running status as this is the only CPU
+ * that can complete the event.
+ */
+
+ return disp_ctx->x[param];
+}
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
new file mode 100644
index 00000000..2f08c8ba
--- /dev/null
+++ b/services/std_svc/sdei/sdei_main.c
@@ -0,0 +1,1066 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <bl_common.h>
+#include <cassert.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <pubsub.h>
+#include <runtime_svc.h>
+#include <sdei.h>
+#include <stddef.h>
+#include <string.h>
+#include <utils.h>
+#include "sdei_private.h"
+
+#define MAJOR_VERSION 1
+#define MINOR_VERSION 0
+#define VENDOR_VERSION 0
+
+#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
+ ((((unsigned long long)(_major)) << 48) | \
+ (((unsigned long long)(_minor)) << 32) | \
+ (_vendor))
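
For example, MAKE_SDEI_VERSION(1, 0, 0) evaluates to 0x0001000000000000: the major version occupies bits [63:48], the minor version bits [47:32], and the vendor version bits [31:0].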
+
+#define LOWEST_INTR_PRIORITY 0xff
+
+#define is_valid_affinity(_mpidr) (plat_core_pos_by_mpidr(_mpidr) >= 0)
+
+CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
+ sdei_critical_must_have_higher_priority);
+
+static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
+
+/* Initialise SDEI map entries */
+static void init_map(sdei_ev_map_t *map)
+{
+ map->reg_count = 0;
+}
+
+/* Convert mapping to SDEI class */
+sdei_class_t map_to_class(sdei_ev_map_t *map)
+{
+ return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
+}
+
+/* Clear SDEI event entries except state */
+static void clear_event_entries(sdei_entry_t *se)
+{
+ se->ep = 0;
+ se->arg = 0;
+ se->affinity = 0;
+ se->reg_flags = 0;
+}
+
+/* Perform CPU-specific state initialisation */
+static void *sdei_cpu_on_init(const void *arg)
+{
+ int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Initialize private mappings on this CPU */
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ clear_event_entries(se);
+ se->state = 0;
+ }
+
+ SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
+
+ /* All PEs start with SDEI events masked */
+ sdei_pe_mask();
+
+ return 0;
+}
+
+/* Initialise an SDEI class */
+void sdei_class_init(sdei_class_t class)
+{
+ unsigned int i, zero_found __unused = 0;
+ int ev_num_so_far __unused;
+ sdei_ev_map_t *map;
+
+ /* Sanity check and configuration of shared events */
+ ev_num_so_far = -1;
+ for_each_shared_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ /* Event 0 must not be shared */
+ assert(map->ev_num != SDEI_EVENT_0);
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a shared event */
+ assert(is_event_shared(map));
+
+ /* No shared mapping should have signalable property */
+ assert(!is_event_signalable(map));
+#endif
+
+ /* Skip initializing the wrong priority */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_shrd_slots++;
+ } else {
+ /* Shared mappings must be bound to shared interrupt */
+ assert(plat_ic_is_spi(map->intr));
+ set_map_bound(map);
+ }
+
+ init_map(map);
+ }
+
+ /* Sanity check and configuration of private events for this CPU */
+ ev_num_so_far = -1;
+ for_each_private_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ if (map->ev_num == SDEI_EVENT_0) {
+ zero_found = 1;
+
+ /* Event 0 must be a Secure SGI */
+ assert(is_secure_sgi(map->intr));
+
+ /*
+			 * Event 0 can only have the signalable flag (apart
+			 * from being private).
+ */
+ assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
+ SDEI_MAPF_PRIVATE));
+ } else {
+ /* No other mapping should have signalable property */
+ assert(!is_event_signalable(map));
+ }
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a private event */
+ assert(is_event_private(map));
+#endif
+
+ /* Skip initializing the wrong priority */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (map->ev_num != SDEI_EVENT_0) {
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_priv_slots++;
+ } else {
+ /*
+ * Private mappings must be bound to private
+ * interrupt.
+ */
+ assert(plat_ic_is_ppi(map->intr));
+ set_map_bound(map);
+ }
+ }
+
+ init_map(map);
+ }
+
+ /* Ensure event 0 is in the mapping */
+ assert(zero_found);
+
+ sdei_cpu_on_init(NULL);
+}
+
+/* SDEI dispatcher initialisation */
+void sdei_init(void)
+{
+ sdei_class_init(SDEI_CRITICAL);
+ sdei_class_init(SDEI_NORMAL);
+
+ /* Register priority level handlers */
+ ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
+ sdei_intr_handler);
+ ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
+ sdei_intr_handler);
+}
+
+/* Populate SDEI event entry */
+static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
+ unsigned int flags, uint64_t affinity)
+{
+ assert(se != NULL);
+
+ se->ep = ep;
+ se->arg = arg;
+ se->affinity = (affinity & MPIDR_AFFINITY_MASK);
+ se->reg_flags = flags;
+}
+
+static unsigned long long sdei_version(void)
+{
+ return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
+}
+
+/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
+static int validate_flags(uint64_t flags, uint64_t mpidr)
+{
+ /* Validate flags */
+ switch (flags) {
+ case SDEI_REGF_RM_PE:
+ if (!is_valid_affinity(mpidr))
+ return SDEI_EINVAL;
+ break;
+ case SDEI_REGF_RM_ANY:
+ break;
+ default:
+ /* Unknown flags */
+ return SDEI_EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set routing of an SDEI event */
+static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
+{
+ int ret, routing;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ ret = validate_flags(flags, mpidr);
+ if (ret)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ /* The event must not be private */
+ if (is_event_private(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ if (!is_map_bound(map) || is_event_private(map)) {
+ ret = SDEI_EINVAL;
+ goto finish;
+ }
+
+ if (!can_sdei_state_trans(se, DO_ROUTING)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /* Choose appropriate routing */
+ routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
+ INTR_ROUTING_MODE_PE;
+
+ /* Update event registration flag */
+ se->reg_flags = flags;
+
+ /*
+ * ROUTING_SET is permissible only when event composite state is
+ * 'registered, disabled, and not running'. This means that the
+ * interrupt is currently disabled, and not active.
+ */
+ plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Register handler and argument for an SDEI event */
+static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
+ uint64_t flags, uint64_t mpidr)
+{
+ int ret;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ sdei_state_t backup_state;
+
+ if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
+ return SDEI_EINVAL;
+
+ ret = validate_flags(flags, mpidr);
+ if (ret)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ /* Private events always target the PE */
+ if (is_event_private(map))
+ flags = SDEI_REGF_RM_PE;
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the register operation is per-event (and, for private
+	 * events, additionally required on each PE), it has to be
+ * serialised with respect to bind/release, which are global operations.
+ * So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ backup_state = se->state;
+ if (!can_sdei_state_trans(se, DO_REGISTER))
+ goto fallback;
+
+ /*
+ * When registering for dynamic events, make sure it's been bound
+ * already. This has to be the case as, without binding, the client
+ * can't know about the event number to register for.
+ */
+ if (is_map_dynamic(map) && !is_map_bound(map))
+ goto fallback;
+
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+ /* Only single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ /* Meanwhile, did any PE ACK the interrupt? */
+ if (plat_ic_get_interrupt_active(map->intr))
+ goto fallback;
+
+		/* The interrupt must currently be owned by Non-secure */
+ if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
+ goto fallback;
+
+ /*
+ * Disable forwarding of new interrupt triggers to CPU
+ * interface.
+ */
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Any events that are triggered after register and before
+ * enable should remain pending. Clear any previous interrupt
+ * triggers which are pending (except for SGIs). This has no
+		 * effect on level-triggered interrupts.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ /* Map interrupt to EL3 and program the correct priority */
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);
+
+ /* Program the appropriate interrupt priority */
+ plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));
+
+ /*
+ * Set the routing mode for shared event as requested. We
+ * already ensure that shared events get bound to SPIs.
+ */
+ if (is_event_shared(map)) {
+ plat_ic_set_spi_routing(map->intr,
+ ((flags == SDEI_REGF_RM_ANY) ?
+ INTR_ROUTING_MODE_ANY :
+ INTR_ROUTING_MODE_PE),
+ (u_register_t) mpidr);
+ }
+ }
+
+ /* Populate event entries */
+ set_sdei_entry(se, ep, arg, flags, mpidr);
+
+ /* Increment register count */
+ map->reg_count++;
+
+ sdei_map_unlock(map);
+
+ return 0;
+
+fallback:
+ /* Reinstate previous state */
+ se->state = backup_state;
+
+ sdei_map_unlock(map);
+
+ return SDEI_EDENY;
+}
+
+/* Enable SDEI event */
+static int sdei_event_enable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret, before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_ENABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Enable interrupt for bound events only if there's a change in enabled
+ * state.
+ */
+ if (is_map_bound(map) && (!before && after))
+ plat_ic_enable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Disable SDEI event */
+static int sdei_event_disable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret, before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_DISABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Disable interrupt for bound events only if there's a change in
+ * enabled state.
+ */
+ if (is_map_bound(map) && (before && !after))
+ plat_ic_disable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query SDEI event information */
+static uint64_t sdei_event_get_info(int ev_num, int info)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ unsigned int flags, registered;
+ uint64_t affinity;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* Sample state under lock */
+ registered = GET_EV_STATE(se, REGISTERED);
+ flags = se->reg_flags;
+ affinity = se->affinity;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ switch (info) {
+ case SDEI_INFO_EV_TYPE:
+ return is_event_shared(map);
+
+ case SDEI_INFO_EV_NOT_SIGNALED:
+ return !is_event_signalable(map);
+
+ case SDEI_INFO_EV_PRIORITY:
+ return is_event_critical(map);
+
+ case SDEI_INFO_EV_ROUTING_MODE:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ return (flags == SDEI_REGF_RM_PE);
+
+ case SDEI_INFO_EV_ROUTING_AFF:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ if (flags != SDEI_REGF_RM_PE)
+ return SDEI_EINVAL;
+ return affinity;
+
+ default:
+ return SDEI_EINVAL;
+ }
+}
+
+/* Unregister an SDEI event */
+static int sdei_event_unregister(int ev_num)
+{
+ int ret = 0;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the unregister operation is per-event (and, for private
+	 * events, additionally required on each PE), it has to
+ * be serialised with respect to bind/release, which are global
+ * operations. So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
+ /*
+		 * If the call is invalid while the handler is running (for
+		 * example, the handler unregistered itself earlier and is
+		 * still running), return the pending error code; otherwise,
+		 * return deny.
+ */
+ ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;
+
+ goto finish;
+ }
+
+ map->reg_count--;
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+ /* Only single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Clear pending interrupt. Skip for SGIs as they may not be
+ * cleared on interrupt controllers.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
+ plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
+ }
+
+ clear_event_entries(se);
+
+ /*
+ * If the handler is running at the time of unregister, return the
+ * pending error code.
+ */
+ if (GET_EV_STATE(se, RUNNING))
+ ret = SDEI_EPEND;
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query status of an SDEI event */
+static int sdei_event_status(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_state_t state;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* State value directly maps to the expected return format */
+ state = se->state;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return state;
+}
+
+/* Bind an SDEI event to an interrupt */
+static int sdei_interrupt_bind(int intr_num)
+{
+ sdei_ev_map_t *map;
+ int retry = 1, shared_mapping;
+
+ /* SGIs are not allowed to be bound */
+ if (plat_ic_is_sgi(intr_num))
+ return SDEI_EINVAL;
+
+ shared_mapping = plat_ic_is_spi(intr_num);
+ do {
+ /*
+ * Bail out if there is already an event for this interrupt,
+ * either platform-defined or dynamic.
+ */
+ map = find_event_map_by_intr(intr_num, shared_mapping);
+ if (map) {
+ if (is_map_dynamic(map)) {
+ if (is_map_bound(map)) {
+ /*
+ * Dynamic event, already bound. Return
+ * event number.
+ */
+ return map->ev_num;
+ }
+ } else {
+ /* Binding non-dynamic event */
+ return SDEI_EINVAL;
+ }
+ }
+
+ /*
+ * The interrupt is not bound yet. Try to find a free slot to
+ * bind it. Free dynamic mappings have their interrupt set as
+ * SDEI_DYN_IRQ.
+ */
+ map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
+ if (!map)
+ return SDEI_ENOMEM;
+
+ /* The returned mapping must be dynamic */
+ assert(is_map_dynamic(map));
+
+ /*
+ * We cannot assert for bound maps here, as we might be racing
+ * with another bind.
+ */
+
+ /* The requested interrupt must already belong to NS */
+ if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
+ return SDEI_EDENY;
+
+ /*
+ * Interrupt programming and ownership transfer are deferred
+ * until register.
+ */
+
+ sdei_map_lock(map);
+ if (!is_map_bound(map)) {
+ map->intr = intr_num;
+ set_map_bound(map);
+ retry = 0;
+ }
+ sdei_map_unlock(map);
+ } while (retry);
+
+ return map->ev_num;
+}
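
A typical client-side sequence for a dynamically bound event, expressed against the SMCs this file implements; sdei_smc() is a hypothetical SMC64 wrapper, shown only to illustrate call ordering:

static void client_bind_sketch(int intr, uint64_t ep, uint64_t arg)
{
	int ev;

	/* Bind the interrupt to obtain a dynamic event number */
	ev = sdei_smc(SDEI_INTERRUPT_BIND, intr);

	/* Register a handler, then enable the event and unmask this PE */
	sdei_smc(SDEI_EVENT_REGISTER, ev, ep, arg, SDEI_REGF_RM_ANY, 0);
	sdei_smc(SDEI_EVENT_ENABLE, ev);
	sdei_smc(SDEI_PE_UNMASK);
}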
+
+/* Release an SDEI event previously bound to an interrupt */
+static int sdei_interrupt_release(int ev_num)
+{
+ int ret = 0;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (!map)
+ return SDEI_EINVAL;
+
+ if (!is_map_dynamic(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ /* Event must have been unregistered before release */
+ if (map->reg_count != 0) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt release never causes the state to change. We only check
+ * whether it's permissible or not.
+ */
+ if (!can_sdei_state_trans(se, DO_RELEASE)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ if (is_map_bound(map)) {
+ /*
+ * Deny release if the interrupt is active, which means it's
+ * probably being acknowledged and handled elsewhere.
+ */
+ if (plat_ic_get_interrupt_active(map->intr)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt programming and ownership transfer are already done
+ * during unregister.
+ */
+
+ map->intr = SDEI_DYN_IRQ;
+ clr_map_bound(map);
+ } else {
+ SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
+ map->reg_count);
+ ret = SDEI_EINVAL;
+ }
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Perform reset of private SDEI events */
+static int sdei_private_reset(void)
+{
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0, i;
+
+ /* Unregister all private events */
+ for_each_private_map(i, map) {
+ /*
+ * The unregister can fail if the event is not registered, which
+ * is allowed, and a deny will be returned. But if the event is
+ * running or unregister pending, the call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = ret;
+ }
+
+ return final_ret;
+}
+
+/* Perform reset of shared SDEI events */
+static int sdei_shared_reset(void)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0, i, j;
+
+ /* Unregister all shared events */
+ for_each_shared_map(i, map) {
+ /*
+ * The unregister can fail if the event is not registered, which
+ * is allowed, and a deny will be returned. But if the event is
+ * running or unregister pending, the call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = ret;
+ }
+
+ if (final_ret != 0)
+ return final_ret;
+
+ /*
+ * Loop through both private and shared mappings, and release all
+ * bindings.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ /*
+ * Release bindings for mappings that are dynamic and
+ * bound.
+ */
+ if (is_map_dynamic(map) && is_map_bound(map)) {
+ /*
+ * Any failure to release would mean there is at
+				 * least one PE registered for the event.
+ */
+ ret = sdei_interrupt_release(map->ev_num);
+ if ((ret != 0) && (final_ret == 0))
+ final_ret = ret;
+ }
+ }
+ }
+
+ return final_ret;
+}
+
+/* Send a signal to another SDEI client PE */
+int sdei_signal(int event, uint64_t target_pe)
+{
+ sdei_ev_map_t *map;
+
+ /* Only event 0 can be signalled */
+ if (event != SDEI_EVENT_0)
+ return SDEI_EINVAL;
+
+ /* Find mapping for event 0 */
+ map = find_event_map(SDEI_EVENT_0);
+ if (!map)
+ return SDEI_EINVAL;
+
+ /* The event must be signalable */
+ if (!is_event_signalable(map))
+ return SDEI_EINVAL;
+
+ /* Validate target */
+ if (plat_core_pos_by_mpidr(target_pe) < 0)
+ return SDEI_EINVAL;
+
+ /* Raise SGI. Platform will validate target_pe */
+ plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
+
+ return 0;
+}
+
+/* Query SDEI dispatcher features */
+uint64_t sdei_features(unsigned int feature)
+{
+ if (feature == SDEI_FEATURE_BIND_SLOTS) {
+ return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
+ num_dyn_shrd_slots);
+ }
+
+ return SDEI_EINVAL;
+}
+
+/* SDEI top level handler for servicing SMCs */
+uint64_t sdei_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+
+ uint64_t x5;
+ int ss = get_interrupt_src_ss(flags);
+ int64_t ret;
+ unsigned int resume = 0;
+
+ if (ss != NON_SECURE)
+ SMC_RET1(handle, SMC_UNK);
+
+ /* Verify the caller EL */
+ if (GET_EL(read_spsr_el3()) != sdei_client_el())
+ SMC_RET1(handle, SMC_UNK);
+
+ switch (smc_fid) {
+ case SDEI_VERSION:
+ SDEI_LOG("> VER\n");
+ ret = sdei_version();
+ SDEI_LOG("< VER:%lx\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_REGISTER:
+ x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+ SDEI_LOG("> REG(n:%d e:%lx a:%lx f:%x m:%lx)\n", (int) x1,
+ x2, x3, (int) x4, x5);
+ ret = sdei_event_register(x1, x2, x3, x4, x5);
+ SDEI_LOG("< REG:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_ENABLE:
+ SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
+ ret = sdei_event_enable(x1);
+ SDEI_LOG("< ENABLE:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_DISABLE:
+ SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
+ ret = sdei_event_disable(x1);
+ SDEI_LOG("< DISABLE:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_CONTEXT:
+ SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
+ ret = sdei_event_context(handle, x1);
+ SDEI_LOG("< CTX:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_COMPLETE_AND_RESUME:
+ resume = 1;
+ /* Fall through */
+
+ case SDEI_EVENT_COMPLETE:
+ SDEI_LOG("> COMPLETE(r:%d sta/ep:%lx):%lx\n", resume, x1,
+ read_mpidr_el1());
+ ret = sdei_event_complete(resume, x1);
+ SDEI_LOG("< COMPLETE:%lx\n", ret);
+
+ /*
+		 * Set the error code only if the call failed. If the call
+		 * succeeded, the dispatched context is discarded and the
+		 * interrupted context is restored to a pristine condition, so
+		 * it must not be modified; we don't return to the caller in
+		 * that case anyway.
+ */
+ if (ret)
+ SMC_RET1(handle, ret);
+
+ SMC_RET0(handle);
+ break;
+
+ case SDEI_EVENT_STATUS:
+ SDEI_LOG("> STAT(n:%d)\n", (int) x1);
+ ret = sdei_event_status(x1);
+ SDEI_LOG("< STAT:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_GET_INFO:
+ SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
+ ret = sdei_event_get_info(x1, x2);
+ SDEI_LOG("< INFO:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_UNREGISTER:
+ SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
+ ret = sdei_event_unregister(x1);
+ SDEI_LOG("< UNREG:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_PE_UNMASK:
+ SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
+ sdei_pe_unmask();
+ SDEI_LOG("< UNMASK:%d\n", 0);
+ SMC_RET1(handle, 0);
+ break;
+
+ case SDEI_PE_MASK:
+ SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
+ ret = sdei_pe_mask();
+ SDEI_LOG("< MASK:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_INTERRUPT_BIND:
+ SDEI_LOG("> BIND(%d)\n", (int) x1);
+ ret = sdei_interrupt_bind(x1);
+ SDEI_LOG("< BIND:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_INTERRUPT_RELEASE:
+ SDEI_LOG("> REL(%d)\n", (int) x1);
+ ret = sdei_interrupt_release(x1);
+ SDEI_LOG("< REL:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_SHARED_RESET:
+ SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_shared_reset();
+ SDEI_LOG("< S_RESET:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_PRIVATE_RESET:
+ SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_private_reset();
+ SDEI_LOG("< P_RESET:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_ROUTING_SET:
+ SDEI_LOG("> ROUTE_SET(n:%d f:%lx aff:%lx)\n", (int) x1, x2, x3);
+ ret = sdei_event_routing_set(x1, x2, x3);
+ SDEI_LOG("< ROUTE_SET:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_FEATURES:
+ SDEI_LOG("> FTRS(f:%lx)\n", x1);
+ ret = sdei_features(x1);
+ SDEI_LOG("< FTRS:%lx\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+
+ case SDEI_EVENT_SIGNAL:
+ SDEI_LOG("> SIGNAL(e:%lx t:%lx)\n", x1, x2);
+ ret = sdei_signal(x1, x2);
+ SDEI_LOG("< SIGNAL:%ld\n", ret);
+ SMC_RET1(handle, ret);
+ break;
+ default:
+ break;
+ }
+
+ WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+}
+
+/* Subscribe to the PSCI CPU_ON finish event to initialize per-CPU SDEI configuration */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);
diff --git a/services/std_svc/sdei/sdei_private.h b/services/std_svc/sdei/sdei_private.h
new file mode 100644
index 00000000..44db4193
--- /dev/null
+++ b/services/std_svc/sdei/sdei_private.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SDEI_PRIVATE_H__
+#define __SDEI_PRIVATE_H__
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errno.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <sdei.h>
+#include <spinlock.h>
+#include <stdbool.h>
+#include <types.h>
+#include <utils_def.h>
+
+#ifdef AARCH32
+# error SDEI is implemented only for AArch64 systems
+#endif
+
+#ifndef PLAT_SDEI_CRITICAL_PRI
+# error Platform must define SDEI critical priority value
+#endif
+
+#ifndef PLAT_SDEI_NORMAL_PRI
+# error Platform must define SDEI normal priority value
+#endif
+
+/* Output SDEI logs as verbose */
+#define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__)
+
+/* SDEI handler unregistered state. This is the default state. */
+#define SDEI_STATE_UNREGISTERED 0
+
+/* SDE event status values in bit position */
+#define SDEI_STATF_REGISTERED 0
+#define SDEI_STATF_ENABLED 1
+#define SDEI_STATF_RUNNING 2
+
+/* SDEI SMC error codes */
+#define SDEI_EINVAL (-2)
+#define SDEI_EDENY (-3)
+#define SDEI_EPEND (-5)
+#define SDEI_ENOMEM (-10)
+
+/*
+ * 'info' parameter to SDEI_EVENT_GET_INFO SMC.
+ *
+ * Note that the SDEI v1.0 specification mistakenly enumerates
+ * SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
+ * future version.
+ */
+#define SDEI_INFO_EV_TYPE 0
+#define SDEI_INFO_EV_NOT_SIGNALED 1
+#define SDEI_INFO_EV_PRIORITY 2
+#define SDEI_INFO_EV_ROUTING_MODE 3
+#define SDEI_INFO_EV_ROUTING_AFF 4
+
+#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_PRIV])
+#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_SHRD])
+
+#define for_each_mapping_type(_i, _mapping) \
+	for (_i = 0, _mapping = &sdei_global_mappings[_i]; \
+		_i < _SDEI_MAP_IDX_MAX; \
+		_i++, _mapping = &sdei_global_mappings[_i])
+
+#define iterate_mapping(_mapping, _i, _map) \
+ for (_map = (_mapping)->map, _i = 0; \
+ _i < (_mapping)->num_maps; \
+ _i++, _map++)
+
+#define for_each_private_map(_i, _map) \
+ iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
+
+#define for_each_shared_map(_i, _map) \
+ iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
+
+/* SDEI_FEATURES */
+#define SDEI_FEATURE_BIND_SLOTS 0
+#define BIND_SLOTS_MASK 0xffff
+#define FEATURES_SHARED_SLOTS_SHIFT 16
+#define FEATURES_PRIVATE_SLOTS_SHIFT 0
+#define FEATURE_BIND_SLOTS(_priv, _shrd) \
+ ((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
+ (((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
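
For example, FEATURE_BIND_SLOTS(3, 5) evaluates to 0x00050003: private slots in bits [15:0] and shared slots in bits [31:16].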
+
+#define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s)
+#define SET_EV_STATE(_e, _s)	((_e)->state |= BIT(SDEI_STATF_##_s))
+
+static inline int is_event_private(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0);
+}
+
+static inline int is_event_shared(sdei_ev_map_t *map)
+{
+ return !is_event_private(map);
+}
+
+static inline int is_event_critical(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0);
+}
+
+static inline int is_event_normal(sdei_ev_map_t *map)
+{
+ return !is_event_critical(map);
+}
+
+static inline int is_event_signalable(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0);
+}
+
+static inline int is_map_dynamic(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0);
+}
+
+/*
+ * Checks whether an event is associated with an interrupt. For static events,
+ * this always returns true; for dynamic events, it returns whether
+ * SDEI_INTERRUPT_BIND has been called on them. This can be used on both
+ * static and dynamic events to check for an associated interrupt.
+ */
+static inline int is_map_bound(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0);
+}
+
+static inline void set_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT);
+}
+
+static inline void clr_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT));
+}
+
+static inline int is_secure_sgi(unsigned int intr)
+{
+ return (plat_ic_is_sgi(intr) &&
+ (plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
+}
+
+/*
+ * Determine the EL of the client. If EL2 is implemented (indicated by the
+ * SCR_EL3.HCE bit being set), the client runs at EL2; otherwise, at EL1.
+ */
+static inline unsigned int sdei_client_el(void)
+{
+ return read_scr_el3() & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+}
+
+static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
+{
+ return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
+ PLAT_SDEI_NORMAL_PRI;
+}
+
+static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ return ((se->state & BIT(bit_no)) != 0);
+}
+
+static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ se->state &= ~BIT(bit_no);
+}
+
+/* SDEI actions for state transition */
+typedef enum {
+ /*
+ * Actions resulting from client requests. These directly map to SMC
+ * calls. Note that the state table columns are listed in this order
+ * too.
+ */
+ DO_REGISTER = 0,
+ DO_RELEASE = 1,
+ DO_ENABLE = 2,
+ DO_DISABLE = 3,
+ DO_UNREGISTER = 4,
+ DO_ROUTING = 5,
+ DO_CONTEXT = 6,
+ DO_COMPLETE = 7,
+ DO_COMPLETE_RESUME = 8,
+
+ /* Action for event dispatch */
+ DO_DISPATCH = 9,
+
+ DO_MAX,
+} sdei_action_t;
+
+typedef enum {
+ SDEI_NORMAL,
+ SDEI_CRITICAL
+} sdei_class_t;
+
+static inline void sdei_map_lock(sdei_ev_map_t *map)
+{
+ spin_lock(&map->lock);
+}
+
+static inline void sdei_map_unlock(sdei_ev_map_t *map)
+{
+ spin_unlock(&map->lock);
+}
+
+extern const sdei_mapping_t sdei_global_mappings[];
+extern sdei_entry_t sdei_private_event_table[];
+extern sdei_entry_t sdei_shared_event_table[];
+
+void init_sdei_state(void);
+
+sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared);
+sdei_ev_map_t *find_event_map(int ev_num);
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map);
+
+int sdei_event_context(void *handle, unsigned int param);
+int sdei_event_complete(int resume, uint64_t arg);
+
+void sdei_pe_unmask(void);
+unsigned int sdei_pe_mask(void);
+
+int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle,
+ void *cookie);
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
+
+#endif /* __SDEI_PRIVATE_H__ */
diff --git a/services/std_svc/sdei/sdei_state.c b/services/std_svc/sdei/sdei_state.c
new file mode 100644
index 00000000..3f60dfd8
--- /dev/null
+++ b/services/std_svc/sdei/sdei_state.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <cassert.h>
+#include <stdbool.h>
+#include "sdei_private.h"
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_ 0
+#define R_ (1u << SDEI_STATF_RUNNING)
+
+#define e_ 0
+#define E_ (1u << SDEI_STATF_ENABLED)
+
+#define g_ 0
+#define G_ (1u << SDEI_STATF_REGISTERED)
+
+/* All possible composite handler states */
+#define reg_ (r_ | e_ | g_)
+#define reG_ (r_ | e_ | G_)
+#define rEg_ (r_ | E_ | g_)
+#define rEG_ (r_ | E_ | G_)
+#define Reg_ (R_ | e_ | g_)
+#define ReG_ (R_ | e_ | G_)
+#define REg_ (R_ | E_ | g_)
+#define REG_ (R_ | E_ | G_)
+
+#define MAX_STATES (REG_ + 1)
+
+/* Invalid state */
+#define SDEI_STATE_INVALID ((sdei_state_t) (-1))
+
+/* No change in state */
+#define SDEI_STATE_NOP ((sdei_state_t) (-2))
+
+#define X___ SDEI_STATE_INVALID
+#define NOP_ SDEI_STATE_NOP
+
+/* Ensure special states don't overlap with valid ones */
+CASSERT(X___ > REG_, sdei_state_overlap_invalid);
+CASSERT(NOP_ > REG_, sdei_state_overlap_nop);
+
+/*
+ * SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
+ * specification:
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
+ *
+ * Not all calls contribute to handler state transition. This table is also used
+ * to validate whether a call is permissible at a given handler state:
+ *
+ * - X___ denotes a forbidden transition;
+ * - NOP_ denotes a permitted transition, but there's no change in state;
+ * - Otherwise, XXX_ gives the new state.
+ *
+ * DISP[atch] is a transition added for the implementation, but is not mentioned
+ * in the spec.
+ *
+ * Calls that the spec says can be made at any time do not appear in this
+ * table.
+ */
+
+static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
+/*
+ * Action: REG REL ENA DISA UREG ROUT CTX COMP COMPR DISP
+ * Notes: [3] [1] [3] [3][4] [2]
+ */
+ /* Handler unregistered, disabled, and not running. This is the default state. */
+/* 0 */ [reg_] = { reG_, NOP_, X___, X___, X___, X___, X___, X___, X___, X___, },
+
+ /* Handler unregistered and running */
+/* 4 */ [Reg_] = { X___, X___, X___, X___, X___, X___, NOP_, reg_, reg_, X___, },
+
+ /* Handler registered */
+/* 1 */ [reG_] = { X___, X___, rEG_, NOP_, reg_, NOP_, X___, X___, X___, X___, },
+
+ /* Handler registered and running */
+/* 5 */ [ReG_] = { X___, X___, REG_, NOP_, Reg_, X___, NOP_, reG_, reG_, X___, },
+
+ /* Handler registered and enabled */
+/* 3 */ [rEG_] = { X___, X___, NOP_, reG_, reg_, X___, X___, X___, X___, REG_, },
+
+ /* Handler registered, enabled, and running */
+/* 7 */ [REG_] = { X___, X___, NOP_, ReG_, Reg_, X___, NOP_, rEG_, rEG_, X___, },
+
+ /*
+ * Invalid states: no valid transition would leave the handler in these
+ * states; and no transition from these states is possible either.
+ */
+
+ /*
+ * Handler can't be enabled without being registered, i.e. the rEg_ and
+ * REg_ states are unreachable.
+ */
+/* 2 */ [rEg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+/* 6 */ [REg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+};
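+
+/*
+ * Example of reading the table: from the registered state [reG_], DO_ENABLE
+ * moves the handler to [rEG_], DO_UNREGISTER returns it to the default state
+ * [reg_], and DO_DISABLE and DO_ROUTING are permitted without changing state;
+ * all other actions are forbidden.
+ */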
+
+/*
+ * [1] Unregister will always also disable the event, so the new state will have
+ * Xeg.
+ * [2] Event is considered for dispatch only when it's both registered and
+ * enabled.
+ * [3] Never causes change in state.
+ * [4] Only allowed when running.
+ */
+
+/*
+ * Given an action, transition the state of an event by looking up the state
+ * table above:
+ *
+ * - Return false for invalid transition;
+ * - Return true for valid transition that causes no change in state;
+ * - Otherwise, update state and return true.
+ *
+ * This function assumes that the caller holds the necessary locks. If the
+ * transition has constraints other than those the state table describes, the
+ * caller is expected to restore the previous state. See sdei_event_register()
+ * for an example.
+ */
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
+{
+ sdei_state_t next;
+
+ assert(act < DO_MAX);
+ if (se->state >= MAX_STATES) {
+ WARN(" event state invalid: %x\n", se->state);
+ return false;
+ }
+
+ next = sdei_state_table[se->state][act];
+ switch (next) {
+ case SDEI_STATE_INVALID:
+ return false;
+
+ case SDEI_STATE_NOP:
+ return true;
+
+ default:
+ /* Valid transition. Update state. */
+ SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
+ se->state = next;
+
+ return true;
+ }
+}
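+
+/*
+ * Illustrative usage sketch (not part of this file): a caller typically takes
+ * the mapping lock before attempting a transition, and reports SDEI_EDENY (the
+ * deny error code from sdei.h) when the transition is forbidden:
+ *
+ *	sdei_map_lock(map);
+ *	if (!can_sdei_state_trans(get_event_entry(map), DO_ENABLE))
+ *		ret = SDEI_EDENY;
+ *	sdei_map_unlock(map);
+ */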
diff --git a/services/std_svc/spm/aarch64/spm_helpers.S b/services/std_svc/spm/aarch64/spm_helpers.S
new file mode 100644
index 00000000..aa35811f
--- /dev/null
+++ b/services/std_svc/spm/aarch64/spm_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spm_private.h"
+
+ .global spm_secure_partition_enter
+ .global spm_secure_partition_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+ * callee-saved registers on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SP_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SP_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SP_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SP_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SP_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SP_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SP_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+ * Everything is set up now. el3_exit() will use the secure context to
+ * restore the general purpose and EL3 system registers and ERET into
+ * the secure payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc spm_secure_partition_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to the C runtime context
+ * saved in spm_secure_partition_enter(). It restores the callee-saved
+ * registers and returns to that runtime with 'x0' as the new stack
+ * pointer. This discards the C runtime context that the caller had built
+ * on the stack below the saved context. The second parameter 'x1' is
+ * passed back as the return value of the original entry call.
+ * ---------------------------------------------------------------------
+ */
+func spm_secure_partition_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+ /* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SP_C_RT_CTX_X19 - SP_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SP_C_RT_CTX_X21 - SP_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SP_C_RT_CTX_X23 - SP_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SP_C_RT_CTX_X25 - SP_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SP_C_RT_CTX_X27 - SP_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SP_C_RT_CTX_X29 - SP_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+ * last spm_secure_partition_enter(). Place the second parameter in x0
+ * so that the caller sees it as the return value of the original entry
+ * call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc spm_secure_partition_exit
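+
+ /* ---------------------------------------------------------------------
+ * Illustrative pairing of the two functions above (sketch only): at the
+ * C level, spm_secure_partition_enter(&ctx->c_rt_ctx) returns only when
+ * spm_secure_partition_exit(ctx->c_rt_ctx, ret) is later called, and its
+ * return value is that 'ret'.
+ * ---------------------------------------------------------------------
+ */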
diff --git a/services/std_svc/spm/aarch64/spm_shim_exceptions.S b/services/std_svc/spm/aarch64/spm_shim_exceptions.S
new file mode 100644
index 00000000..218245d8
--- /dev/null
+++ b/services/std_svc/spm/aarch64/spm_shim_exceptions.S
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+ .globl spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
+ b .
+ check_vector_size SynchronousExceptionSP0
+
+vector_entry IrqSP0, .spm_shim_exceptions
+ b .
+ check_vector_size IrqSP0
+
+vector_entry FiqSP0, .spm_shim_exceptions
+ b .
+ check_vector_size FiqSP0
+
+vector_entry SErrorSP0, .spm_shim_exceptions
+ b .
+ check_vector_size SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
+ b .
+ check_vector_size SynchronousExceptionSPx
+
+vector_entry IrqSPx, .spm_shim_exceptions
+ b .
+ check_vector_size IrqSPx
+
+vector_entry FiqSPx, .spm_shim_exceptions
+ b .
+ check_vector_size FiqSPx
+
+vector_entry SErrorSPx, .spm_shim_exceptions
+ b .
+ check_vector_size SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+ * are handled since secure_partition does not implement
+ * a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA64, .spm_shim_exceptions
+ msr tpidr_el1, x30
+ mrs x30, esr_el1
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH64_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH32_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq handle_sys_trap
+
+ /* Fail in all the other cases */
+ b panic
+
+ /* ---------------------------------------------
+ * Forward the SVC to EL3 as an SMC, e.g. to
+ * tell the SPM that we are done initialising
+ * ---------------------------------------------
+ */
+do_smc:
+ mrs x30, tpidr_el1
+ smc #0
+ eret
+
+ /* AArch64 system instruction traps are handled as a panic for now */
+handle_sys_trap:
+panic:
+ b panic
+ check_vector_size SynchronousExceptionA64
+
+vector_entry IrqA64, .spm_shim_exceptions
+ b .
+ check_vector_size IrqA64
+
+vector_entry FiqA64, .spm_shim_exceptions
+ b .
+ check_vector_size FiqA64
+
+vector_entry SErrorA64, .spm_shim_exceptions
+ b .
+ check_vector_size SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA32, .spm_shim_exceptions
+ b .
+ check_vector_size SynchronousExceptionA32
+
+vector_entry IrqA32, .spm_shim_exceptions
+ b .
+ check_vector_size IrqA32
+
+vector_entry FiqA32, .spm_shim_exceptions
+ b .
+ check_vector_size FiqA32
+
+vector_entry SErrorA32, .spm_shim_exceptions
+ b .
+ check_vector_size SErrorA32
diff --git a/services/std_svc/spm/secure_partition_setup.c b/services/std_svc/spm/secure_partition_setup.c
new file mode 100644
index 00000000..67301608
--- /dev/null
+++ b/services/std_svc/spm/secure_partition_setup.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_spm_def.h>
+#include <assert.h>
+#include <common_def.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <platform.h>
+#include <secure_partition.h>
+#include <string.h>
+#include <types.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+#include "spm_shim_private.h"
+
+/* Allocate and initialise the translation context for the secure partition. */
+REGISTER_XLAT_CONTEXT2(secure_partition,
+ PLAT_SP_IMAGE_MMAP_REGIONS,
+ PLAT_SP_IMAGE_MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
+ EL1_EL0_REGIME);
+
+/* Export a handle on the secure partition translation context */
+xlat_ctx_t *secure_partition_xlat_ctx_handle = &secure_partition_xlat_ctx;
+
+/* Setup context of the Secure Partition */
+void secure_partition_setup(void)
+{
+ VERBOSE("S-EL1/S-EL0 context setup start...\n");
+
+ cpu_context_t *ctx = cm_get_context(SECURE);
+
+ /* Make sure that we got a Secure context. */
+ assert(ctx != NULL);
+
+ /* Assert we are in Secure state. */
+ assert((read_scr_el3() & SCR_NS_BIT) == 0);
+
+ /* Disable MMU at EL1. */
+ disable_mmu_icache_el1();
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+
+ /*
+ * General-Purpose registers
+ * -------------------------
+ */
+
+ /*
+ * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
+ * The buffer will be mapped in the Secure EL1 translation regime
+ * with Normal IS WBWA attributes, RO data access permissions and
+ * Execute Never instruction access permissions.
+ *
+ * X1: Size of the buffer in bytes
+ *
+ * X2: cookie value (Implementation Defined)
+ *
+ * X3: cookie value (Implementation Defined)
+ *
+ * X4 to X30 = 0 (already done by cm_init_my_context())
+ */
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);
+
+ /*
+ * SP_EL0: A non-zero value will indicate to the SP that the SPM has
+ * initialized the stack pointer for the current CPU through
+ * implementation defined means. The value will be 0 otherwise.
+ */
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
+ PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);
+
+ /*
+ * Setup translation tables
+ * ------------------------
+ */
+
+#if ENABLE_ASSERTIONS
+
+ /* Get max granularity supported by the platform. */
+
+ u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
+
+ int tgran64_supported =
+ ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+
+ int tgran16_supported =
+ ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
+
+ int tgran4_supported =
+ ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
+ ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
+ ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
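+
+ /*
+ * The checks above rely on each ID_AA64MMFR0_EL1.TGRANx field reading
+ * as the corresponding ..._SUPPORTED value when that granule size is
+ * implemented by the PE.
+ */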
+
+ uintptr_t max_granule_size;
+
+ if (tgran64_supported) {
+ max_granule_size = 64 * 1024;
+ } else if (tgran16_supported) {
+ max_granule_size = 16 * 1024;
+ } else {
+ assert(tgran4_supported);
+ max_granule_size = 4 * 1024;
+ }
+
+ VERBOSE("Max translation granule supported: %lu KiB\n",
+ max_granule_size);
+
+ uintptr_t max_granule_size_mask = max_granule_size - 1;
+
+ /* Base must be aligned to the max granularity */
+ assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);
+
+ /* Size must be a multiple of the max granularity */
+ assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);
+
+#endif /* ENABLE_ASSERTIONS */
+
+ /* This region contains the exception vectors used at S-EL1. */
+ const mmap_region_t sel1_exception_vectors =
+ MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+ SPM_SHIM_EXCEPTIONS_SIZE,
+ MT_CODE | MT_SECURE | MT_PRIVILEGED);
+ mmap_add_region_ctx(&secure_partition_xlat_ctx,
+ &sel1_exception_vectors);
+
+ mmap_add_ctx(&secure_partition_xlat_ctx,
+ plat_get_secure_partition_mmap(NULL));
+
+ init_xlat_tables_ctx(&secure_partition_xlat_ctx);
+
+ /*
+ * MMU-related registers
+ * ---------------------
+ */
+
+ /* Set attributes in the right indices of the MAIR */
+ u_register_t mair_el1 =
+ MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
+ MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
+ MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);
+
+ /* Setup TCR_EL1. */
+ u_register_t tcr_ps_bits = tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
+
+ u_register_t tcr_el1 =
+ /* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
+ (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE)) |
+ /* Inner and outer WBWA, shareable. */
+ TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |
+ /* Set the granularity to 4KB. */
+ TCR_TG0_4K |
+ /* Limit Intermediate Physical Address Size. */
+ tcr_ps_bits << TCR_EL1_IPS_SHIFT |
+ /* Disable translations using TTBR1_EL1. */
+ TCR_EPD1_BIT
+ /* The remaining fields related to TTBR1_EL1 are left as zero. */
+ ;
+
+ tcr_el1 &= ~(
+ /* Enable translations using TTBR0_EL1 */
+ TCR_EPD0_BIT
+ );
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);
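+
+ /*
+ * Worked example (illustrative values only): for a 32-bit virtual
+ * address space, PLAT_VIRT_ADDR_SPACE_SIZE is 1ULL << 32, so the T0SZ
+ * value computed above is 64 - 32 = 32 and TTBR0_EL1 covers 2^32 bytes.
+ */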
+
+ /* Setup SCTLR_EL1 */
+ u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+ sctlr_el1 |=
+ /*SCTLR_EL1_RES1 |*/
+ /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+ SCTLR_UCI_BIT |
+ /* RW regions at xlat regime EL1&0 are forced to be XN. */
+ SCTLR_WXN_BIT |
+ /* Don't trap to EL1 execution of WFI or WFE at EL0. */
+ SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
+ /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+ SCTLR_UCT_BIT |
+ /* Don't trap to EL1 execution of DC ZVA at EL0. */
+ SCTLR_DZE_BIT |
+ /* Enable SP Alignment check for EL0 */
+ SCTLR_SA0_BIT |
+ /* Allow cacheable data and instr. accesses to normal memory. */
+ SCTLR_C_BIT | SCTLR_I_BIT |
+ /* Alignment fault checking enabled when at EL1 and EL0. */
+ SCTLR_A_BIT |
+ /* Enable MMU. */
+ SCTLR_M_BIT
+ ;
+
+ sctlr_el1 &= ~(
+ /* Explicit data accesses at EL0 are little-endian. */
+ SCTLR_E0E_BIT |
+ /* Accesses to DAIF from EL0 are trapped to EL1. */
+ SCTLR_UMA_BIT
+ );
+
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+
+ /* Point TTBR0_EL1 at the tables of the context created for the SP. */
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ (u_register_t)secure_partition_base_xlat_table);
+
+ /*
+ * Setup other system registers
+ * ----------------------------
+ */
+
+ /* Shim Exception Vector Base Address */
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ SPM_SHIM_EXCEPTIONS_PTR);
+
+ /*
+ * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
+ * TTA: Enable access to trace registers.
+ * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+ */
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));
+
+ /*
+ * Prepare information in buffer shared between EL3 and S-EL0
+ * ----------------------------------------------------------
+ */
+
+ void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;
+
+ /* Copy the boot information into the shared buffer with the SP. */
+ assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
+ <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));
+
+ assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));
+
+ const secure_partition_boot_info_t *sp_boot_info =
+ plat_get_secure_partition_boot_info(NULL);
+
+ assert(sp_boot_info != NULL);
+
+ memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
+ sizeof(secure_partition_boot_info_t));
+
+ /* Pointer to the MP information from the platform port. */
+ secure_partition_mp_info_t *sp_mp_info =
+ ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ assert(sp_mp_info != NULL);
+
+ /*
+ * Point the shared buffer MP information pointer to where the info will
+ * be populated, just after the boot info.
+ */
+ ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
+ (secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr
+ + sizeof(secure_partition_boot_info_t));
+
+ /*
+ * Update the shared buffer pointer to where the MP information for the
+ * payload will be populated
+ */
+ shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;
+
+ /*
+ * Copy the cpu information into the shared buffer area after the boot
+ * information.
+ */
+ assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);
+
+ assert((uintptr_t)shared_buf_ptr
+ <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
+ (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));
+
+ memcpy(shared_buf_ptr, (const void *) sp_mp_info,
+ sp_boot_info->num_cpus * sizeof(*sp_mp_info));
+
+ /*
+ * Calculate the linear indices of cores in boot information for the
+ * secure partition and flag the primary CPU
+ */
+ sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;
+
+ for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
+ u_register_t mpidr = sp_mp_info[index].mpidr;
+
+ sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
+ if (plat_my_core_pos() == sp_mp_info[index].linear_id)
+ sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
+ }
+
+ VERBOSE("S-EL1/S-EL0 context setup end.\n");
+}
diff --git a/services/std_svc/spm/spm.mk b/services/std_svc/spm/spm.mk
new file mode 100644
index 00000000..562eaee4
--- /dev/null
+++ b/services/std_svc/spm/spm.mk
@@ -0,0 +1,25 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${SPD},none)
+ $(error "Error: SPD and SPM are incompatible build options.")
+endif
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPM is only supported on aarch64.")
+endif
+
+# SPM sources
+
+SPM_SOURCES := $(addprefix services/std_svc/spm/, \
+ spm_main.c \
+ ${ARCH}/spm_helpers.S \
+ secure_partition_setup.c \
+ ${ARCH}/spm_shim_exceptions.S)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
new file mode 100644
index 00000000..1b40d81d
--- /dev/null
+++ b/services/std_svc/spm/spm_main.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <secure_partition.h>
+#include <smcc.h>
+#include <smcc_helpers.h>
+#include <spinlock.h>
+#include <spm_svc.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+
+/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
+static spinlock_t mem_attr_smc_lock;
+
+/*******************************************************************************
+ * Secure Partition context information.
+ ******************************************************************************/
+static secure_partition_context_t sp_ctx;
+unsigned int sp_init_in_progress;
+
+/*******************************************************************************
+ * Replace the S-EL1 re-entry information with S-EL0 re-entry
+ * information
+ ******************************************************************************/
+void spm_setup_next_eret_into_sel0(cpu_context_t *secure_context)
+{
+ assert(secure_context == cm_get_context(SECURE));
+
+ cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+}
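+
+/*
+ * Note: the S-EL1 shim forwards the Secure Partition's SVC to EL3 as an SMC,
+ * leaving ELR_EL1/SPSR_EL1 pointing back into S-EL0. Copying them into
+ * ELR_EL3/SPSR_EL3 makes the next ERET from EL3 land directly in the S-EL0
+ * partition, bypassing the shim.
+ */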
+
+/*******************************************************************************
+ * This function takes an SP context pointer and:
+ * 1. Applies the S-EL1 system register context from sp_ctx->cpu_ctx.
+ * 2. Saves the current C runtime state (callee-saved registers) on the stack
+ * frame and saves a reference to this state.
+ * 3. Calls el3_exit() so that the EL3 system and general purpose registers
+ * from the sp_ctx->cpu_ctx are used to enter the secure payload image.
+ ******************************************************************************/
+static uint64_t spm_synchronous_sp_entry(secure_partition_context_t *sp_ctx_ptr)
+{
+ uint64_t rc;
+
+ assert(sp_ctx_ptr != NULL);
+ assert(sp_ctx_ptr->c_rt_ctx == 0);
+ assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
+
+ /* Apply the Secure EL1 system register context and switch to it */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ VERBOSE("%s: We're about to enter the Secure partition...\n", __func__);
+
+ rc = spm_secure_partition_enter(&sp_ctx_ptr->c_rt_ctx);
+#if ENABLE_ASSERTIONS
+ sp_ctx_ptr->c_rt_ctx = 0;
+#endif
+
+ return rc;
+}
+
+
+/*******************************************************************************
+ * This function takes a Secure partition context pointer and:
+ * 1. Saves the S-EL1 system register context to sp_ctx->cpu_ctx.
+ * 2. Restores the current C runtime state (callee saved registers) from the
+ * stack frame using the reference to this state saved in
+ * spm_secure_partition_enter().
+ * 3. It does not need to save any general purpose or EL3 system register state
+ * because the generic SMC entry routine has already saved it.
+ ******************************************************************************/
+static void __dead2 spm_synchronous_sp_exit(
+ secure_partition_context_t *sp_ctx_ptr, uint64_t ret)
+{
+ assert(sp_ctx_ptr != NULL);
+ /* Save the Secure EL1 system register context */
+ assert(cm_get_context(SECURE) == &sp_ctx_ptr->cpu_ctx);
+ cm_el1_sysregs_context_save(SECURE);
+
+ assert(sp_ctx_ptr->c_rt_ctx != 0);
+ spm_secure_partition_exit(sp_ctx_ptr->c_rt_ctx, ret);
+
+ /* Should never reach here */
+ assert(0);
+}
+
+/*******************************************************************************
+ * This function passes control to the Secure Partition image (BL32) for the
+ * first time on the primary cpu after a cold boot. It assumes that a valid
+ * secure context has already been created by spm_setup() which can be directly
+ * used. This function performs a synchronous entry into the Secure payload.
+ * The SP passes control back to this routine through a SMC.
+ ******************************************************************************/
+int32_t spm_init(void)
+{
+ entry_point_info_t *secure_partition_ep_info;
+ uint64_t rc;
+
+ VERBOSE("%s entry\n", __func__);
+
+ /*
+ * Get information about the Secure Partition (BL32) image. Its
+ * absence is a critical failure.
+ */
+ secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ assert(secure_partition_ep_info);
+
+ /*
+ * Initialise the common context and then overlay the S-EL0 specific
+ * context on top of it.
+ */
+ cm_init_my_context(secure_partition_ep_info);
+ secure_partition_setup();
+
+ /*
+ * Arrange for an entry into the secure payload.
+ */
+ sp_init_in_progress = 1;
+ rc = spm_synchronous_sp_entry(&sp_ctx);
+ assert(rc == 0);
+ sp_init_in_progress = 0;
+ VERBOSE("SP_MEM_ATTRIBUTES_SET_AARCH64 availability has been revoked\n");
+
+ return rc;
+}
+
+/*******************************************************************************
+ * Given a pointer to a secure payload entry point info structure, an entry
+ * point PC and a pointer to a context data structure, this function
+ * initialises the SPM context and the entry point info for the secure payload
+ ******************************************************************************/
+void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
+ uint64_t pc,
+ secure_partition_context_t *sp_ctx_ptr)
+{
+ uint32_t ep_attr;
+
+ assert(sp_ep_info);
+ assert(pc);
+ assert(sp_ctx_ptr);
+
+ cm_set_context(&sp_ctx_ptr->cpu_ctx, SECURE);
+
+ /* initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if (read_sctlr_el3() & SCTLR_EE_BIT)
+ ep_attr |= EP_EE_BIG;
+ SET_PARAM_HEAD(sp_ep_info, PARAM_EP, VERSION_1, ep_attr);
+
+ sp_ep_info->pc = pc;
+ /* The SPM payload runs in S-EL0 */
+ sp_ep_info->spsr = SPSR_64(MODE_EL0,
+ MODE_SP_EL0,
+ DISABLE_ALL_EXCEPTIONS);
+
+ zeromem(&sp_ep_info->args, sizeof(sp_ep_info->args));
+}
+
+/*******************************************************************************
+ * Secure Partition Manager setup. The SPM finds out the SP entrypoint if not
+ * already known and initialises the context for entry into the SP for its
+ * initialisation.
+ ******************************************************************************/
+int32_t spm_setup(void)
+{
+ entry_point_info_t *secure_partition_ep_info;
+
+ VERBOSE("%s entry\n", __func__);
+
+ /*
+ * Get information about the Secure Partition (BL32) image. Its
+ * absence is a critical failure.
+ */
+ secure_partition_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (!secure_partition_ep_info) {
+ WARN("No SPM provided by BL2 boot loader, Booting device"
+ " without SPM initialization. SMCs destined for SPM"
+ " will return SMC_UNK\n");
+ return 1;
+ }
+
+ /*
+ * If there's no valid entry point for the SP, we return a non-zero value
+ * signalling failure to initialize the service, and bail out without
+ * registering any handlers.
+ */
+ if (!secure_partition_ep_info->pc) {
+ return 1;
+ }
+
+ spm_init_sp_ep_state(secure_partition_ep_info,
+ secure_partition_ep_info->pc,
+ &sp_ctx);
+
+ /*
+ * All SPM initialization done. Now register our init function with
+ * BL31 for deferred invocation
+ */
+ bl31_register_bl32_init(&spm_init);
+
+ VERBOSE("%s exit\n", __func__);
+
+ return 0;
+}
+
+/*
+ * Attributes are encoded using a different format in the SMC interface than in
+ * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
+ * converts an attributes value from the SMC format to the mmap_attr_t format by
+ * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
+ * The other fields are left as 0 because they are ignored by the function
+ * change_mem_attributes().
+ */
+static mmap_attr_t smc_attr_to_mmap_attr(unsigned int attributes)
+{
+ mmap_attr_t tf_attr = 0;
+
+ unsigned int access = (attributes & SP_MEM_ATTR_ACCESS_MASK)
+ >> SP_MEM_ATTR_ACCESS_SHIFT;
+
+ if (access == SP_MEM_ATTR_ACCESS_RW) {
+ tf_attr |= MT_RW | MT_USER;
+ } else if (access == SP_MEM_ATTR_ACCESS_RO) {
+ tf_attr |= MT_RO | MT_USER;
+ } else {
+ /* Other values are reserved. */
+ assert(access == SP_MEM_ATTR_ACCESS_NOACCESS);
+ /* The only requirement is that there's no access from EL0 */
+ tf_attr |= MT_RO | MT_PRIVILEGED;
+ }
+
+ if ((attributes & SP_MEM_ATTR_NON_EXEC) == 0) {
+ tf_attr |= MT_EXECUTE;
+ } else {
+ tf_attr |= MT_EXECUTE_NEVER;
+ }
+
+ return tf_attr;
+}
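+
+/*
+ * For example, an SMC attributes value of
+ * (SP_MEM_ATTR_ACCESS_RW << SP_MEM_ATTR_ACCESS_SHIFT) | SP_MEM_ATTR_NON_EXEC
+ * converts to MT_RW | MT_USER | MT_EXECUTE_NEVER.
+ */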
+
+/*
+ * This function converts attributes from the Trusted Firmware format into the
+ * SMC interface format.
+ */
+static int smc_mmap_to_smc_attr(mmap_attr_t attr)
+{
+ int smc_attr = 0;
+
+ int data_access;
+
+ if ((attr & MT_USER) == 0) {
+ /* No access from EL0. */
+ data_access = SP_MEM_ATTR_ACCESS_NOACCESS;
+ } else {
+ if ((attr & MT_RW) != 0) {
+ assert(MT_TYPE(attr) != MT_DEVICE);
+ data_access = SP_MEM_ATTR_ACCESS_RW;
+ } else {
+ data_access = SP_MEM_ATTR_ACCESS_RO;
+ }
+ }
+
+ smc_attr |= (data_access & SP_MEM_ATTR_ACCESS_MASK) << SP_MEM_ATTR_ACCESS_SHIFT;
+
+ if (attr & MT_EXECUTE_NEVER) {
+ smc_attr |= SP_MEM_ATTR_NON_EXEC;
+ }
+
+ return smc_attr;
+}
+
+static int spm_memory_attributes_get_smc_handler(uintptr_t base_va)
+{
+ spin_lock(&mem_attr_smc_lock);
+
+ mmap_attr_t attributes;
+ int rc = get_mem_attributes(secure_partition_xlat_ctx_handle,
+ base_va, &attributes);
+
+ spin_unlock(&mem_attr_smc_lock);
+
+ /* Convert error codes of get_mem_attributes() into SPM ones. */
+ assert(rc == 0 || rc == -EINVAL);
+
+ if (rc == 0) {
+ return smc_mmap_to_smc_attr(attributes);
+ } else {
+ return SPM_INVALID_PARAMETER;
+ }
+}
+
+static int spm_memory_attributes_set_smc_handler(u_register_t page_address,
+ u_register_t pages_count,
+ u_register_t smc_attributes)
+{
+ uintptr_t base_va = (uintptr_t) page_address;
+ size_t size = (size_t) (pages_count * PAGE_SIZE);
+ unsigned int attributes = (unsigned int) smc_attributes;
+
+ INFO(" Start address : 0x%lx\n", base_va);
+ INFO(" Number of pages: %i (%zi bytes)\n", (int) pages_count, size);
+ INFO(" Attributes : 0x%x\n", attributes);
+
+ spin_lock(&mem_attr_smc_lock);
+
+ int ret = change_mem_attributes(secure_partition_xlat_ctx_handle,
+ base_va, size, smc_attr_to_mmap_attr(attributes));
+
+ spin_unlock(&mem_attr_smc_lock);
+
+ /* Convert error codes of change_mem_attributes() into SPM ones. */
+ assert(ret == 0 || ret == -EINVAL);
+
+ return (ret == 0) ? SPM_SUCCESS : SPM_INVALID_PARAMETER;
+}
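+
+/*
+ * Illustrative only: during initialisation, a Secure Partition could make a
+ * range of its pages read-only and executable by issuing, e.g.,
+ * SMC(SP_MEM_ATTRIBUTES_SET_AARCH64, base_va, num_pages,
+ * SP_MEM_ATTR_ACCESS_RO << SP_MEM_ATTR_ACCESS_SHIFT), leaving
+ * SP_MEM_ATTR_NON_EXEC clear ('base_va' and 'num_pages' are placeholders).
+ */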
+
+
+uint64_t spm_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ cpu_context_t *ns_cpu_context;
+ unsigned int ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ if (ns == SMC_FROM_SECURE) {
+
+ /* Handle SMCs from Secure world. */
+
+ switch (smc_fid) {
+
+ case SPM_VERSION_AARCH32:
+ SMC_RET1(handle, SPM_VERSION_COMPILED);
+
+ case SP_EVENT_COMPLETE_AARCH64:
+ assert(handle == cm_get_context(SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+ spm_setup_next_eret_into_sel0(handle);
+
+ if (sp_init_in_progress) {
+ /*
+ * SPM reports completion. The SPM must have
+ * initiated the original request through a
+ * synchronous entry into the secure
+ * partition. Jump back to the original C
+ * runtime context.
+ */
+ spm_synchronous_sp_exit(&sp_ctx, x1);
+ assert(0);
+ }
+
+ /*
+ * This is the result from the Secure partition of an
+ * earlier request. Copy the result into the non-secure
+ * context, save the secure state and return to the
+ * non-secure state.
+ */
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+ /* Restore non-secure state */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ /* Return to normal world */
+ SMC_RET1(ns_cpu_context, x1);
+
+ case SP_MEM_ATTRIBUTES_GET_AARCH64:
+ INFO("Received SP_MEM_ATTRIBUTES_GET_AARCH64 SMC\n");
+
+ if (!sp_init_in_progress) {
+ WARN("SP_MEM_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, spm_memory_attributes_get_smc_handler(x1));
+
+ case SP_MEM_ATTRIBUTES_SET_AARCH64:
+ INFO("Received SP_MEM_ATTRIBUTES_SET_AARCH64 SMC\n");
+
+ if (!sp_init_in_progress) {
+ WARN("SP_MEM_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+ }
+ SMC_RET1(handle, spm_memory_attributes_set_smc_handler(x1, x2, x3));
+ default:
+ break;
+ }
+ } else {
+
+ /* Handle SMCs from Non-secure world. */
+
+ switch (smc_fid) {
+
+ case SP_VERSION_AARCH64:
+ case SP_VERSION_AARCH32:
+ SMC_RET1(handle, SP_VERSION_COMPILED);
+
+ case SP_COMMUNICATE_AARCH32:
+ case SP_COMMUNICATE_AARCH64:
+
+ /* Save the Normal world context */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * Restore the secure world context and prepare for
+ * entry in S-EL0
+ */
+ assert(&sp_ctx.cpu_ctx == cm_get_context(SECURE));
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ if (x2 != 0) {
+ VERBOSE("SP_COMMUNICATE_AARCH32/64: X2 is not 0 as recommended.");
+ }
+
+ SMC_RET4(&sp_ctx.cpu_ctx,
+ smc_fid, x2, x3, plat_my_core_pos());
+
+ case SP_MEM_ATTRIBUTES_GET_AARCH64:
+ case SP_MEM_ATTRIBUTES_SET_AARCH64:
+ /* SMC interfaces reserved for secure callers. */
+ SMC_RET1(handle, SPM_NOT_SUPPORTED);
+
+ default:
+ break;
+ }
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
new file mode 100644
index 00000000..16993e8c
--- /dev/null
+++ b/services/std_svc/spm/spm_private.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPM_PRIVATE_H__
+#define __SPM_PRIVATE_H__
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19 0x0
+#define SP_C_RT_CTX_X20 0x8
+#define SP_C_RT_CTX_X21 0x10
+#define SP_C_RT_CTX_X22 0x18
+#define SP_C_RT_CTX_X23 0x20
+#define SP_C_RT_CTX_X24 0x28
+#define SP_C_RT_CTX_X25 0x30
+#define SP_C_RT_CTX_X26 0x38
+#define SP_C_RT_CTX_X27 0x40
+#define SP_C_RT_CTX_X28 0x48
+#define SP_C_RT_CTX_X29 0x50
+#define SP_C_RT_CTX_X30 0x58
+
+#define SP_C_RT_CTX_SIZE 0x60
+#define SP_C_RT_CTX_ENTRIES (SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
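+
+/*
+ * These offsets define the frame layout used by the stp/ldp sequences in
+ * spm_helpers.S to save and restore the C runtime context.
+ */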
+
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+#include <xlat_tables_v2.h>
+
+/* Handle on the Secure partition translation context */
+extern xlat_ctx_t *secure_partition_xlat_ctx_handle;
+
+struct entry_point_info;
+
+typedef struct secure_partition_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+} secure_partition_context_t;
+
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+void spm_init_sp_ep_state(struct entry_point_info *sp_ep_info,
+ uint64_t pc,
+ secure_partition_context_t *sp_ctx_ptr);
+#endif /* __ASSEMBLY__ */
+
+#endif /* __SPM_PRIVATE_H__ */
diff --git a/services/std_svc/spm/spm_shim_private.h b/services/std_svc/spm/spm_shim_private.h
new file mode 100644
index 00000000..ad953cde
--- /dev/null
+++ b/services/std_svc/spm/spm_shim_private.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SPM_SHIM_PRIVATE__
+#define __SPM_SHIM_PRIVATE__
+
+#include <types.h>
+
+/* Assembly source */
+extern uintptr_t spm_shim_exceptions_ptr;
+
+/* Linker symbols */
+extern uintptr_t __SPM_SHIM_EXCEPTIONS_START__;
+extern uintptr_t __SPM_SHIM_EXCEPTIONS_END__;
+
+/* Definitions */
+#define SPM_SHIM_EXCEPTIONS_PTR (uintptr_t)(&spm_shim_exceptions_ptr)
+
+#define SPM_SHIM_EXCEPTIONS_START \
+ (uintptr_t)(&__SPM_SHIM_EXCEPTIONS_START__)
+#define SPM_SHIM_EXCEPTIONS_END \
+ (uintptr_t)(&__SPM_SHIM_EXCEPTIONS_END__)
+#define SPM_SHIM_EXCEPTIONS_SIZE \
+ (SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
+
+#endif /* __SPM_SHIM_PRIVATE__ */
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 8e690467..ffc34716 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,9 @@
#include <psci.h>
#include <runtime_instr.h>
#include <runtime_svc.h>
+#include <sdei.h>
#include <smcc_helpers.h>
+#include <spm_svc.h>
#include <std_svc.h>
#include <stdint.h>
#include <uuid.h>
@@ -25,15 +27,31 @@ DEFINE_SVC_UUID(arm_svc_uid,
static int32_t std_svc_setup(void)
{
uintptr_t svc_arg;
+ int ret = 0;
svc_arg = get_arm_std_svc_args(PSCI_FID_MASK);
assert(svc_arg);
/*
- * PSCI is the only specification implemented as a Standard Service.
+ * PSCI is one of the specifications implemented as a Standard Service.
* The `psci_setup()` also does EL3 architectural setup.
*/
- return psci_setup((const psci_lib_args_t *)svc_arg);
+ if (psci_setup((const psci_lib_args_t *)svc_arg) != PSCI_E_SUCCESS) {
+ ret = 1;
+ }
+
+#if ENABLE_SPM
+ if (spm_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
+#if SDEI_SUPPORT
+ /* SDEI initialisation */
+ sdei_init();
+#endif
+
+ return ret;
}
/*
@@ -80,6 +98,24 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, ret);
}
+#if ENABLE_SPM
+ /*
+ * Dispatch SPM calls to SPM SMC handler and return its return
+ * value
+ */
+ if (is_spm_fid(smc_fid)) {
+ return spm_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif
+
+#if SDEI_SUPPORT
+ if (is_sdei_fid(smc_fid)) {
+ return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif
+
switch (smc_fid) {
case ARM_STD_SVC_CALL_COUNT:
/*