author     Yatharth Kochar <yatharth.kochar@arm.com>  2015-10-02 17:56:48 +0100
committer  Yatharth Kochar <yatharth.kochar@arm.com>  2015-12-09 17:41:18 +0000
commit     bbf8f6f95b1ed9eeaa4e8d746a0660a7620cc415
tree       14cb9de8e22acef12b2b23cc15118e34c591cbbe /bl31/aarch64/runtime_exceptions.S
parent     c76e0d13bf840189ee2a9ecccf111d9ded63a5e6
Move context management code to common location
The upcoming Firmware Update feature needs to transition across the
Secure/Normal worlds to complete the FWU process, and hence requires the
context management code to perform this task. Currently the context
management code is part of the BL31 stage only. This patch moves the code
from (include)/bl31 to (include)/common. Some function
declarations/definitions and macros have also moved to different files to
help code sharing.

Change-Id: I3858b08aecdb76d390765ab2b099f457873f7b0c
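As a rough illustration of why the move helps, a stage other than BL31 (for
example BL1 driving the Firmware Update flow) can then reach the same
context-management interface. The sketch below is illustrative only, not part
of this patch: the cm_* names follow the Trusted Firmware context_mgmt API of
this era (check include/common/context_mgmt.h for the exact signatures), and
fwu_switch_to_normal_world() is a hypothetical helper.

/*
 * Minimal sketch, assuming the relocated context_mgmt interface.
 * fwu_switch_to_normal_world() is hypothetical and exists only to
 * show the shape of a world switch driven from outside BL31.
 */
#include <context_mgmt.h>

static void fwu_switch_to_normal_world(void)
{
	/* Save the EL1 system register state of the outgoing world */
	cm_el1_sysregs_context_save(SECURE);

	/* Restore the EL1 system register state of the incoming world */
	cm_el1_sysregs_context_restore(NON_SECURE);

	/*
	 * Point SP_EL3 at the Normal world context so that the el3_exit
	 * path restores its registers and ERETs into the Normal world.
	 */
	cm_set_next_eret_context(NON_SECURE);
}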
Diffstat (limited to 'bl31/aarch64/runtime_exceptions.S')
-rw-r--r--  bl31/aarch64/runtime_exceptions.S | 84 +-
1 file changed, 2 insertions(+), 82 deletions(-)
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 28353202..dc11e0a7 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,6 @@
#include <runtime_svc.h>
.globl runtime_exceptions
- .globl el3_exit
/* -----------------------------------------------------
* Handle SMC exceptions separately from other sync.
@@ -426,38 +425,7 @@ smc_handler64:
#endif
blr x15
- /* -----------------------------------------------------
- * This routine assumes that the SP_EL3 is pointing to
- * a valid context structure from where the gp regs and
- * other special registers can be retrieved.
- *
- * Keep it in the same section as smc_handler as this
- * function uses a fall-through to el3_exit
- * -----------------------------------------------------
- */
-el3_exit: ; .type el3_exit, %function
- /* -----------------------------------------------------
- * Save the current SP_EL0 i.e. the EL3 runtime stack
- * which will be used for handling the next SMC. Then
- * switch to SP_EL3
- * -----------------------------------------------------
- */
- mov x17, sp
- msr spsel, #1
- str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-
- /* -----------------------------------------------------
- * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
- * -----------------------------------------------------
- */
- ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
- ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
- msr scr_el3, x18
- msr spsr_el3, x16
- msr elr_el3, x17
-
- /* Restore saved general purpose registers and return */
- b restore_gp_registers_eret
+ b el3_exit
smc_unknown:
/*
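The comment block deleted above spells out el3_exit's contract: on entry
SP_EL3 must point at a valid context structure, the EL3 runtime stack
(SP_EL0) is stashed in that context, and SPSR_EL3, ELR_EL3 and SCR_EL3 are
reloaded from the el3state area before the ERET. A hypothetical C view of
that area, for orientation only:

#include <stdint.h>

/*
 * Illustrative sketch only; the authoritative layout is the set of
 * CTX_EL3STATE_OFFSET / CTX_* assembler constants in context.h. The
 * one ordering fact visible in the deleted code is that spsr_el3 and
 * elr_el3 are adjacent, since a single ldp loads both.
 */
typedef struct el3_state_sketch {
	uint64_t scr_el3;	/* -> SCR_EL3: world/routing controls */
	uint64_t runtime_sp;	/* saved SP_EL0, the EL3 runtime stack */
	uint64_t spsr_el3;	/* -> SPSR_EL3: PSTATE to return with */
	uint64_t elr_el3;	/* -> ELR_EL3: address the ERET lands on */
} el3_state_sketch_t;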
@@ -479,51 +447,3 @@ rt_svc_fw_critical_error:
msr spsel, #1 /* Switch to SP_ELx */
bl report_unhandled_exception
endfunc smc_handler
-
- /* -----------------------------------------------------
- * The following functions are used to save and restore
- * all the general purpose registers. Ideally we would
- * only save and restore the callee saved registers when
- * a world switch occurs but that type of implementation
- * is more complex. So currently we will always save and
- * restore these registers on entry and exit of EL3.
- * These are not macros to ensure their invocation fits
- * within the 32 instructions per exception vector.
- * -----------------------------------------------------
- */
-func save_gp_registers
- stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
- stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
- stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
- save_x18_to_x29_sp_el0
- ret
-endfunc save_gp_registers
-
-func restore_gp_registers_eret
- ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
- ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
-restore_gp_registers_callee_eret:
- ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
- ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
- ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
- ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
- ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
- ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
- ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
- ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
- ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
- ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
- ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
- ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
- msr sp_el0, x17
- ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
- eret
-endfunc restore_gp_registers_eret
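The deleted helpers also encode two design points worth keeping in mind as
they move: they are functions rather than macros so that each exception
vector fits within its 32-instruction budget, and the secondary entry point
restore_gp_registers_callee_eret deliberately starts at x4, so return values
a handler has placed in x0-x3 survive the restore. A hypothetical C view of
the GP register block these routines operate on:

#include <stdint.h>

/*
 * Illustrative sketch only; the real layout is fixed by the
 * CTX_GPREGS_OFFSET / CTX_GPREG_* assembler constants in context.h.
 * The adjacency of lr and sp_el0 is visible in the deleted code: one
 * ldp loads x30 together with the value later moved into SP_EL0.
 */
typedef struct gp_regs_sketch {
	uint64_t x[30];		/* x0 .. x29 at CTX_GPREG_X0 onwards */
	uint64_t lr;		/* x30, at CTX_GPREG_LR */
	uint64_t sp_el0;	/* the interrupted world's stack pointer */
} gp_regs_sketch_t;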