author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-03-17 16:37:22 -0700
committer  Ingo Molnar <mingo@elte.hu>              2008-04-24 23:57:32 +0200
commit     0f2c87695219b1129ccf93e0f58acdcdd49724b9 (patch)
tree       8c0d50e19a880f10d0fc5c92666b043d0257eb2d /arch/x86/xen
parent     dbe9e994c99ac9ac12d2b66ea42f44558f54fa52 (diff)
xen: jump to iret fixup
Use jmp rather than call for the iret fixup, so it's consistent with the sysexit fixup, and it simplifies the stack (which is already complex).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
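As background, a minimal sketch of the call-site shape the message refers to. The caller (the Xen upcall path in arch/x86/kernel/entry_32.S at the time) is not part of this filtered diff, so the surrounding code below is assumed for illustration, not quoted from the tree:

	/* Hypothetical before: the fixup is reached with "call", so a return
	   address sits on the stack and every PT_* offset inside
	   xen_iret_crit_fixup needs a +4 adjustment; the fixup ends with "ret". */
	call xen_iret_crit_fixup

	/* Hypothetical after: a plain jump pushes nothing, the PT_* offsets
	   line up directly with the saved pt_regs frame, and the fixup
	   tail-jumps to xen_do_upcall itself (see "2: jmp xen_do_upcall" in
	   the last hunk below). */
	jmp xen_iret_crit_fixup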
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/xen-asm.S | 22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 1ac08082a4b4..53cae923e148 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -223,9 +223,7 @@ hyper_iret:
ds } SAVE_ALL state
eax }
: :
- ebx }
- ----------------
- return addr <- esp
+ ebx }<- esp
----------------
In order to deliver the nested exception properly, we need to shift
@@ -240,10 +238,8 @@ hyper_iret:
it's usermode state which we eventually need to restore.
*/
ENTRY(xen_iret_crit_fixup)
- /* offsets +4 for return address */
-
/*
- Paranoia: Make sure we're really coming from userspace.
+ Paranoia: Make sure we're really coming from kernel space.
One could imagine a case where userspace jumps into the
critical range address, but just before the CPU delivers a GP,
it decides to deliver an interrupt instead. Unlikely?
@@ -252,32 +248,32 @@ ENTRY(xen_iret_crit_fixup)
jump instruction itself, not the destination, but some virtual
environments get this wrong.
*/
- movl PT_CS+4(%esp), %ecx
+ movl PT_CS(%esp), %ecx
andl $SEGMENT_RPL_MASK, %ecx
cmpl $USER_RPL, %ecx
je 2f
- lea PT_ORIG_EAX+4(%esp), %esi
- lea PT_EFLAGS+4(%esp), %edi
+ lea PT_ORIG_EAX(%esp), %esi
+ lea PT_EFLAGS(%esp), %edi
/* If eip is before iret_restore_end then stack
hasn't been restored yet. */
cmp $iret_restore_end, %eax
jae 1f
- movl 0+4(%edi),%eax /* copy EAX */
- movl %eax, PT_EAX+4(%esp)
+ movl 0+4(%edi),%eax /* copy EAX (just above top of frame) */
+ movl %eax, PT_EAX(%esp)
lea ESP_OFFSET(%edi),%edi /* move dest up over saved regs */
/* set up the copy */
1: std
- mov $(PT_EIP+4) / 4, %ecx /* copy ret+saved regs up to orig_eax */
+ mov $PT_EIP / 4, %ecx /* saved regs up to orig_eax */
rep movsl
cld
lea 4(%edi),%esp /* point esp to new frame */
-2: ret
+2: jmp xen_do_upcall
ENTRY(xen_sysexit)
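For readers unfamiliar with the copy loop in the last hunk: the fixup shifts the saved registers up in memory with an overlapping, backwards "rep movsl", which is why it sets the direction flag first. A minimal sketch of that idiom, with the hypothetical names src_end, dst_end and NWORDS standing in for the PT_* arithmetic in the patch:

	lea	src_end, %esi		/* last dword of the block to move */
	lea	dst_end, %edi		/* last dword of its new, higher location */
	mov	$NWORDS, %ecx		/* number of 32-bit words to copy */
	std				/* direction flag set: movsl walks downwards */
	rep movsl			/* copy high-to-low so the overlap is safe */
	cld				/* restore DF=0, as kernel code expects */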