Diffstat (limited to 'arch/arm/boot/compressed/head.S')
-rw-r--r--  arch/arm/boot/compressed/head.S | 57 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index a36f4526689b..c363458a4e63 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -179,16 +179,29 @@ not_angel:
bl cache_on
restart: adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
- ldr sp, [r0, #32]
+ ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
+ ldr sp, [r0, #28]
/*
* We might be running at a different address. We need
* to fix up various pointers.
*/
sub r0, r0, r1 @ calculate the delta offset
- add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
+ add r10, r10, r0 @ inflated kernel size location
+
+ /*
+ * The kernel build system appends the size of the
+ * decompressed kernel at the end of the compressed data
+ * in little-endian form.
+ */
+ ldrb r9, [r10, #0]
+ ldrb lr, [r10, #1]
+ orr r9, r9, lr, lsl #8
+ ldrb lr, [r10, #2]
+ ldrb r10, [r10, #3]
+ orr r9, r9, lr, lsl #16
+ orr r9, r9, r10, lsl #24
#ifndef CONFIG_ZBOOT_ROM
/* malloc space is above the relocated stack (64k max) */
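The four ldrb/orr instructions added above rebuild the appended size as a
32-bit little-endian value one byte at a time, so the read is safe for any
alignment of input_data_end and works whichever endianness the CPU runs in.
A minimal C sketch of the same reconstruction (the function name is
illustrative, not from the kernel):

    #include <stdint.h>

    /* Reassemble a 32-bit little-endian value from four byte loads,
     * mirroring the ldrb/orr sequence in the hunk above.
     */
    static uint32_t read_inflated_size(const uint8_t *p)
    {
            return (uint32_t)p[0]
                 | ((uint32_t)p[1] << 8)
                 | ((uint32_t)p[2] << 16)
                 | ((uint32_t)p[3] << 24);
    }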
@@ -206,31 +219,40 @@ restart: adr r0, LC0
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
- * r5 = start of this image
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
- * r4 >= r10 -> OK
- * r4 + image length <= r5 -> OK
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= current position (pc) -> OK
*/
+ add r10, r10, #16384
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
- cmp r10, r5
+ ARM( cmp r10, pc )
+ THUMB( mov lr, pc )
+ THUMB( cmp r10, lr )
bls wont_overwrite
/*
* Relocate ourselves past the end of the decompressed kernel.
- * r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
- /* Round up to next 256-byte boundary. */
- add r10, r10, #256
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourself when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
bic r10, r10, #255
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
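Taken as a whole, the hunk above makes two safe cases explicit and otherwise
picks a relocation target past the decompressed image. A hedged C rendering
with the registers as variables (reloc_code_size stands in for
reloc_code_end - restart, and pc for the current execution address; both
names belong to this sketch only):

    #include <stdbool.h>
    #include <stdint.h>

    /* Overwrite check and relocation target as sketched from the
     * hunk above; not the kernel's code, just its arithmetic.
     */
    static uintptr_t relocation_target(uintptr_t r4,   /* final kernel address */
                                       uintptr_t r9,   /* decompressed size */
                                       uintptr_t r10,  /* end of this image */
                                       uintptr_t pc,
                                       uintptr_t reloc_code_size,
                                       bool *must_relocate)
    {
            r10 += 16384;                     /* room for the 16k page directory */
            if (r4 >= r10 || r4 + r9 <= pc) {
                    *must_relocate = false;   /* wont_overwrite */
                    return 0;
            }
            *must_relocate = true;

            /* Land past the decompressed kernel, bumped by the
             * (assemble-time masked) size of the relocation code and
             * aligned down to a 256-byte boundary.
             */
            r10 = r4 + r9;
            r10 += (reloc_code_size + 256) & ~(uintptr_t)255;
            r10 &= ~(uintptr_t)255;
            return r10;
    }

Adding the pre-masked relocation-code size before the final align-down is
what keeps the copy destination clear of the running copy even when the
offset is small, per the comment in the hunk.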
@@ -245,6 +267,11 @@ restart: adr r0, LC0
/* Preserve offset to relocated code. */
sub r6, r9, r6
+#ifndef CONFIG_ZBOOT_ROM
+ /* cache_clean_flush may use the stack, so relocate it */
+ add sp, sp, r6
+#endif
+
bl cache_clean_flush
adr r0, BSYM(restart)
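The 'sub r6, r9, r6' context line leaves the relocation offset in r6, and
the added lines move sp by that same offset so that cache_clean_flush, which
may push to the stack, runs on the stack's relocated address. An
illustrative one-liner (names not from head.S):

    #include <stdint.h>

    /* A pointer into the moved image stays valid if it is rebased
     * by the relocation offset, which is all 'add sp, sp, r6' does
     * for the stack pointer.
     */
    static uintptr_t rebase(uintptr_t old_ptr, uintptr_t offset)
    {
            return old_ptr + offset;
    }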
@@ -333,12 +360,11 @@ not_relocated: mov r0, #0
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
- .word _start @ r5
.word _edata @ r6
- .word _image_size @ r9
+ .word input_data_end - 4 @ r10 (inflated size location)
.word _got_start @ r11
.word _got_end @ ip
- .word user_stack_end @ sp
+ .word .L_user_stack_end @ sp
.size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC
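With _start and _image_size gone and the inflated-size location added, the
new LC0 table is eight words: seven picked up by the ldmia into r1-r3, r6
and r10-r12, then sp from the eighth word at offset 7 * 4 == 28, which is
why the first hunk changed the stack load from [r0, #32] to [r0, #28]. A C
mirror of the layout (struct and field names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct lc0 {
            uint32_t lc0;             /* r1 */
            uint32_t bss_start;       /* r2: __bss_start */
            uint32_t end;             /* r3: _end */
            uint32_t edata;           /* r6: _edata */
            uint32_t inflated_size;   /* r10: input_data_end - 4 */
            uint32_t got_start;       /* r11: _got_start */
            uint32_t got_end;         /* r12: _got_end */
            uint32_t user_stack_end;  /* sp: .L_user_stack_end */
    };

    /* The sp slot must sit where 'ldr sp, [r0, #28]' expects it. */
    _Static_assert(offsetof(struct lc0, user_stack_end) == 28,
                   "sp slot offset must match head.S");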
@@ -1056,8 +1082,9 @@ memdump: mov r12, r0
#endif
.ltorg
+reloc_code_end:
.align
.section ".stack", "aw", %nobits
-user_stack: .space 4096
-user_stack_end:
+.L_user_stack: .space 4096
+.L_user_stack_end: