Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile         |   4
-rw-r--r--  arch/arm/kernel/armksyms.c       |   1
-rw-r--r--  arch/arm/kernel/calls.S          |  10
-rw-r--r--  arch/arm/kernel/crunch.c         |  13
-rw-r--r--  arch/arm/kernel/entry-armv.S     | 179
-rw-r--r--  arch/arm/kernel/entry-common.S   |  59
-rw-r--r--  arch/arm/kernel/entry-header.S   |  92
-rw-r--r--  arch/arm/kernel/head-common.S    |  15
-rw-r--r--  arch/arm/kernel/head-nommu.S     |  16
-rw-r--r--  arch/arm/kernel/head.S           |  28
-rw-r--r--  arch/arm/kernel/irq.c            |   2
-rw-r--r--  arch/arm/kernel/module.c         |  53
-rw-r--r--  arch/arm/kernel/process.c        |   2
-rw-r--r--  arch/arm/kernel/ptrace.c         |   8
-rw-r--r--  arch/arm/kernel/return_address.c |  71
-rw-r--r--  arch/arm/kernel/setup.c          |  28
-rw-r--r--  arch/arm/kernel/signal.c         |  94
-rw-r--r--  arch/arm/kernel/stacktrace.c     |   4
-rw-r--r--  arch/arm/kernel/unwind.c         |   4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    |   1
20 files changed, 483 insertions, 201 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ff89d0b3abc5..3213c9382b17 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -8,10 +8,12 @@ ifdef CONFIG_DYNAMIC_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
endif
+CFLAGS_REMOVE_return_address.o = -pg
+
# Object file lists.
obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \
- process.o ptrace.o setup.o signal.o \
+ process.o ptrace.o return_address.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
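
For context: with CONFIG_FUNCTION_TRACER, -pg makes the compiler emit a profiling call at every function entry, so return_address.o — which the tracer itself relies on — must be built without it, or the hook would recurse through its own unwinder. The per-function equivalent, as a minimal sketch (the helper name is illustrative):

	#include <linux/compiler.h>	/* provides notrace */

	/* notrace == __attribute__((no_instrument_function)): keeps this
	 * helper out of the mcount call graph, just as stripping -pg does
	 * for the whole of return_address.o above.
	 */
	static notrace unsigned long helper_used_by_the_tracer(void)
	{
		return 0;
	}
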
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 531e1860e546..0e627705f746 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -186,4 +186,5 @@ EXPORT_SYMBOL(_find_next_bit_be);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(mcount);
+EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index f776e72a4cb8..ecfa98954d1d 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -81,7 +81,7 @@
CALL(sys_ni_syscall) /* was sys_ssetmask */
/* 70 */ CALL(sys_setreuid16)
CALL(sys_setregid16)
- CALL(sys_sigsuspend_wrapper)
+ CALL(sys_sigsuspend)
CALL(sys_sigpending)
CALL(sys_sethostname)
/* 75 */ CALL(sys_setrlimit)
@@ -188,7 +188,7 @@
CALL(sys_rt_sigpending)
CALL(sys_rt_sigtimedwait)
CALL(sys_rt_sigqueueinfo)
- CALL(sys_rt_sigsuspend_wrapper)
+ CALL(sys_rt_sigsuspend)
/* 180 */ CALL(ABI(sys_pread64, sys_oabi_pread64))
CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
CALL(sys_chown16)
@@ -344,8 +344,8 @@
CALL(sys_readlinkat)
CALL(sys_fchmodat)
CALL(sys_faccessat)
-/* 335 */ CALL(sys_ni_syscall) /* eventually pselect6 */
- CALL(sys_ni_syscall) /* eventually ppoll */
+/* 335 */ CALL(sys_pselect6)
+ CALL(sys_ppoll)
CALL(sys_unshare)
CALL(sys_set_robust_list)
CALL(sys_get_robust_list)
@@ -355,7 +355,7 @@
CALL(sys_vmsplice)
CALL(sys_move_pages)
/* 345 */ CALL(sys_getcpu)
- CALL(sys_ni_syscall) /* eventually epoll_pwait */
+ CALL(sys_epoll_pwait)
CALL(sys_kexec_load)
CALL(sys_utimensat)
CALL(sys_signalfd)
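
The table positions above give these entries EABI syscall numbers 335 (pselect6), 336 (ppoll) and 346 (epoll_pwait). A user-space sketch exercising one of the newly wired calls through the glibc wrapper:

	#define _GNU_SOURCE
	#include <poll.h>
	#include <signal.h>

	/* ppoll: poll() with a temporary signal mask, applied atomically
	 * for the duration of the call — previously sys_ni_syscall on ARM.
	 */
	int wait_with_sigusr1_blocked(struct pollfd *fds, nfds_t nfds)
	{
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, SIGUSR1);
		return ppoll(fds, nfds, NULL, &mask);	/* NULL: no timeout */
	}
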
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
index 99995c2b2312..769abe15cf91 100644
--- a/arch/arm/kernel/crunch.c
+++ b/arch/arm/kernel/crunch.c
@@ -31,7 +31,7 @@ void crunch_task_release(struct thread_info *thread)
static int crunch_enabled(u32 devcfg)
{
- return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+ return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
}
static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
@@ -56,11 +56,16 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
break;
case THREAD_NOTIFY_SWITCH:
- devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+ devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
- devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+ /*
+ * We don't use ep93xx_syscon_swlocked_write() here
+ * because we are on the context switch path and
+ * preemption is already disabled.
+ */
+ devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+ __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
}
break;
}
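
A sketch of the EP93xx software-lock protocol this hunk open-codes: every write to a locked syscon register must be immediately preceded by writing 0xaa to SWLOCK. The helper name below is illustrative; the real ep93xx_syscon_swlocked_write() additionally takes a spinlock, which the context-switch path avoids because preemption is already disabled there.

	static void crunch_devcfg_write(u32 devcfg)
	{
		/* unlock: arms the syscon for exactly one following write */
		__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
		__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
	}
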
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fc8af43c5000..3d727a8a23bc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,7 +34,7 @@
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne asm_do_IRQ
#ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
*/
test_for_ipi r0, r6, r5, lr
movne r0, sp
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr
movne r0, sp
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif
@@ -70,7 +70,10 @@
*/
.macro inv_entry, reason
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - lr}
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason
.endm
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
.macro svc_entry, stack_hole=0
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole)
+ sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+ SPFIX( mov r0, sp )
+ SPFIX( tst r0, #4 ) @ test original stack alignment
+ SPFIX( ldr r0, [sp] ) @ restored
+#else
SPFIX( tst sp, #4 )
- SPFIX( bicne sp, sp, #4 )
- stmib sp, {r1 - r12}
+#endif
+ SPFIX( subeq sp, sp, #4 )
+ stmia sp, {r1 - r12}
ldmia r0, {r1 - r3}
- add r5, sp, #S_SP @ here for interlock avoidance
+ add r5, sp, #S_SP - 4 @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
- add r0, sp, #(S_FRAME_SIZE + \stack_hole)
- SPFIX( addne r0, r0, #4 )
- str r1, [sp] @ save the "real" r0 copied
+ add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX( addeq r0, r0, #4 )
+ str r1, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
mov r1, lr
@@ -151,6 +161,8 @@ ENDPROC(__und_invalid)
@ r4 - orig_r0 (see pt_regs definition in ptrace.h)
@
stmia r5, {r0 - r4}
+
+ asm_trace_hardirqs_off
.endm
.align 5
@@ -196,9 +208,8 @@ __dabt_svc:
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -206,9 +217,6 @@ ENDPROC(__dabt_svc)
__irq_svc:
svc_entry
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -225,13 +233,12 @@ __irq_svc:
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
#endif
- ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr_cxsf, r0
+ ldr r4, [sp, #S_PSR] @ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
- tst r0, #PSR_I_BIT
+ tst r4, #PSR_I_BIT
bleq trace_hardirqs_on
#endif
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ svc_exit r4 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -266,7 +273,7 @@ __und_svc:
@ r0 - instruction
@
ldr r0, [r2, #-4]
- adr r9, 1f
+ adr r9, BSYM(1f)
bl call_fpe
mov r0, sp @ struct pt_regs *regs
@@ -280,9 +287,8 @@ __und_svc:
@
@ restore SPSR and restart the instruction
@
- ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr_cxsf, lr
- ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ ldr r2, [sp, #S_PSR] @ Get SVC cpsr
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
@@ -323,9 +329,8 @@ __pabt_svc:
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -353,7 +358,8 @@ ENDPROC(__pabt_svc)
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - r12}
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3}
add r0, sp, #S_PC @ here for interlock avoidance
@@ -372,7 +378,8 @@ ENDPROC(__pabt_svc)
@ Also, separately save sp_usr and lr_usr
@
stmia r0, {r2 - r4}
- stmdb r0, {sp, lr}^
+ ARM( stmdb r0, {sp, lr}^ )
+ THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@
@ Enable the alignment trap while in kernel mode
@@ -383,6 +390,8 @@ ENDPROC(__pabt_svc)
@ Clear FP to mark the first stack frame
@
zero_fp
+
+ asm_trace_hardirqs_off
.endm
.macro kuser_cmpxchg_check
@@ -427,7 +436,7 @@ __dabt_usr:
@
enable_irq
mov r2, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_DataAbort
UNWIND(.fnend )
ENDPROC(__dabt_usr)
@@ -437,9 +446,6 @@ __irq_usr:
usr_entry
kuser_cmpxchg_check
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -452,7 +458,9 @@ __irq_usr:
ldr r0, [tsk, #TI_PREEMPT]
str r8, [tsk, #TI_PREEMPT]
teq r0, r7
- strne r0, [r0, -r0]
+ ARM( strne r0, [r0, -r0] )
+ THUMB( movne r0, #0 )
+ THUMB( strne r0, [r0] )
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
@@ -476,9 +484,10 @@ __und_usr:
@
@ r0 - instruction
@
- adr r9, ret_from_exception
- adr lr, __und_usr_unknown
+ adr r9, BSYM(ret_from_exception)
+ adr lr, BSYM(__und_usr_unknown)
tst r3, #PSR_T_BIT @ Thumb mode?
+ itet eq @ explicit IT needed for the 1f label
subeq r4, r2, #4 @ ARM instr at LR - 4
subne r4, r2, #2 @ Thumb instr at LR - 2
1: ldreqt r0, [r4]
@@ -488,7 +497,10 @@ __und_usr:
beq call_fpe
@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
-2: ldrht r5, [r4], #2
+2:
+ ARM( ldrht r5, [r4], #2 )
+ THUMB( ldrht r5, [r4] )
+ THUMB( add r4, r4, #2 )
and r0, r5, #0xf800 @ mask bits 111x x... .... ....
cmp r0, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
@@ -577,9 +589,11 @@ call_fpe:
moveq pc, lr
get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number
+ THUMB( lsr r8, r8, #8 )
mov r7, #1
add r6, r10, #TI_USED_CP
- strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[]
+ ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
+ THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -587,36 +601,38 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
- add pc, pc, r8, lsr #6
- mov r0, r0
-
- mov pc, lr @ CP#0
- b do_fpe @ CP#1 (FPE)
- b do_fpe @ CP#2 (FPE)
- mov pc, lr @ CP#3
+ ARM( add pc, pc, r8, lsr #6 )
+ THUMB( lsl r8, r8, #2 )
+ THUMB( add pc, r8 )
+ nop
+
+ W(mov) pc, lr @ CP#0
+ W(b) do_fpe @ CP#1 (FPE)
+ W(b) do_fpe @ CP#2 (FPE)
+ W(mov) pc, lr @ CP#3
#ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch)
#else
- mov pc, lr @ CP#4
- mov pc, lr @ CP#5
- mov pc, lr @ CP#6
+ W(mov) pc, lr @ CP#4
+ W(mov) pc, lr @ CP#5
+ W(mov) pc, lr @ CP#6
#endif
- mov pc, lr @ CP#7
- mov pc, lr @ CP#8
- mov pc, lr @ CP#9
+ W(mov) pc, lr @ CP#7
+ W(mov) pc, lr @ CP#8
+ W(mov) pc, lr @ CP#9
#ifdef CONFIG_VFP
- b do_vfp @ CP#10 (VFP)
- b do_vfp @ CP#11 (VFP)
+ W(b) do_vfp @ CP#10 (VFP)
+ W(b) do_vfp @ CP#11 (VFP)
#else
- mov pc, lr @ CP#10 (VFP)
- mov pc, lr @ CP#11 (VFP)
+ W(mov) pc, lr @ CP#10 (VFP)
+ W(mov) pc, lr @ CP#11 (VFP)
#endif
- mov pc, lr @ CP#12
- mov pc, lr @ CP#13
- mov pc, lr @ CP#14 (Debug)
- mov pc, lr @ CP#15 (Control)
+ W(mov) pc, lr @ CP#12
+ W(mov) pc, lr @ CP#13
+ W(mov) pc, lr @ CP#14 (Debug)
+ W(mov) pc, lr @ CP#15 (Control)
#ifdef CONFIG_NEON
.align 6
@@ -667,7 +683,7 @@ no_fp: mov pc, lr
__und_usr_unknown:
enable_irq
mov r0, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_undefinstr
ENDPROC(__und_usr_unknown)
@@ -711,7 +727,10 @@ ENTRY(__switch_to)
UNWIND(.cantunwind )
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
- stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
+ ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
+ THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
+ THUMB( str sp, [ip], #4 )
+ THUMB( str lr, [ip], #4 )
#ifdef CONFIG_MMU
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
@@ -736,8 +755,12 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
+ THUMB( mov ip, r4 )
mov r0, r5
- ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
+ THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
+ THUMB( ldr sp, [ip], #4 )
+ THUMB( ldr pc, [ip] )
UNWIND(.fnend )
ENDPROC(__switch_to)
@@ -772,6 +795,7 @@ ENDPROC(__switch_to)
* if your compiled code is not going to use the new instructions for other
* purpose.
*/
+ THUMB( .arm )
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
@@ -1020,6 +1044,7 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+ THUMB( .thumb )
/*
* Vector stubs.
@@ -1054,17 +1079,23 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
- eor r0, r0, #(\mode ^ SVC_MODE)
+ eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
and lr, lr, #0x0f
+ THUMB( adr r0, 1f )
+ THUMB( ldr lr, [r0, lr, lsl #2] )
mov r0, sp
- ldr lr, [pc, lr, lsl #2]
+ ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
+
+ .align 2
+ @ handler addresses follow this label
+1:
.endm
.globl __stubs_start
@@ -1202,14 +1233,16 @@ __stubs_end:
.globl __vectors_start
__vectors_start:
- swi SYS_ERROR0
- b vector_und + stubs_offset
- ldr pc, .LCvswi + stubs_offset
- b vector_pabt + stubs_offset
- b vector_dabt + stubs_offset
- b vector_addrexcptn + stubs_offset
- b vector_irq + stubs_offset
- b vector_fiq + stubs_offset
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ W(b) vector_und + stubs_offset
+ W(ldr) pc, .LCvswi + stubs_offset
+ W(b) vector_pabt + stubs_offset
+ W(b) vector_dabt + stubs_offset
+ W(b) vector_addrexcptn + stubs_offset
+ W(b) vector_irq + stubs_offset
+ W(b) vector_fiq + stubs_offset
.globl __vectors_end
__vectors_end:
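
The ARM()/THUMB()/W()/BSYM() annotations used throughout this file come from <asm/unified.h>. Roughly — a sketch of the idea, not the verbatim header:

	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(x...)		/* omitted from Thumb-2 builds */
	#define THUMB(x...)	x	/* emitted for Thumb-2 builds only */
	#define W(instr)	instr.w	/* force the 32-bit wide encoding */
	#define BSYM(sym)	sym + 1	/* set bit 0: keep adr-loaded
					   addresses in Thumb state */
	#else
	#define ARM(x...)	x
	#define THUMB(x...)
	#define W(instr)	instr
	#define BSYM(sym)	sym
	#endif
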
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..807cfebb0f44 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -33,14 +33,7 @@ ret_fast_syscall:
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ fast_restore_user_regs
- ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
/*
@@ -51,7 +44,7 @@ fast_work_pending:
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
- tst r1, #_TIF_SIGPENDING
+ tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
@@ -73,14 +66,7 @@ no_work_pending:
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ slow_restore_user_regs
- ldr r1, [sp, #S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
@@ -132,6 +118,25 @@ ftrace_call:
#else
+ENTRY(__gnu_mcount_nc)
+ stmdb sp!, {r0-r3, lr}
+ ldr r0, =ftrace_trace_function
+ ldr r2, [r0]
+ adr r0, ftrace_stub
+ cmp r0, r2
+ bne gnu_trace
+ ldmia sp!, {r0-r3, ip, lr}
+ bx ip
+
+gnu_trace:
+ ldr r1, [sp, #20] @ lr of instrumented routine
+ mov r0, lr
+ sub r0, r0, #MCOUNT_INSN_SIZE
+ mov lr, pc
+ mov pc, r2
+ ldmia sp!, {r0-r3, ip, lr}
+ bx ip
+
ENTRY(mcount)
stmdb sp!, {r0-r3, lr}
ldr r0, =ftrace_trace_function
@@ -182,8 +187,10 @@ ftrace_stub:
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
- add r8, sp, #S_PC
- stmdb r8, {sp, lr}^ @ Calling sp, lr
+ ARM( add r8, sp, #S_PC )
+ ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
+ THUMB( mov r8, sp )
+ THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs r8, spsr @ called from non-FIQ mode, so ok.
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
@@ -272,7 +279,7 @@ ENTRY(vector_swi)
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
- adr lr, ret_fast_syscall @ return address
+ adr lr, BSYM(ret_fast_syscall) @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
@@ -293,7 +300,7 @@ __sys_trace:
mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace
- adr lr, __sys_trace_return @ return address
+ adr lr, BSYM(__sys_trace_return) @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
@@ -373,16 +380,6 @@ sys_clone_wrapper:
b sys_clone
ENDPROC(sys_clone_wrapper)
-sys_sigsuspend_wrapper:
- add r3, sp, #S_OFF
- b sys_sigsuspend
-ENDPROC(sys_sigsuspend_wrapper)
-
-sys_rt_sigsuspend_wrapper:
- add r2, sp, #S_OFF
- b sys_rt_sigsuspend
-ENDPROC(sys_rt_sigsuspend_wrapper)
-
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
b sys_sigreturn
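
On EABI, newer GCC instruments each function entry with "push {lr}; bl __gnu_mcount_nc" rather than a plain mcount call, which is why the stub above pops {r0-r3, ip, lr} and returns through bx ip. Both stubs compare ftrace_trace_function against ftrace_stub and call through it when a tracer is installed; a sketch of the registration side, using the callback signature of this kernel generation:

	#include <linux/ftrace.h>
	#include <linux/init.h>

	/* Reached from mcount/__gnu_mcount_nc for every traced function:
	 * ip is the instrumented function, parent_ip its call site.
	 */
	static void my_tracer(unsigned long ip, unsigned long parent_ip)
	{
	}

	static struct ftrace_ops my_ops = {
		.func	= my_tracer,
	};

	static int __init my_tracer_init(void)
	{
		return register_ftrace_function(&my_ops);
	}
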
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 87ab4e157997..a4eaf4f920c5 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -36,11 +36,6 @@
#endif
.endm
- .macro get_thread_info, rd
- mov \rd, sp, lsr #13
- mov \rd, \rd, lsl #13
- .endm
-
.macro alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
ldr \rtemp, .LCcralign
@@ -49,6 +44,93 @@
#endif
.endm
+ @
+ @ Store/load the USER SP and LR registers by switching to the SYS
+ @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+ @ available. Should only be called from SVC mode
+ @
+ .macro store_user_sp_lr, rd, rtemp, offset = 0
+ mrs \rtemp, cpsr
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch to the SYS mode
+
+ str sp, [\rd, #\offset] @ save sp_usr
+ str lr, [\rd, #\offset + 4] @ save lr_usr
+
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
+ .macro load_user_sp_lr, rd, rtemp, offset = 0
+ mrs \rtemp, cpsr
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch to the SYS mode
+
+ ldr sp, [\rd, #\offset] @ load sp_usr
+ ldr lr, [\rd, #\offset + 4] @ load lr_usr
+
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .macro svc_exit, rpsr
+ msr spsr_cxsf, \rpsr
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #\offset + S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ .if \fast
+ ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ .else
+ ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ .endif
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+#else /* CONFIG_THUMB2_KERNEL */
+ .macro svc_exit, rpsr
+ ldr r0, [sp, #S_SP] @ top of the stack
+ ldr r1, [sp, #S_PC] @ return address
+ tst r0, #4 @ orig stack 8-byte aligned?
+ stmdb r0, {r1, \rpsr} @ rfe context
+ ldmia sp, {r0 - r12}
+ ldr lr, [sp, #S_LR]
+ addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned
+ addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
+ rfeia sp!
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ mov r2, sp
+ load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
+ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #\offset + S_PC] @ get pc
+ add sp, sp, #\offset + S_SP
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ .if \fast
+ ldmdb sp, {r1 - r12} @ get calling r1 - r12
+ .else
+ ldmdb sp, {r0 - r12} @ get calling r0 - r12
+ .endif
+ add sp, sp, #S_FRAME_SIZE - S_SP
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp
+ lsr \rd, \rd, #13
+ mov \rd, \rd, lsl #13
+ .endm
+#endif /* !CONFIG_THUMB2_KERNEL */
/*
* These are the registers used in the syscall handler, and allow us to
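
Both get_thread_info variants compute the same thing: kernel stacks are THREAD_SIZE (8K, i.e. 2^13 bytes) aligned with the thread_info at the base, so clearing the low 13 bits of sp finds it; Thumb-2 just needs an extra instruction because it lacks the shifted-operand mov. The same shape as the existing C helper in <asm/thread_info.h>:

	#include <asm/thread_info.h>	/* THREAD_SIZE: 8192 on ARM */

	static inline struct thread_info *thread_info_from_sp(void)
	{
		register unsigned long sp asm ("sp");
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}
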
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 991952c644d1..93ad576b2d74 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -14,6 +14,7 @@
#define ATAG_CORE 0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+ .align 2
.type __switch_data, %object
__switch_data:
.long __mmap_switched
@@ -51,7 +52,9 @@ __mmap_switched:
strcc fp, [r6],#4
bcc 1b
- ldmia r3, {r4, r5, r6, r7, sp}
+ ARM( ldmia r3, {r4, r5, r6, r7, sp})
+ THUMB( ldmia r3, {r4, r5, r6, r7} )
+ THUMB( ldr sp, [r3, #16] )
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
@@ -155,7 +158,8 @@ ENDPROC(__error)
*/
__lookup_processor_type:
adr r3, 3f
- ldmda r3, {r5 - r7}
+ ldmia r3, {r5 - r7}
+ add r3, r3, #8
sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
@@ -185,9 +189,10 @@ ENDPROC(lookup_processor_type)
* Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
* more information about the __proc_info and __arch_info structures.
*/
- .long __proc_info_begin
+ .align 2
+3: .long __proc_info_begin
.long __proc_info_end
-3: .long .
+4: .long .
.long __arch_info_begin
.long __arch_info_end
@@ -203,7 +208,7 @@ ENDPROC(lookup_processor_type)
* r5 = mach_info pointer in physical address space
*/
__lookup_machine_type:
- adr r3, 3b
+ adr r3, 4b
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
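
The ldmda → ldmia change is needed because Thumb-2 has no decrement-after multiple load, so the anchor word moves after the pointers and the labels split into 3: and 4:. The virt-to-phys fixup both lookup routines perform is, in C terms (a sketch):

	/* The anchor word (4: .long .) holds its own link-time address;
	 * subtracting that from the anchor's run-time address yields the
	 * offset that converts the other linked (virtual) pointers into
	 * physical ones, usable before the MMU is enabled.
	 */
	static unsigned long virt_to_runtime(unsigned long virt,
					     unsigned long anchor_runtime,
					     unsigned long anchor_linktime)
	{
		return virt + (anchor_runtime - anchor_linktime);
	}
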
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index cc87e1765ed2..e5dfc2895e24 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -34,7 +34,7 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
@@ -50,8 +50,10 @@ ENTRY(stext)
ldr r13, __switch_data @ address to jump to after
@ the initialization is done
- adr lr, __after_proc_init @ return (PIC) address
- add pc, r10, #PROCINFO_INITFUNC
+ adr lr, BSYM(__after_proc_init) @ return (PIC) address
+ ARM( add pc, r10, #PROCINFO_INITFUNC )
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(stext)
/*
@@ -59,7 +61,10 @@ ENDPROC(stext)
*/
__after_proc_init:
#ifdef CONFIG_CPU_CP15
- mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ /*
+ * CP15 system control register value returned in r0 from
+ * the CPU init function.
+ */
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
@@ -82,7 +87,8 @@ __after_proc_init:
mcr p15, 0, r0, c1, c0, 0 @ write control reg
#endif /* CONFIG_CPU_CP15 */
- mov pc, r13 @ clear the BSS and jump
+ mov r3, r13
+ mov pc, r3 @ clear the BSS and jump
@ to start_kernel
ENDPROC(__after_proc_init)
.ltorg
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 21e17dc94cb5..38ccbe1d3b2c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -76,7 +76,7 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -97,8 +97,10 @@ ENTRY(stext)
*/
ldr r13, __switch_data @ address to jump to after
@ mmu has been enabled
- adr lr, __enable_mmu @ return (PIC) address
- add pc, r10, #PROCINFO_INITFUNC
+ adr lr, BSYM(__enable_mmu) @ return (PIC) address
+ ARM( add pc, r10, #PROCINFO_INITFUNC )
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(stext)
#if defined(CONFIG_SMP)
@@ -110,7 +112,7 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
@@ -121,12 +123,15 @@ ENTRY(secondary_startup)
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
- ldmia r4, {r5, r7, r13} @ address to jump to after
+ ldmia r4, {r5, r7, r12} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir
- adr lr, __enable_mmu @ return address
- add pc, r10, #PROCINFO_INITFUNC @ initialise processor
- @ (return control reg)
+ adr lr, BSYM(__enable_mmu) @ return address
+ mov r13, r12 @ __secondary_switched address
+ ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
+ @ (return control reg)
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(secondary_startup)
/*
@@ -193,8 +198,8 @@ __turn_mmu_on:
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3
- mov r3, r3
- mov pc, r13
+ mov r3, r13
+ mov pc, r3
ENDPROC(__turn_mmu_on)
@@ -235,7 +240,8 @@ __create_page_tables:
* will be removed by paging_init(). We use our current program
* counter to determine corresponding section base address.
*/
- mov r6, pc, lsr #20 @ start of kernel section
+ mov r6, pc
+ mov r6, r6, lsr #20 @ start of kernel section
orr r3, r7, r6, lsl #20 @ flags + kernel base
str r3, [r4, r6, lsl #2] @ identity mapping
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b7c3490eaa24..c9a8619f3856 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -86,7 +86,7 @@ int show_interrupts(struct seq_file *p, void *v)
unlock:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
-#ifdef CONFIG_ARCH_ACORN
+#ifdef CONFIG_FIQ
show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index bac03c81489d..f28c5e9c51ea 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -102,6 +102,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned long loc;
Elf32_Sym *sym;
s32 offset;
+ u32 upper, lower, sign, j1, j2;
offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
@@ -184,6 +185,58 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
(offset & 0x0fff);
break;
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+
+ /*
+ * 25 bit signed address range (Thumb-2 BL and B.W
+ * instructions):
+ * S:I1:I2:imm10:imm11:0
+ * where:
+ * S = upper[10] = offset[24]
+ * I1 = ~(J1 ^ S) = offset[23]
+ * I2 = ~(J2 ^ S) = offset[22]
+ * imm10 = upper[9:0] = offset[21:12]
+ * imm11 = lower[10:0] = offset[11:1]
+ * J1 = lower[13]
+ * J2 = lower[11]
+ */
+ sign = (upper >> 10) & 1;
+ j1 = (lower >> 13) & 1;
+ j2 = (lower >> 11) & 1;
+ offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
+ ((~(j2 ^ sign) & 1) << 22) |
+ ((upper & 0x03ff) << 12) |
+ ((lower & 0x07ff) << 1);
+ if (offset & 0x01000000)
+ offset -= 0x02000000;
+ offset += sym->st_value - loc;
+
+ /* only Thumb addresses allowed (no interworking) */
+ if (!(offset & 1) ||
+ offset <= (s32)0xff000000 ||
+ offset >= (s32)0x01000000) {
+ printk(KERN_ERR
+ "%s: relocation out of range, section "
+ "%d reloc %d sym '%s'\n", module->name,
+ relindex, i, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+
+ sign = (offset >> 24) & 1;
+ j1 = sign ^ (~(offset >> 23) & 1);
+ j2 = sign ^ (~(offset >> 22) & 1);
+ *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ ((offset >> 12) & 0x03ff));
+ *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+ break;
+
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
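
A worked, stand-alone example of the decode above — rebuilding the signed 25-bit offset from a Thumb-2 BL half-word pair (the values here encode "bl ." and are purely illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t upper = 0xf7ff, lower = 0xfffe;	/* bl . */
		uint32_t sign = (upper >> 10) & 1;
		uint32_t j1 = (lower >> 13) & 1;
		uint32_t j2 = (lower >> 11) & 1;
		int32_t offset = (sign << 24) |
				 ((~(j1 ^ sign) & 1) << 23) |
				 ((~(j2 ^ sign) & 1) << 22) |
				 ((upper & 0x03ff) << 12) |
				 ((lower & 0x07ff) << 1);

		if (offset & 0x01000000)
			offset -= 0x02000000;	/* sign-extend bit 24 */

		/* prints -4: target = PC (= addr + 4) - 4 = the BL itself */
		printf("branch offset = %d\n", offset);
		return 0;
	}
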
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 39196dff478c..790fbee92ec5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -388,7 +388,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_r2 = (unsigned long)fn;
regs.ARM_r3 = (unsigned long)kernel_thread_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
- regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
+ regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 89882a1d0187..a2ea3854cb3c 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -521,7 +521,13 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
return -EIO;
tmp = 0;
- if (off < sizeof(struct pt_regs))
+ if (off == PT_TEXT_ADDR)
+ tmp = tsk->mm->start_code;
+ else if (off == PT_DATA_ADDR)
+ tmp = tsk->mm->start_data;
+ else if (off == PT_TEXT_END_ADDR)
+ tmp = tsk->mm->end_code;
+ else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
return put_user(tmp, ret);
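
These magic offsets exist for debuggers on no-MMU targets, where a flat binary's text and data are relocated at load time and gdb must ask the kernel for the base addresses to relocate its symbols. A user-space sketch — PT_TEXT_ADDR and friends come from the kernel's asm/ptrace.h, not the generic ptrace ABI:

	#include <sys/ptrace.h>
	#include <sys/types.h>

	#ifndef PT_TEXT_ADDR
	#define PT_TEXT_ADDR	0x10000	/* arch/arm value, assumed here */
	#endif

	long text_base_of(pid_t pid)
	{
		return ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_ADDR, 0);
	}
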
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
new file mode 100644
index 000000000000..df246da4ceca
--- /dev/null
+++ b/arch/arm/kernel/return_address.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ * for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/module.h>
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+#include <linux/sched.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)frame->lr;
+
+ return 1;
+ } else {
+ --data->level;
+ return 0;
+ }
+}
+
+void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+ struct stackframe frame;
+ register unsigned long current_sp asm ("sp");
+
+ data.level = level + 1;
+
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)return_address;
+
+ walk_stackframe(&frame, save_return_addr, &data);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+}
+
+#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
+
+#if defined(CONFIG_ARM_UNWIND)
+#warning "TODO: return_address should use unwind tables"
+#endif
+
+void *return_address(unsigned int level)
+{
+ return NULL;
+}
+
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+
+EXPORT_SYMBOL_GPL(return_address);
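
Usage sketch: the level argument selects how many frames to walk back (the +1 on data.level skips return_address() itself). A hypothetical caller, under the frame-pointer configuration the file requires:

	void *report_call_site(unsigned int level)
	{
		/* NULL when CONFIG_FRAME_POINTER is off, CONFIG_ARM_UNWIND
		 * is on, or the walk runs out of frames before 'level'.
		 */
		return return_address(level);
	}
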
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bc5e4128f9f3..d4d4f77c91b2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/fs.h>
+#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
@@ -327,25 +328,38 @@ void cpu_init(void)
}
/*
+ * Define the placement constraint for the inline asm directive below.
+ * In Thumb-2, msr with an immediate value is not allowed.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define PLC "r"
+#else
+#define PLC "I"
+#endif
+
+ /*
* setup stacks for re-entrant exception handlers
*/
__asm__ (
"msr cpsr_c, %1\n\t"
- "add sp, %0, %2\n\t"
+ "add r14, %0, %2\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %3\n\t"
- "add sp, %0, %4\n\t"
+ "add r14, %0, %4\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %5\n\t"
- "add sp, %0, %6\n\t"
+ "add r14, %0, %6\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %7"
:
: "r" (stk),
- "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
}
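
Two Thumb-2 restrictions drive this hunk: msr only accepts a register operand (hence PLC switching the constraint from "I" to "r"), and add with an arbitrary source register cannot target sp (hence staging each stack pointer through r14). The constraint switch, condensed into an assumed illustration:

	/* With PLC = "I" the mode word is encoded as an immediate in the
	 * msr instruction itself (ARM state only); with PLC = "r" the
	 * compiler first materialises it in a register, as Thumb-2 needs.
	 */
	#define set_cpsr_c(mode)					\
		__asm__("msr	cpsr_c, %0" :: PLC (mode) : "memory")
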
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..1423a3419789 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
+#include <linux/tracehook.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -47,57 +48,22 @@ const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
-static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
-
/*
* atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
- sigset_t saveset;
-
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- regs->ARM_r0 = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&saveset, regs, 0))
- return regs->ARM_r0;
- }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- regs->ARM_r0 = -EINTR;
-
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&saveset, regs, 0))
- return regs->ARM_r0;
- }
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_restore_sigmask();
+ return -ERESTARTNOHAND;
}
asmlinkage int
@@ -545,7 +511,7 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset,
struct pt_regs * regs, int syscall)
@@ -596,7 +562,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
if (ret != 0) {
force_sigsegv(sig, tsk);
- return;
+ return ret;
}
/*
@@ -610,6 +576,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
+ return 0;
}
/*
@@ -621,7 +588,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
+static void do_signal(struct pt_regs *regs, int syscall)
{
struct k_sigaction ka;
siginfo_t info;
@@ -634,7 +601,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
* if so.
*/
if (!user_mode(regs))
- return 0;
+ return;
if (try_to_freeze())
goto no_signal;
@@ -643,9 +610,24 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- handle_signal(signr, &ka, &info, oldset, regs, syscall);
+ sigset_t *oldset;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+ if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+ /*
+ * A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
single_step_set(current);
- return 1;
+ return;
}
no_signal:
@@ -697,14 +679,28 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
regs->ARM_r0 == -ERESTARTNOINTR) {
setup_syscall_restart(regs);
}
+
+ /* If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
single_step_set(current);
- return 0;
}
asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
if (thread_flags & _TIF_SIGPENDING)
- do_signal(&current->blocked, regs, syscall);
+ do_signal(regs, syscall);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
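
The rewritten sys_sigsuspend follows the generic saved_sigmask pattern: park the old mask, sleep interruptibly, flag TIF_RESTORE_SIGMASK, and return -ERESTARTNOHAND so the call restarts only when no handler actually ran. do_signal() then either leaves the mask to be restored by sigreturn (handler delivered) or puts it back itself. Reduced to its skeleton:

	#include <linux/sched.h>

	/* Skeleton of the pattern above, not the full syscall. */
	static long sigsuspend_skeleton(sigset_t *newset)
	{
		spin_lock_irq(&current->sighand->siglock);
		current->saved_sigmask = current->blocked;
		current->blocked = *newset;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);

		current->state = TASK_INTERRUPTIBLE;
		schedule();
		set_restore_sigmask();		/* sets TIF_RESTORE_SIGMASK */
		return -ERESTARTNOHAND;
	}
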
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 9f444e5cc165..20b7411e47fd 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -21,7 +21,7 @@
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
@@ -43,7 +43,7 @@ int unwind_frame(struct stackframe *frame)
}
#endif
-void walk_stackframe(struct stackframe *frame,
+void notrace walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
{
while (1) {
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd56e11f339a..39baf1128bfa 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -62,7 +62,11 @@ struct unwind_ctrl_block {
};
enum regs {
+#ifdef CONFIG_THUMB2_KERNEL
+ FP = 7,
+#else
FP = 11,
+#endif
SP = 13,
LR = 14,
PC = 15
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 69371028a202..5cc4812c9763 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -83,6 +83,7 @@ SECTIONS
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit)
+ *(.discard)
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
#ifndef CONFIG_HOTPLUG_CPU