#include <linux/config.h> /* for CONFIG_ARCH_xxxx */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/arch/irqs.h>
#include <asm/arch/entry-macro.S>
#ifndef MODE_SVC
#define MODE_SVC 0x13
#endif
        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0                          @ clear fp so backtraces stop at the entry frame
#endif
        .endm
        .text

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH    0
#define BAD_DATA        1
#define BAD_ADDREXCPTN  2
#define BAD_IRQ         3
#define BAD_UNDEFINSTR  4

#define PT_TRACESYS     0x00000002

@ OS version number used in SWIs
@  RISC OS is 0
@  RISC iX is 8
@
#define OS_NUMBER       9
#define ARMSWI_OFFSET   0x000f0000
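@ Linux itself is OS number 9: an ARM-mode "swi" for a Linux system
@ call carries 0x900000 + syscall number in its 24-bit comment field,
@ which is what get_scno below reads back.
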
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF 8
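@ While those 8 bytes are still on the stack, the pt_regs fields sit
@ at S_OFF + S_xxx from sp; see fast_restore_user_regs below.
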
/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif
#if __LINUX_ARM_ARCH__ >= 6
        .macro  disable_irq
        cpsid   i
        .endm

        .macro  enable_irq
        cpsie   i
        .endm
#else
        .macro  disable_irq
        msr     cpsr_c, #PSR_I_BIT | SVC_MODE
        .endm

        .macro  enable_irq
        msr     cpsr_c, #SVC_MODE
        .endm
#endif
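@ ARMv6 adds the CPS instruction for masking and unmasking interrupts
@ directly; earlier architectures achieve the same effect by rewriting
@ the CPSR control field with MSR.
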
        .macro  save_user_regs
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
        add     r8, sp, #S_PC
        stmdb   r8, {sp, lr}^                   @ Calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        .endm
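@ save_user_regs builds a struct pt_regs frame on the SVC stack:
@ r0-r12, the banked user sp/lr (the ^ form of stmdb), the return pc
@ taken from lr_svc, the user cpsr read from spsr, and a copy of r0 in
@ OLD_R0, kept so the original first argument survives if the syscall
@ has to be restarted.
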
        .macro  restore_user_regs
        ldr     r1, [sp, #S_PSR]                @ Get calling cpsr
        disable_irq                             @ disable IRQs
        ldr     lr, [sp, #S_PC]!                @ Get PC
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0                          @ NOP: no banked register access right after ldm ^
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
/*
 * Must be called with IRQs already disabled.
 */
        .macro  fast_restore_user_regs
        ldr     r1, [sp, #S_OFF + S_PSR]        @ get calling cpsr
        ldr     lr, [sp, #S_OFF + S_PC]!        @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
        mov     r0, r0                          @ NOP: no banked register access right after ldm ^
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
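@ The fast path leaves r0 (the syscall return value) untouched in the
@ register and reloads only r1-lr; the S_OFF offsets account for the
@ 8-byte args-5/6 area still sitting below the pt_regs frame.
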
/*
 * Must be called with IRQs already disabled.
 */
        .macro  slow_restore_user_regs
        ldr     r1, [sp, #S_PSR]                @ get calling cpsr
        ldr     lr, [sp, #S_PC]!                @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
        mov     r0, r0                          @ NOP: no banked register access right after ldm ^
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
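@ The slow path reloads r0 from the frame as well, for return paths
@ where the saved registers may have been rewritten in the meantime
@ (signal delivery, ptrace, or a restarted syscall).
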
@ mask_pc is a no-op here; on the old 26-bit ARMs the PSR flags shared
@ the top bits of the PC and had to be masked off before using lr as
@ an address.
        .macro  mask_pc, rd, rm
        .endm
        .macro  get_thread_info, rd
        mov     \rd, sp, lsr #13
        mov     \rd, \rd, lsl #13
        .endm
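@ Clearing the low 13 bits of sp yields the base of the 8K kernel
@ stack, which is where struct thread_info is placed.
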
        .macro  alignment_trap, rbase, rtemp, sym
#ifdef CONFIG_ALIGNMENT_TRAP
#define OFF_CR_ALIGNMENT(x)     cr_alignment - x
        ldr     \rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
        mcr     p15, 0, \rtemp, c1, c0          @ write CP15 control register
#endif
        .endm
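@ With \rbase holding the address of \sym, rbase + (cr_alignment - sym)
@ addresses the kernel's cr_alignment variable; its value is written to
@ the CP15 control register so the configured alignment-fault setting
@ is in force on the entry paths that use this macro.
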
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info
/*
 * Get the system call number.
 */
        .macro  get_scno
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #OS_NUMBER << 20      @ put OS number in
        ldreq   scno, [lr, #-4]
#else
        mask_pc lr, lr
        ldr     scno, [lr, #-4]                 @ get SWI instruction
#endif
        .endm
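@ In ARM mode the syscall number is embedded in the swi instruction
@ itself, so it is read back from the instruction at lr - 4.  A Thumb
@ swi has no room for it, so the number arrives in r7 and
@ OS_NUMBER << 20 is added to reproduce the ARM-style encoding.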