author     Vineet Gupta <vgupta@synopsys.com>   2013-06-12 13:49:02 +0530
committer  Vineet Gupta <vgupta@synopsys.com>   2013-06-22 19:23:20 +0530
commit     3e1ae441886b82fbf605f37ac0756b811d55f3d5 (patch)
tree       36c7e28d41922288ae1b3a466cf851f7a64da68e
parent     3abc94480225677ea08af817d56edfb0df9e9b80 (diff)
ARC: [mm] Remove @write argument to do_page_fault()
This can be ascertained within do_page_fault() since it gets the full
ECR (Exception Cause Register).
Further, for both callers of do_page_fault(), Prot-V and D-TLB-Miss, the
cause sub-fields in ECR are the same for the same type of access, which
makes the code much simpler.
   D-TLB-Miss [LD]  0x00_21_01_00
   Prot-V     [LD]  0x00_23_01_00
                            ^^
   D-TLB-Miss [ST]  0x00_21_02_00
   Prot-V     [ST]  0x00_23_02_00
                            ^^
   D-TLB-Miss [EX]  0x00_21_03_00
   Prot-V     [EX]  0x00_23_03_00
                            ^^
This allows the code to be consolidated, which pays off even more when the
code is moved from assembler to "C".
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
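
A minimal sketch of the reasoning above (not part of the commit): assuming the
usual ECR layout with the vector number in bits [23:16] and the cause code in
bits [15:8] (LD=0x01, ST=0x02, EX=0x03), the write flag falls out of a single
bit test. The bit position 9 used below is an assumption standing in for
ECR_C_BIT_DTLB_ST_MISS.

#include <stdio.h>

/* assumed bit position: bit 1 of the cause code held in ECR[15:8] */
#define ECR_C_BIT_DTLB_ST_MISS  9

static int ecr_is_write(unsigned long ecr)
{
	/* set for ST (xx_02_xx) and EX (xx_03_xx), clear for LD (xx_01_xx) */
	return !!(ecr & (1UL << ECR_C_BIT_DTLB_ST_MISS));
}

int main(void)
{
	unsigned long samples[] = {
		0x00210100UL,   /* D-TLB-Miss [LD] -> write = 0 */
		0x00230100UL,   /* Prot-V     [LD] -> write = 0 */
		0x00210200UL,   /* D-TLB-Miss [ST] -> write = 1 */
		0x00230300UL,   /* Prot-V     [EX] -> write = 1 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("ecr=0x%08lx  write=%d\n",
		       samples[i], ecr_is_write(samples[i]));

	return 0;
}

Since ST (0x02) and EX (0x03) both have that bit set while LD (0x01) does not,
the same test works whether the fault arrived via Prot-V or D-TLB-Miss.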
-rw-r--r--  arch/arc/kernel/entry.S | 14
-rw-r--r--  arch/arc/mm/fault.c     |  3
-rw-r--r--  arch/arc/mm/tlbex.S     | 14
3 files changed, 8 insertions, 23 deletions
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0c6d664d4a83..53655bf4c9d7 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -355,8 +355,8 @@ ARC_ENTRY EV_TLBProtV
 	; ecr and efa were not saved in case an Intr sneaks in
 	; after fake rtie
 	;
-	lr  r3, [ecr]
-	lr  r4, [efa]
+	lr  r2, [ecr]
+	lr  r1, [efa]	; Faulting Data address

 	; --------(4) Return from CPU Exception Mode ---------
 	;  Fake a rtie, but rtie to next label
@@ -371,23 +371,17 @@ ARC_ENTRY EV_TLBProtV
 	;   -Access Violaton (WRITE to READ ONLY Page) - for linux COW
 	;   -Unaligned Access (READ/WRITE on odd boundary)
 	;
-	cmp r3, 0x230400	; Misaligned data access ?
+	cmp r2, 0x230400	; Misaligned data access ?
 	beq 4f

 	;========= (6a) Access Violation Processing ========
-	cmp r3, 0x230100
-	mov r1, 0x0	; if LD exception ? write = 0
-	mov.ne r1, 0x1	; else write = 1
-
-	mov r2, r4	; faulting address
 	mov r0, sp	; pt_regs
 	bl  do_page_fault
 	b   ret_from_exception

 	;========== (6b) Non aligned access ============
4:
-	mov r0, r3	; cause code
-	mov r1, r4	; faulting address
+	mov r0, r2	; cause code
 	mov r2, sp	; pt_regs

 #ifdef CONFIG_ARC_MISALIGN_ACCESS
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index c0decc1f8d22..fdafeb1917cc 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -52,7 +52,7 @@ bad_area:
 	return 1;
 }

-void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+void do_page_fault(struct pt_regs *regs, unsigned long address,
 		   unsigned long cause_code)
 {
 	struct vm_area_struct *vma = NULL;
@@ -60,6 +60,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
 	struct mm_struct *mm = tsk->mm;
 	siginfo_t info;
 	int fault, ret;
+	int write = cause_code & (1 << ECR_C_BIT_DTLB_ST_MISS); /* ST/EX */
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 				(write ? FAULT_FLAG_WRITE : 0);
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 7bf811d51af8..bd8bc90f61d3 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -381,18 +381,8 @@ do_slow_path_pf:
 	; ------- setup args for Linux Page fault Hanlder ---------
 	mov_s	r0, sp
-	lr	r2, [efa]
-	lr	r3, [ecr]
-
-	; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
-	; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
-	; DTLB-ld Miss
-	; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
-	; Following code uses that fact that st/ex have one bit in common
-
-	btst_s	r3, ECR_C_BIT_DTLB_ST_MISS
-	mov.z	r1, 0
-	mov.nz	r1, 1
+	lr	r1, [efa]
+	lr	r2, [ecr]

 	; We don't want exceptions to be disabled while the fault is handled.
 	; Now that we have saved the context we return from exception hence
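
For reference, a sketch of the resulting calling convention (assuming the
standard ARC ABI, which passes the first function arguments in r0..r2); both
the Prot-V handler in entry.S and the slow-path TLB-miss handler in tlbex.S
now set up the same three registers:

/* sketch only: argument/register mapping implied by the patch */
void do_page_fault(struct pt_regs *regs,       /* r0:  mov  r0, sp    */
		   unsigned long address,      /* r1:  lr   r1, [efa] */
		   unsigned long cause_code);  /* r2:  lr   r2, [ecr] */

The @write distinction is then recovered inside do_page_fault() itself, as
shown in the fault.c hunk above.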