-rw-r--r-- | arch/parisc/kernel/syscall.S | 32
1 files changed, 9 insertions, 23 deletions
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa0..68e75ce838d6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28				/* 2nd case */
 	b	lws_exit				/* Contended... */
 	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
 1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
 3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
 END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 
 .end
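
For context, the lws_entry patched above is reached from userspace by branching to offset 0xb0 of the gateway page. Below is a minimal, illustrative sketch (not part of this patch) of how a 32-bit caller could invoke the LWS compare-and-swap, assuming the register convention documented in syscall.S (%r20 = LWS index, %r26/%r25/%r24 = address/old/new, %r28 = previous value, %r21 = status) and a hypothetical lock_word symbol:

	ldil	L%lock_word, %r26
	ldo	R%lock_word(%r26), %r26	/* %r26 = address of word to update (lock_word is a hypothetical symbol) */
	ldi	0, %r25			/* %r25 = expected old value */
	ldi	1, %r24			/* %r24 = new value to store */
	ble	0xb0(%sr2, %r0)		/* branch to lws_entry on the gateway page */
	ldi	0, %r20			/* delay slot: LWS index 0 = 32-bit compare-and-swap */
	/* on return: %r28 = previous value, %r21 = 0, -EAGAIN or -EFAULT */

With the patch applied, the gate instruction at lws_entry raises privilege immediately on entry, and the final be,n now returns through %sr7 so the caller resumes in user space.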