author    Catalin Marinas <catalin.marinas@arm.com>   2009-09-14 14:00:13 +0100
committer Catalin Marinas <catalin.marinas@arm.com>   2009-09-14 14:06:29 +0100
commit    fa907a1e8eb0c20a8c0b7b84b13b9363adc3f4d1 (patch)
tree      623a399f0e1f7fdf90d6b0baf5ef685bc6c16c2c
parent    9ccb64e4df7de0f4ef160b79af9b0e78879643ca (diff)
Clear the exclusive monitor when returning from an exception
The patch adds a CLREX or dummy STREX to the exception return path. This is
needed because several atomic/locking operations use a pair of LDREX/STREXEQ
and the EQ condition may not always be satisfied. This would leave the
exclusive monitor status set and may cause problems with atomic/locking
operations in the interrupted code.

With this patch, the atomic_set() operation can be a simple STR instruction
(on SMP systems, the global exclusive monitor is cleared by STR anyway).
Clearing the exclusive monitor during context switch is no longer needed as
this is handled by the exception return path anyway.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--  arch/arm/include/asm/atomic.h  | 21
-rw-r--r--  arch/arm/kernel/entry-armv.S   |  7
-rw-r--r--  arch/arm/kernel/entry-header.S | 16
3 files changed, 18 insertions(+), 26 deletions(-)
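
To illustrate the problem described in the commit message, here is a minimal sketch (not code from this patch; the helper name is hypothetical) of the LDREX/STREXEQ pattern used by compare-and-exchange style operations. When the TEQ comparison fails, the conditional STREXEQ is skipped, so the preceding LDREX leaves the local exclusive monitor in the Exclusive state and nothing clears it before the interrupted code resumes:

/* Hypothetical compare-and-swap helper, ARMv6+, GCC inline assembly. */
static inline int cmpxchg_sketch(int *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        do {
                __asm__ __volatile__(
                "       ldrex   %1, [%2]\n"     /* marks the monitor Exclusive */
                "       mov     %0, #0\n"
                "       teq     %1, %3\n"
                "       strexeq %0, %4, [%2]\n" /* skipped when the values differ,
                                                   leaving the monitor set */
                : "=&r" (res), "=&r" (oldval)
                : "r" (ptr), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}

Returning through a path that executes CLREX (or a dummy STREX) guarantees the monitor is Open again, which is exactly what the patch below adds.
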
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9f6591571bf8..ff12bb2cce05 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -21,6 +21,7 @@ typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -47,24 +48,8 @@ static inline int atomic_backoff_delay(void)
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens. Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
*/
-static inline void atomic_set(atomic_t *v, int i)
-{
- unsigned long tmp;
-
- do {
- __asm__ __volatile__("@ atomic_set\n"
-"1: ldrex %0, [%1]\n"
-" strex %0, %2, [%1]\n"
- : "=&r" (tmp)
- : "r" (&v->counter), "r" (i)
- : "cc");
- } while (tmp && atomic_backoff_delay());
-}
-
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
@@ -143,8 +128,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#define atomic_set(v,i) (((v)->counter) = (i))
-
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
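
For context, a sketch of the LDREX/STREX update loop that the remaining ARMv6 atomic operations, such as atomic_add_return() above, keep using (illustration only; the kernel's exact code may differ slightly). atomic_set() no longer needs a loop of its own: on SMP a plain store clears the global monitor for the address, and on UP the exception return path now clears the local monitor, so an interrupted sequence like this one fails its STREX and retries with the newly stored value.

static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add_return sketch\n"
"1:     ldrex   %0, [%2]\n"             /* load counter, monitor -> Exclusive */
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"         /* fails if the monitor was cleared */
"       teq     %1, #0\n"
"       bne     1b"                     /* retry until the store succeeds */
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}
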
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 6d0e25b24682..c5ac65a46591 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -753,13 +753,6 @@ ENTRY(__switch_to)
#ifdef CONFIG_MMU
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
- clrex
-#else
- strex r5, r4, [ip] @ Clear exclusive monitor
-#endif
-#endif
#if defined(CONFIG_HAS_TLS_REG)
mcr p15, 0, r3, c13, c0, 3 @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
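
The lines removed above cleared the exclusive monitor in __switch_to(); this is now done on the exception return path instead (see the entry-header.S changes below). As a hedged illustration of the technique itself: v6K and later CPUs have the dedicated CLREX instruction, while plain ARMv6 has to use a "dummy" STREX, discarding both the stored value and the success flag. The helper below is hypothetical, not part of this patch:

/* Hypothetical helper; 'scratch' must point at a word we are allowed to clobber. */
static inline void clear_exclusive_monitor_sketch(unsigned long *scratch)
{
#if defined(CONFIG_CPU_32v6K)
        __asm__ __volatile__("clrex" : : : "memory");
#else
        unsigned long tmp;

        /* STREX leaves the local monitor Open whether or not the store happens. */
        __asm__ __volatile__(
        "       strex   %0, %1, [%2]\n"
        : "=&r" (tmp)
        : "r" (0UL), "r" (scratch)
        : "memory");
#endif
}
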
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index b185c945fb02..38e202d194f8 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -88,6 +88,7 @@
.endm
.macro v7m_exception_fast_exit
+ clrex @ clear the exclusive monitor
ldmia sp!, {r4-r12, lr} @ restore previously saved state
cmp lr, #0xfffffffd @ check the return stack
it eq
@@ -97,6 +98,7 @@
.endm
.macro v7m_exception_slow_exit ret_r0
+ clrex @ clear the exclusive monitor
cpsid i
ldr lr, [sp, #S_EXC_LR] @ read exception LR
cmp lr, #0xfffffffd @ check the return stack
@@ -160,13 +162,25 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+ ldr r0, [sp]
+ strex r1, r2, [sp] @ clear the exclusive monitor
+ ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
+#endif
.endm
.macro restore_user_regs, fast = 0, offset = 0
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+ strex r1, r2, [sp] @ clear the exclusive monitor
+#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
.else
@@ -182,6 +196,7 @@
.endm
#else /* CONFIG_THUMB2_KERNEL */
.macro svc_exit, rpsr
+ clrex @ clear the exclusive monitor
ldr r0, [sp, #S_SP] @ top of the stack
ldr r1, [sp, #S_PC] @ return address
tst r0, #4 @ orig stack 8-byte aligned?
@@ -203,6 +218,7 @@
.endm
#else /* !CONFIG_CPU_V7M */
.macro restore_user_regs, fast = 0, offset = 0
+ clrex @ clear the exclusive monitor
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
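
Putting the pieces together, a sketch (hypothetical and annotated; not kernel code) of the scenario the CLREX/dummy STREX on the exception return path handles once atomic_set() is a plain store. On a uniprocessor an ordinary STR does not touch the local exclusive monitor, so it is the clearing on exception return that forces an interrupted LDREX/STREX sequence to retry:

static inline int atomic_inc_return_sketch(int *counter)
{
        int newval;
        unsigned long failed;

        __asm__ __volatile__(
        "1:     ldrex   %0, [%2]\n"     /* monitor -> Exclusive */
        "       add     %0, %0, #1\n"
        /*
         * If an exception is taken here and the handler (or a task it lets
         * run) performs atomic_set(), i.e. a plain STR to *counter, the
         * CLREX/dummy STREX on the return path clears the monitor, so the
         * STREX below fails and the loop rereads the stored value.
         */
        "       strex   %1, %0, [%2]\n"
        "       teq     %1, #0\n"
        "       bne     1b"
        : "=&r" (newval), "=&r" (failed)
        : "r" (counter)
        : "cc", "memory");

        return newval;
}

The same reasoning applies to user-space LDREX/STREX sequences (e.g. futex-style locks) interrupted by the kernel, which is why restore_user_regs clears the monitor as well.
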