| author | Catalin Marinas <catalin.marinas@arm.com> | 2009-09-14 14:00:13 +0100 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2009-09-14 14:06:29 +0100 |
| commit | fa907a1e8eb0c20a8c0b7b84b13b9363adc3f4d1 (patch) | |
| tree | 623a399f0e1f7fdf90d6b0baf5ef685bc6c16c2c /arch/arm/include | |
| parent | 9ccb64e4df7de0f4ef160b79af9b0e78879643ca (diff) | |
Clear the exclusive monitor when returning from an exception
The patch adds a CLREX or dummy STREX to the exception return path. This
is needed because several atomic/locking operations use an LDREX/STREXEQ
pair in which the EQ condition may not always be satisfied. That leaves
the exclusive monitor set and can cause problems for atomic/locking
operations in the interrupted code.
With this patch, the atomic_set() operation can be a simple STR
instruction (on SMP systems the global exclusive monitor is cleared by
STR anyway). Clearing the exclusive monitor during context switch is no
longer needed, as it is now handled by the exception return path.
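The exception-return change itself lives in the assembly entry code and is therefore not visible in this diffstat, which is limited to arch/arm/include. Purely as an illustration of the mechanism: clearing the local exclusive monitor takes either a CLREX (ARMv6K and later) or a dummy STREX to a scratch location, because a STREX clears the local monitor whether or not the store succeeds. The helper name and preprocessor test below are hypothetical, not taken from the patch:

```c
/*
 * Illustration only -- not part of this diff.  The real change adds a
 * CLREX or a dummy STREX to the assembly exception return macros; this
 * hypothetical C helper just shows the two ways the local exclusive
 * monitor can be cleared.
 */
static inline void clear_exclusive_monitor(void)
{
#if defined(CONFIG_CPU_32v6K) || __LINUX_ARM_ARCH__ >= 7
	/* ARMv6K and later have a dedicated instruction for this. */
	__asm__ __volatile__("clrex" : : : "memory");
#else
	unsigned long tmp, scratch = 0;

	/*
	 * On plain ARMv6, a STREX to any address clears the local monitor
	 * regardless of whether the store goes through; both the stored
	 * value and the success flag are discarded here.
	 */
	__asm__ __volatile__(
	"	strex	%0, %1, [%2]\n"
	: "=&r" (tmp)
	: "r" (0UL), "r" (&scratch)
	: "memory");
	(void)tmp;
#endif
}
```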
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/atomic.h | 21 |
1 files changed, 2 insertions, 19 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9f6591571bf8..ff12bb2cce05 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -21,6 +21,7 @@ typedef struct { volatile int counter; } atomic_t;
 #ifdef __KERNEL__
 
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
@@ -47,24 +48,8 @@ static inline int atomic_backoff_delay(void)
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	do {
-		__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-	} while (tmp && atomic_backoff_delay());
-}
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -143,8 +128,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;