path: root/arch/arm/include/asm/system.h
author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2009-05-25 20:58:00 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2009-05-28 19:39:27 +0100
commit    bac4e960b5ce2453d862beaf20e59aa68af3b43a (patch)
tree      69ba3b450a769fa4a613a1f8c4e6454cdcfae5aa /arch/arm/include/asm/system.h
parent    290815710b51de23f9ed6799d3e0bb762d4f907c (diff)
[ARM] barriers: improve xchg, bitops and atomic SMP barriers
Mathieu Desnoyers pointed out that the ARM barriers were lacking:

- cmpxchg, xchg and atomic add return need memory barriers on
  architectures which can reorder the relative order in which memory
  read/writes can be seen between CPUs, which seems to include recent
  ARM architectures. Those barriers are currently missing on ARM.

- test_and_xxx_bit were missing SMP barriers.

So put these barriers in. Provide separate atomic_add/atomic_sub
operations which do not require barriers.

Reported-Reviewed-and-Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
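For context, the distinction the commit message draws can be sketched in portable C11 atomics: a value-returning atomic op is expected to be a full barrier, while the separate barrier-free add/sub the patch provides is appropriate for callers that need no ordering. This is a minimal illustrative sketch, not the kernel's actual implementation; the helper names are made up.

#include <stdatomic.h>

/* Fully-ordered add-and-return: analogous to the patched kernel
 * atomic_add_return(), which brackets the update with smp_mb() so it
 * acts as a full memory barrier between CPUs. */
static inline int add_return_ordered(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_seq_cst) + i;
}

/* Barrier-free variant: analogous to the separate atomic_add() the
 * patch provides for callers that need no ordering guarantee, e.g. a
 * plain statistics counter. */
static inline void add_relaxed(atomic_int *v, int i)
{
	atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}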
Diffstat (limited to 'arch/arm/include/asm/system.h')
-rw-r--r--  arch/arm/include/asm/system.h | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index bd4dc8ed53d5..7fce8f3b391d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif
 
+	smp_mb();
+
 	switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
 	case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
+	smp_mb();
 
 	return ret;
 }
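The resulting shape of __xchg() is a full barrier on either side of the exclusive load/store loop, so the exchange is fully ordered with respect to other CPUs. A hypothetical user-space sketch of that pattern for the 32-bit ARMv7 case follows; the raw dmb stands in for the kernel's smp_mb(), and the function name is made up:

/* Sketch of the patched __xchg() barrier placement (ARMv7, 32-bit):
 * dmb before and after the ldrex/strex retry loop mirrors the
 * smp_mb() calls the patch adds before the switch and before the
 * return. */
static inline unsigned long xchg_u32(volatile unsigned long *ptr,
				     unsigned long x)
{
	unsigned long ret, tmp;

	__asm__ __volatile__("dmb" ::: "memory");	/* leading smp_mb() */
	__asm__ __volatile__(
	"1:	ldrex	%0, [%3]\n"			/* ret = *ptr (exclusive load) */
	"	strex	%1, %2, [%3]\n"			/* try *ptr = x */
	"	teq	%1, #0\n"			/* exclusive store failed? */
	"	bne	1b"				/* retry if we lost the race */
		: "=&r" (ret), "=&r" (tmp)
		: "r" (x), "r" (ptr)
		: "memory", "cc");
	__asm__ __volatile__("dmb" ::: "memory");	/* trailing smp_mb() */

	return ret;
}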