| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-03 10:39:02 -0700 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-03 10:39:02 -0700 |
| commit    | 7e31aa11fc672bbe0dd0da59513c9efe3809ced7 (patch) | |
| tree      | 0aafe6a0045230950ce6b26579a053da12614a49 /arch/arm/include/asm/atomic.h | |
| parent    | 071f4924844c435a3ae0cdbab7d7df2f1da85713 (diff) | |
| parent    | 9cb7117fa4858468014f76bd996076985111e955 (diff) | |
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm:
[ARM] 5182/1: pxa: Fix pcm990 compilation
[ARM] Fix explicit asm(-arm)?/arch-foo references
[ARM] move include/asm-arm to arch/arm/include/asm
[ARM] Remove explicit dependency for misc.o from compressed/Makefile
[ARM] initrd: claim initrd memory exclusively
[ARM] pxa: add support for L2 outer cache on XScale3 (attempt 2)
[ARM] 5180/1: at91: Fix at91_nand -> atmel_nand rename fallout
[ARM] add Sascha Hauer as Freescale i.MX Maintainer
[ARM] i.MX: add missing clock functions exports
[ARM] i.MX: remove set_imx_fb_info() export
[ARM] mx1ads: make mmc platform data available for modules
[ARM] mx2: add missing Kconfig dependency
Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r-- | arch/arm/include/asm/atomic.h | 212 |
1 files changed, 212 insertions, 0 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
new file mode 100644
index 000000000000..325f881ccb50
--- /dev/null
+++ b/arch/arm/include/asm/atomic.h
@@ -0,0 +1,212 @@
+/*
+ *  arch/arm/include/asm/atomic.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ATOMIC_H
+#define __ASM_ARM_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <asm/system.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+#define atomic_read(v)	((v)->counter)
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/*
+ * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.  Writing to 'v->counter'
+ * without using the following operations WILL break the atomic
+ * nature of these ops.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic_set\n"
+"1:	ldrex	%0, [%1]\n"
+"	strex	%0, %2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	add	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	unsigned long oldval, res;
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex	%1, [%2]\n"
+		"mov	%0, #0\n"
+		"teq	%1, %3\n"
+		"strexeq %0, %4, [%2]\n"
+		    : "=&r" (res), "=&r" (oldval)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__("@ atomic_clear_mask\n"
+"1:	ldrex	%0, [%2]\n"
+"	bic	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=&r" (tmp2)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+#else /* ARM_ARCH_6 */
+
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	raw_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	*addr &= ~mask;
+	raw_local_irq_restore(flags);
+}
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+#define atomic_inc(v)		(void) atomic_add_return(1, v)
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+/* Atomic operations are already serializing on ARM */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#include <asm-generic/atomic.h>
+#endif
+#endif
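For context on how these primitives are typically consumed, the sketch below mirrors the compare-and-swap retry loop that atomic_add_unless() implements on top of atomic_cmpxchg(). It is a minimal userspace illustration, not kernel code: it assumes the GCC/Clang __atomic builtins, and demo_add_unless() is a hypothetical name used only for this example.

/*
 * Userspace sketch of the atomic_add_unless() retry loop above.
 * Assumes GCC/Clang __atomic builtins; demo_add_unless() is a
 * hypothetical helper, not part of the kernel API.
 */
#include <stdio.h>

static int demo_add_unless(int *counter, int a, int u)
{
	int c = __atomic_load_n(counter, __ATOMIC_RELAXED);

	/* Keep retrying the CAS until it succeeds or the value equals 'u'. */
	while (c != u &&
	       !__atomic_compare_exchange_n(counter, &c, c + a, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;	/* a failed CAS reloads the current value into 'c' */

	return c != u;	/* non-zero if the addition was performed */
}

int main(void)
{
	int refcount = 1;

	/* Mirrors atomic_inc_not_zero(): take a reference only if non-zero. */
	if (demo_add_unless(&refcount, 1, 0))
		printf("reference taken, refcount = %d\n", refcount);

	return 0;
}

The ldrex/strex loops in the ARMv6 branch achieve the same effect directly in hardware: the store-exclusive fails, and the loop retries, whenever the location was touched between the load and the store, which is why the file's comment warns against writing v->counter by any other means.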