author		Peter Zijlstra <peterz@infradead.org>	2015-04-23 21:44:42 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 14:06:22 +0200
commit		d835b6c4cc02507b3bf3f8ee6c86857cf0ee67ab (patch)
tree		4bee7d7516eb8f382d137cde96b46fe2b6eea442 /arch/blackfin/include
parent		f8a570e270bf62363cd498ac2ac8ea07a76ad4d6 (diff)
blackfin: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
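As a rough illustration of the replacement described above (not part of this patch; BUSY_FLAG, state and example_toggle_busy() are hypothetical names), a caller of the old mask helpers would migrate like this:

#include <linux/atomic.h>

#define BUSY_FLAG	(1 << 0)		/* hypothetical flag bit */
static atomic_t state = ATOMIC_INIT(0);		/* hypothetical shared state */

static void example_toggle_busy(void)
{
	/* old style, via the mask helpers this patch deprecates */
	atomic_set_mask(BUSY_FLAG, &state);
	atomic_clear_mask(BUSY_FLAG, &state);

	/* equivalent updates via the new generic logic ops */
	atomic_or(BUSY_FLAG, &state);
	atomic_and(~BUSY_FLAG, &state);
}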
Diffstat (limited to 'arch/blackfin/include')
-rw-r--r--	arch/blackfin/include/asm/atomic.h	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98e9978..eafa55b81a7b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,33 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #endif
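One possible shape for the TODO in the commit message -- collapsing the repeated declarations and wrapper defines with a generator macro -- sketched under the assumption that every __raw_atomic_<op>_asm helper keeps the signature declared in this header; the ATOMIC_OP name is illustrative and not introduced by this patch:

/* Hypothetical generator macro, not part of this patch: emits the asm
 * helper declaration and a void wrapper for one logic op. */
#define ATOMIC_OP(op)							\
asmlinkage int __raw_atomic_##op##_asm(volatile int *ptr, int value);	\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	(void)__raw_atomic_##op##_asm(&(v)->counter, i);		\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

Each expansion would behave the same as the open-coded #defines added by this patch, while keeping the per-op boilerplate in one place.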