author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 15:48:00 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 15:48:00 +0200
commit     dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree       9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/mn10300/include
parent     d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent     2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
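The third item above is the key structural change: once an architecture supplies a compare-and-swap primitive, every other atomic method can be generated from it with the same ATOMIC_OP token-pasting pattern that the mn10300 diff below uses. A minimal standalone sketch of that idea follows; it is illustrative userspace C, not the kernel's asm-generic/atomic.h: atomic_t, atomic_read() and atomic_cmpxchg() are defined locally, and GCC's __sync_val_compare_and_swap() stands in for the architecture's cmpxchg().

/* Illustrative sketch: generate atomic_<op>_return() from a single
 * compare-and-swap primitive.  Standalone userspace C with GCC
 * builtins; the kernel's real generic code differs in detail.
 */
typedef struct { int counter; } atomic_t;

#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/* stand-in for the architecture-provided cmpxchg() */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __sync_val_compare_and_swap(&v->counter, old, new);
}

/* generate the full read-modify-write op from cmpxchg() alone:
 * retry until the value we computed from is still the current one */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int old, new;							\
									\
	do {								\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (atomic_cmpxchg(v, old, new) != old);			\
									\
	return new;							\
}

ATOMIC_OP_RETURN(add, +)	/* atomic_add_return() */
ATOMIC_OP_RETURN(sub, -)	/* atomic_sub_return() */

Each ATOMIC_OP_RETURN() invocation expands to one complete function, which is exactly the consolidation trick the per-architecture "Fold atomic_ops" patches apply: one macro definition replaces many near-identical hand-written functions.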
Diffstat (limited to 'arch/mn10300/include')
-rw-r--r--  arch/mn10300/include/asm/atomic.h | 125
1 file changed, 42 insertions(+), 83 deletions(-)
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index cadeb1e2cdfc..5be655e83e70 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -33,7 +33,6 @@
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) (ACCESS_ONCE((v)->counter))
@@ -43,102 +42,62 @@
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int retval;
-#ifdef CONFIG_SMP
- int status;
-
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " add %5,%1 \n"
- " mov %1,(_ADR,%3) \n"
- " mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(retval), "=m"(v->counter)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
- : "memory", "cc");
-
-#else
- unsigned long flags;
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int retval, status; \
+ \
+ asm volatile( \
+ "1: mov %4,(_AAR,%3) \n" \
+ " mov (_ADR,%3),%1 \n" \
+ " " #op " %5,%1 \n" \
+ " mov %1,(_ADR,%3) \n" \
+ " mov (_ADR,%3),%0 \n" /* flush */ \
+ " mov (_ASR,%3),%0 \n" \
+ " or %0,%0 \n" \
+ " bne 1b \n" \
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
+ : "memory", "cc"); \
+}
- flags = arch_local_cli_save();
- retval = v->counter;
- retval += i;
- v->counter = retval;
- arch_local_irq_restore(flags);
-#endif
- return retval;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int retval, status; \
+ \
+ asm volatile( \
+ "1: mov %4,(_AAR,%3) \n" \
+ " mov (_ADR,%3),%1 \n" \
+ " " #op " %5,%1 \n" \
+ " mov %1,(_ADR,%3) \n" \
+ " mov (_ADR,%3),%0 \n" /* flush */ \
+ " mov (_ASR,%3),%0 \n" \
+ " or %0,%0 \n" \
+ " bne 1b \n" \
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
+ : "memory", "cc"); \
+ return retval; \
}
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int retval;
-#ifdef CONFIG_SMP
- int status;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " sub %5,%1 \n"
- " mov %1,(_ADR,%3) \n"
- " mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(retval), "=m"(v->counter)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
- : "memory", "cc");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-#else
- unsigned long flags;
- flags = arch_local_cli_save();
- retval = v->counter;
- retval -= i;
- v->counter = retval;
- arch_local_irq_restore(flags);
-#endif
- return retval;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
static inline void atomic_inc(atomic_t *v)
{
atomic_add_return(1, v);
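Callers are unaffected by the fold: ATOMIC_OPS(add) and ATOMIC_OPS(sub) generate the same atomic_add(), atomic_add_return(), atomic_sub() and atomic_sub_return() entry points that the removed open-coded versions provided. A short hypothetical caller for illustration (not part of this patch; assumes a kernel build context):

/* Hypothetical caller code, not from this patch.  The fold changes
 * how these ops are generated, not how they are called.
 */
#include <linux/atomic.h>

static atomic_t nr_users = ATOMIC_INIT(0);

static void user_get(void)
{
	atomic_add(1, &nr_users);	/* generated by ATOMIC_OP(add) */
}

static int user_put_was_last(void)
{
	/* generated by ATOMIC_OP_RETURN(sub) */
	return atomic_sub_return(1, &nr_users) == 0;
}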