| author | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 17:52:19 +0900 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2006-09-27 17:52:19 +0900 |
| commit | 781125ca58dfbd47635cfc0e408f1f9d7e10b227 (patch) | |
| tree | 92d44b262478d5cece046ff661694b1109ab1e9d /include/asm-sh/atomic.h | |
| parent | 15f57a29a19ad0dbb468363cb617b06f71f6de92 (diff) | |
sh: New atomic ops for SH-4A movli.l/movco.l
SH-4A implements LL/SC instructions, so we implement a simple
set of atomic operations using these.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
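For readers who have not met the SH-4A primitives before: movli.l loads the counter into r0 and establishes a reservation, movco.l stores r0 back only if that reservation still holds (setting the T bit on success), and bf loops back to retry on failure. Below is a minimal stand-alone sketch of that retry loop, not the patch's exact code: the constraints are simplified (the counter address is passed purely as an input, with a "memory" clobber), and llsc_add is a made-up name used only for illustration.

```c
/* Hypothetical helper sketching the movli.l/movco.l retry loop,
 * assuming an SH-4A target; not the code added by this patch. */
static inline void llsc_add(int i, volatile int *counter)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l	@%1, %0		\n"	/* load-linked: r0 = *counter          */
"	add	%2, %0		\n"	/* r0 += i                             */
"	movco.l	%0, @%1		\n"	/* store-conditional: T = 1 on success */
"	bf	1b		\n"	/* reservation lost? retry             */
	: "=&z" (tmp)			/* 'z' pins tmp to r0, as the ISA requires */
	: "r" (counter), "r" (i)
	: "t", "memory");
}
```

The same skeleton, with add swapped for sub, and, or or, covers every operation the patch converts; the *_return variants additionally append a synco barrier after the successful store and hand back the value left in r0.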
Diffstat (limited to 'include/asm-sh/atomic.h')
-rw-r--r-- | include/asm-sh/atomic.h | 105 |
1 file changed, 96 insertions, 9 deletions
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 049eb2dda6b6..8bdc1ba56f73 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -22,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-
-static __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_add		\n"
+"	add	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v += i;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_sub		\n"
+"	sub	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v -= i;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_add_return	\n"
+"	add	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+"	synco						\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;
 
 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp += i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif
 
 	return temp;
 }
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long temp, flags;
+	unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_sub_return	\n"
+"	sub	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+"	synco						\n"
+	: "=&z" (temp), "=r" (&v->counter)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+#else
+	unsigned long flags;
 
 	local_irq_save(flags);
 	temp = *(long *)v;
 	temp -= i;
 	*(long *)v = temp;
 	local_irq_restore(flags);
+#endif
 
 	return temp;
 }
@@ -119,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_clear_mask	\n"
+"	and	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (~mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v &= ~mask;
 	local_irq_restore(flags);
+#endif
 }
 
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
+#ifdef CONFIG_CPU_SH4A
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l	@%3, %0		! atomic_set_mask	\n"
+"	or	%2, %0					\n"
+"	movco.l	%0, @%3					\n"
+"	bf	1b					\n"
+	: "=&z" (tmp), "=r" (&v->counter)
+	: "r" (mask), "r" (&v->counter)
+	: "t");
+#else
 	unsigned long flags;
 
 	local_irq_save(flags);
 	*(long *)v |= mask;
 	local_irq_restore(flags);
+#endif
 }
 
 /* Atomic operations are already serializing on SH */
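Callers are unaffected by the CONFIG_CPU_SH4A split: the header keeps exporting the usual atomic_t operations, and only the implementation behind them changes between the LL/SC loop and the IRQ-masking fallback. A hypothetical user of the interface, purely for illustration (refs, get_ref() and put_ref() are invented names; ATOMIC_INIT() is the initializer this header already provides):

```c
#include <asm/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);

/* Take a reference only if the object is still live (count != 0). */
static int get_ref(void)
{
	return atomic_inc_not_zero(&refs);
}

/* Drop a reference; atomic_sub_return() reports the post-decrement
 * value, so the caller can tell when the last reference went away. */
static int put_ref(void)
{
	return atomic_sub_return(1, &refs) == 0;
}
```

On SH-4A the atomic_sub_return() above compiles down to the movli.l/movco.l/synco loop from the diff; on other SH parts it falls back to the local_irq_save()/local_irq_restore() path.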