author		Stuart Menefy <stuart.menefy@st.com>	2007-11-30 16:12:36 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2008-01-28 13:18:58 +0900
commit		1efe4ce3ca126da77e450d5a83f7201949d76f62
tree		fbae9902aa4103a9e86d06f841d580f24682e7b3	/include/asm-sh/bitops-grb.h
parent		53ff09422e5e7a6d6198b767c8f494e43ec8e3ae
sh: GUSA atomic rollback support.
This implements kernel-level atomic rollback built on top of gUSA,
as an alternative non-IRQ based atomicity method. This is generally
a faster method for platforms that are lacking the LL/SC pairs that
SH-4A and later use, and is only supportable on legacy cores.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
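For readers unfamiliar with gUSA, the sketch below (editor's illustration, not part of this commit; the helper name gusa_atomic_add_sketch is hypothetical) shows the general shape of a rollback region that the bitops in this patch follow: r0 points at the end label, r1 saves the real stack pointer, and r15 is loaded with the negative byte size of the critical section ("LOGIN"). If the kernel has to interrupt the sequence while r15 is negative, it rolls execution back to the start of the region instead of resuming mid-sequence, so the load-modify-store behaves atomically on UP without masking interrupts.

/*
 * Editor's sketch only, mirroring the pattern used by set_bit() in the
 * diff below. The function name is hypothetical and not part of the patch.
 */
static inline void gusa_atomic_add_sketch(int i, volatile int *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = address of label 1 (end point) */
		"   mov    r15,   r1      \n\t" /* r1 = saved stack pointer */
		"   mov    #-6,   r15     \n\t" /* LOGIN: critical region is 6 bytes */
		"   mov.l  @%1,   %0      \n\t" /* load old value */
		"   add     %2,   %0      \n\t" /* modify it */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT: restore the real sp */
		: "=&r" (tmp)
		: "r" (v), "r" (i)
		: "memory", "r0", "r1");
}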
Diffstat (limited to 'include/asm-sh/bitops-grb.h')
-rw-r--r--	include/asm-sh/bitops-grb.h	169
1 file changed, 169 insertions, 0 deletions
diff --git a/include/asm-sh/bitops-grb.h b/include/asm-sh/bitops-grb.h
new file mode 100644
index 000000000000..a5907b94395b
--- /dev/null
+++ b/include/asm-sh/bitops-grb.h
@@ -0,0 +1,169 @@
+#ifndef __ASM_SH_BITOPS_GRB_H
+#define __ASM_SH_BITOPS_GRB_H
+
+static inline void set_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   or      %2,   %0      \n\t" /* or */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r" (a)
+		: "r" (mask)
+		: "memory" , "r0", "r1");
+}
+
+static inline void clear_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = ~(1 << (nr & 0x1f));
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   and     %2,   %0      \n\t" /* and */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r" (a)
+		: "r" (mask)
+		: "memory" , "r0", "r1");
+}
+
+static inline void change_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%1,   %0      \n\t" /* load old value */
+		"   xor     %2,   %0      \n\t" /* xor */
+		"   mov.l   %0,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "+r" (a)
+		: "r" (mask)
+		: "memory" , "r0", "r1");
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,   r15     \n\t" /* LOGIN: r15 = size */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t"
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retvat = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   or      %3,   %0      \n\t"
+		"   mov.l   %0,   @%2     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r" (a)
+		: "r" (mask)
+		: "memory" , "r0", "r1" ,"t");
+
+	return retval;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int	mask, retval,not_mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	not_mask = ~mask;
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,   r15     \n\t" /* LOGIN */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t" /* %1 = *a */
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retvat = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   and     %4,   %0      \n\t"
+		"   mov.l   %0,   @%2     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r" (a)
+		: "r" (mask),
+		  "r" (not_mask)
+		: "memory" , "r0", "r1", "t");
+
+	return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"   .align 2              \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov   #-14,   r15     \n\t" /* LOGIN */
+		"   mov.l  @%2,   %0      \n\t" /* load old value */
+		"   mov     %0,   %1      \n\t" /* %1 = *a */
+		"   tst     %1,   %3      \n\t" /* T = ((*a & mask) == 0) */
+		"   mov    #-1,   %1      \n\t" /* retvat = -1 */
+		"   negc    %1,   %1      \n\t" /* retval = (mask & *a) != 0 */
+		"   xor     %3,   %0      \n\t"
+		"   mov.l   %0,   @%2     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (tmp),
+		  "=&r" (retval),
+		  "+r" (a)
+		: "r" (mask)
+		: "memory" , "r0", "r1", "t");
+
+	return retval;
+}
+#endif /* __ASM_SH_BITOPS_GRB_H */