| author | Anton Blanchard <anton@samba.org> | 2010-02-10 01:04:06 +0000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2010-02-17 14:03:15 +1100 |
| commit | f10e2e5b4b4c9937de596f96ffe028be3a565598 (patch) | |
| tree | 89808d05159ac5bd4aeea53b52dc0ddb3373fe65 /arch/powerpc/include/asm/atomic.h | |
| parent | 66d99b883419b8df6d0a24ca957da7ab4831cf6e (diff) | |
powerpc: Rename LWSYNC_ON_SMP to PPC_RELEASE_BARRIER, ISYNC_ON_SMP to PPC_ACQUIRE_BARRIER
For performance reasons we are about to change ISYNC_ON_SMP to sometimes be
lwsync. Now that the macro name doesn't make sense, change it and LWSYNC_ON_SMP
to better explain what the barriers are doing.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
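To make the intent of the new names concrete, here is a minimal sketch of how a release barrier before a lwarx/stwcx. loop and an acquire barrier after it produce a fully ordered atomic-and-return operation. The macro names come from this patch, but the expansions shown, the function `example_add_return`, and its `counter` argument are illustrative assumptions; the kernel's real definitions live in asm/synch.h and handle UP builds and CPU quirks that are omitted here.

```c
/*
 * Illustrative sketch only (not the kernel's asm/synch.h): assumed SMP
 * expansions for the renamed barrier macros, and how they bracket a
 * load-reserve/store-conditional loop.
 */
#define PPC_RELEASE_BARRIER "lwsync\n"  /* orders earlier loads/stores before the atomic */
#define PPC_ACQUIRE_BARRIER "isync\n"   /* orders the atomic before later loads/stores */

/* A fully ordered fetch-and-add in the same style as atomic_add_return(). */
static inline int example_add_return(int a, int *counter)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2\n"	/* load-reserve the counter */
"	add	%0,%1,%0\n"
"	stwcx.	%0,0,%2\n"	/* store-conditional; fails if the reservation was lost */
"	bne-	1b\n"		/* retry until the store succeeds */
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (counter)
	: "cc", "memory");

	return t;
}
```

The acquire side relies on isync following a branch that depends on the stwcx. result, which is why a plain isync can act as an acquire barrier here; the release side is an ordinary lwsync ahead of the reservation.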
Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
-rw-r--r-- | arch/powerpc/include/asm/atomic.h | 48 |
1 file changed, 24 insertions, 24 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 4012483b1899..b8f152ece025 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 	int t;
 
 	__asm__ __volatile__ (
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq-	2f \n\
@@ -202,7 +202,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)
@@ -227,7 +227,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	cmpwi	%0,1\n\
 	addi	%0,%0,-1\n\
@@ -235,7 +235,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	"\n\
 2:"	: "=&b" (t)
 	: "r" (&v->counter)
@@ -286,12 +286,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_add_return\n\
 	add	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -320,12 +320,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
 	subf	%0,%1,%0\n\
 	stdcx.	%0,0,%2 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -352,12 +352,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
 	addic	%0,%0,1\n\
 	stdcx.	%0,0,%1 \n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -394,12 +394,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
 	addic	%0,%0,-1\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
@@ -419,13 +419,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	long t;
 
 	__asm__ __volatile__(
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n\
 	stdcx.	%0,0,%1\n\
 	bne-	1b"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -451,14 +451,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 	long t;
 
 	__asm__ __volatile__ (
-	LWSYNC_ON_SMP
+	PPC_RELEASE_BARRIER
 "1:	ldarx	%0,0,%1		# atomic_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq-	2f \n\
 	add	%0,%2,%0 \n"
 "	stdcx.	%0,0,%1 \n\
 	bne-	1b \n"
-	ISYNC_ON_SMP
+	PPC_ACQUIRE_BARRIER
 "	subf	%0,%2,%0 \n\
 2:"
 	: "=&r" (t)