author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 13:17:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 13:17:24 -0700
commit    4e3408d9f71a70316ebe844c20ef0d7715281f84 (patch)
tree      365f67fbcbe8e047a5fbead3db5d2e7ac20b3618 /arch/powerpc/include
parent    a66a50054e46ec2a03244bc14c48b9125fcd75a7 (diff)
parent    96910b6dc8a4fdb75e69f09f47b62d41743d36ba (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (32 commits)
  locking, m68k/asm-offsets: Rename signal defines
  locking: Inline spinlock code for all locking variants on s390
  locking: Simplify spinlock inlining
  locking: Allow arch-inlined spinlocks
  locking: Move spinlock function bodies to header file
  locking, m68k: Calculate thread_info offset with asm offset
  locking, m68k/asm-offsets: Rename pt_regs offset defines
  locking, sparc: Rename __spin_try_lock() and friends
  locking, powerpc: Rename __spin_try_lock() and friends
  lockdep: Remove recursion stattistics
  lockdep: Simplify lock_stat seqfile code
  lockdep: Simplify lockdep_chains seqfile code
  lockdep: Simplify lockdep seqfile code
  lockdep: Fix missing entries in /proc/lock_chains
  lockdep: Fix missing entry in /proc/lock_stat
  lockdep: Fix memory usage info of BFS
  lockdep: Reintroduce generation count to make BFS faster
  lockdep: Deal with many similar locks
  lockdep: Introduce lockdep_assert_held()
  lockdep: Fix style nits
  ...
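The powerpc change in the diff below is a pure rename: the inline helpers that return the raw old value of the lock word move from double-underscore names (__spin_trylock and friends) to arch_* names, while the __raw_* wrappers that turn that value into a boolean stay as they are. What follows is a minimal, standalone C sketch of that calling convention; the C11 atomic exchange stands in for the real lwarx/stwcx. sequence in asm/spinlock.h, and demo_spinlock_t is a made-up type for illustration, not the kernel's raw_spinlock_t.

/*
 * Sketch of the arch_spin_trylock() / __raw_spin_trylock() split.
 * The arch-level helper returns the old value of the lock word
 * (0 means the lock was free and we now own it); the wrapper
 * converts that into the 0/1 result the generic code expects.
 * The atomic exchange is only a stand-in for the powerpc asm.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_ulong slock;	/* 0 == unlocked, non-zero == held */
} demo_spinlock_t;		/* hypothetical, not raw_spinlock_t */

/* Returns the old value in the lock: 0 means we took it. */
static inline unsigned long arch_spin_trylock(demo_spinlock_t *lock)
{
	return atomic_exchange_explicit(&lock->slock, 1UL,
					memory_order_acquire);
}

/* Generic-facing wrapper: non-zero return means "lock acquired". */
static inline int __raw_spin_trylock(demo_spinlock_t *lock)
{
	return arch_spin_trylock(lock) == 0;
}

static inline void __raw_spin_unlock(demo_spinlock_t *lock)
{
	atomic_store_explicit(&lock->slock, 0UL, memory_order_release);
}

int main(void)
{
	demo_spinlock_t lock = { .slock = 0 };

	printf("first trylock:  %d\n", __raw_spin_trylock(&lock));	/* 1 */
	printf("second trylock: %d\n", __raw_spin_trylock(&lock));	/* 0 */
	__raw_spin_unlock(&lock);
	return 0;
}

Keeping the "old value" semantics in the arch_* helper is what lets __raw_spin_lock() and __raw_spin_trylock() share one primitive: both simply compare the returned value against 0, as the hunks below show.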
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/spinlock.h  20
1 files changed, 10 insertions, 10 deletions
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
CLEAR_IO_SYNC;
- return __spin_trylock(lock) == 0;
+ return arch_spin_trylock(lock) == 0;
}
/*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
- if (likely(__spin_trylock(lock) == 0))
+ if (likely(arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
CLEAR_IO_SYNC;
while (1) {
- if (likely(__spin_trylock(lock) == 0))
+ if (likely(arch_spin_trylock(lock) == 0))
break;
local_save_flags(flags_dis);
local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
{
long tmp, token;
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
while (1) {
- if (likely(__read_trylock(rw) > 0))
+ if (likely(arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
while (1) {
- if (likely(__write_trylock(rw) == 0))
+ if (likely(arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
- return __read_trylock(rw) > 0;
+ return arch_read_trylock(rw) > 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
- return __write_trylock(rw) == 0;
+ return arch_write_trylock(rw) == 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
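
The rwlock half of the diff follows the same pattern: arch_read_trylock() returns the old lock value plus one (a result greater than 0 means the read lock was taken) and arch_write_trylock() returns the old value (0 means the write lock was taken), with the __raw_* wrappers reducing those results to booleans. Below is a minimal standalone sketch of those return conventions, assuming a signed lock word updated with C11 compare-and-swap in place of the powerpc lwarx/stwcx. loops; demo_rwlock_t and its lock-word encoding are illustrative, not the kernel's raw_rwlock_t.

/*
 * Sketch of the arch_read_trylock() / arch_write_trylock() return
 * conventions.  The lock word holds the reader count; a writer parks
 * a negative value in it.  The CAS loops stand in for the real asm.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	_Atomic long lock;	/* > 0: readers, 0: free, < 0: write-held */
} demo_rwlock_t;		/* hypothetical, not raw_rwlock_t */

/* Returns old value + 1; a result > 0 means the read lock was taken. */
static inline long arch_read_trylock(demo_rwlock_t *rw)
{
	long old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	while (old >= 0) {
		if (atomic_compare_exchange_weak_explicit(&rw->lock, &old,
				old + 1, memory_order_acquire,
				memory_order_relaxed))
			return old + 1;		/* acquired */
	}
	return old + 1;				/* write-held: <= 0 */
}

/* Returns the old value; 0 means the write lock was taken. */
static inline long arch_write_trylock(demo_rwlock_t *rw)
{
	long expected = 0;

	if (atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
			-1L, memory_order_acquire, memory_order_relaxed))
		return 0;			/* acquired */
	return expected;			/* non-zero: contended */
}

static inline int __raw_read_trylock(demo_rwlock_t *rw)
{
	return arch_read_trylock(rw) > 0;
}

static inline int __raw_write_trylock(demo_rwlock_t *rw)
{
	return arch_write_trylock(rw) == 0;
}

int main(void)
{
	demo_rwlock_t rw = { .lock = 0 };

	printf("read trylock:  %d\n", __raw_read_trylock(&rw));	/* 1 */
	printf("write trylock: %d\n", __raw_write_trylock(&rw));	/* 0: reader holds it */
	return 0;
}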