author    Linus Torvalds <torvalds@macmini.osdl.org>    2006-07-08 15:24:18 -0700
committer Linus Torvalds <torvalds@macmini.osdl.org>    2006-07-08 15:24:18 -0700
commit    b862f3b099f3ea672c7438c0b282ce8201d39dfc (patch)
tree      62f8cc2dc2b1c9abb6364b16f3b218a04d121f3e /include/asm-i386/spinlock.h
parent    e2a3d40258fe20d205f8ed592e1e2c0d5529c2e1 (diff)
i386: improve and correct inline asm memory constraints
Use "+m" rather than a combination of "=m" and "m" for improved clarity and consistency. This also fixes some inlines that incorrectly didn't tell the compiler that they read the old value at all, potentially causing the compiler to generate bogus code. It appear that all of those potential bugs were hidden by the use of extra "volatile" specifiers on the data structures in question, though. Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386/spinlock.h')
-rw-r--r--  include/asm-i386/spinlock.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 87c40f830653..d816c62a7a1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -65,7 +65,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	alternative_smp(
 		__raw_spin_lock_string,
 		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : : "memory");
+		"+m" (lock->slock) : : "memory");
 }
 
 /*
@@ -79,7 +79,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 	alternative_smp(
 		__raw_spin_lock_string_flags,
 		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : "r" (flags) : "memory");
+		"+m" (lock->slock) : "r" (flags) : "memory");
 }
 
 #endif
@@ -88,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	char oldval;
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->slock)
+		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
 	return oldval > 0;
 }
@@ -104,7 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 #define __raw_spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->slock) : : "memory"
+		:"+m" (lock->slock) : : "memory"
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 
 #define __raw_spin_unlock_string \
 	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->slock) \
+		:"=q" (oldval), "+m" (lock->slock) \
 		:"0" (oldval) : "memory"
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -199,13 +199,13 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
-				: "=m" (rw->lock) : : "memory");
+				: "+m" (rw->lock) : : "memory");
 }
 
 #endif /* __ASM_SPINLOCK_H */
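For completeness, a minimal stand-alone rendition of the xchg-based trylock pattern from the diff above, using the corrected "+m" constraint (an editorial sketch in user-space C; the function name is hypothetical, and this is not the kernel code itself). A holder would release the lock by storing 1 back, matching the "movb $1,%0" unlock string above.

#include <stdbool.h>

static inline bool trylock_sketch(volatile int *slock)
{
	char oldval;

	/* Atomically swap the low byte of *slock with 0.  On x86, xchg
	 * with a memory operand is implicitly locked, so no lock prefix
	 * is needed.  "+m" tells the compiler the lock word is both
	 * read and written by the asm. */
	asm volatile("xchgb %b0,%1"
		     : "=q" (oldval), "+m" (*slock)
		     : "0" (0)
		     : "memory");

	/* A positive old value (1) means the lock was free and is now ours. */
	return oldval > 0;
}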