| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-08-18 20:40:33 -0700 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-08-18 20:40:33 -0700 |
| commit | 5bc0b123dcb2bb65b0b1ec57e591459dcf583d3d | |
| tree | 6ee79d18fd716755d49d18c465c1b25fabc43597 /include/linux/spinlock.h | |
| parent | eefbc594abbb1b7e6e7eeadb65ae7c7538474210 | |
| parent | b36f4be3de1b123d8601de062e7dbfc904f305fb | |
Merge 3.11-rc6 into char-misc-next
We want these fixes in this tree.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux/spinlock.h')
| -rw-r--r-- | include/linux/spinlock.h | 14 |
1 file changed, 11 insertions(+), 3 deletions(-)
```diff
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
 #endif /*arch_spin_is_contended*/
 #endif
 
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily has to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, this LOAD can not escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE can not move into the critical section, smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
 /**
```
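For context on the hunk above: smp_mb__before_spinlock() is intended to be called immediately before taking a lock when a STORE issued before the critical section must not be reordered with a LOAD performed inside it (the try_to_wake_up() path is the motivating user of this barrier). Below is a minimal sketch of that pairing; the lock, the condition flag, the waiter pointer, and both functions are illustrative names, not code from this commit:

```c
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static int condition;			/* published by the waker */
static struct task_struct *waiter;	/* registered by the sleeper */

/*
 * Waker: the STORE to condition must not be reordered with the LOAD
 * of waiter inside the critical section, or both sides can miss each
 * other. smp_wmb() orders the STORE against the lock's own STORE,
 * and spin_lock()'s one-way barrier keeps the LOAD inside the region.
 */
void waker(void)
{
	condition = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();
	spin_lock(&demo_lock);
	if (waiter)			/* LOAD inside the critical section */
		wake_up_process(waiter);
	spin_unlock(&demo_lock);
}

/* Sleeper: registers itself under the lock, then checks the condition. */
void sleeper(void)
{
	spin_lock(&demo_lock);
	waiter = current;		/* STORE inside the critical section */
	spin_unlock(&demo_lock);
	if (!condition)			/* simplified; real code re-checks in a loop */
		schedule();
}
```

The point of the ordering: if the waker's LOAD of waiter sees NULL, the sleeper's STORE cannot yet be visible, so the sleeper's later LOAD of condition is guaranteed to see 1 and it skips the sleep; either the wakeup is delivered or the sleeper never blocks.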
