Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic-long.h    | 56
-rw-r--r--  include/asm-generic/atomic.h         |  4
-rw-r--r--  include/asm-generic/mutex-dec.h      |  8
-rw-r--r--  include/asm-generic/mutex-xchg.h     | 10
-rw-r--r--  include/asm-generic/qrwlock_types.h  |  4
-rw-r--r--  include/asm-generic/rwsem.h          | 21
6 files changed, 60 insertions, 43 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index a94cbebbc33d..f91093c2984c 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t;
#endif
#define ATOMIC_LONG_READ_OP(mo) \
-static inline long atomic_long_read##mo(atomic_long_t *l) \
+static inline long atomic_long_read##mo(const atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
@@ -43,6 +43,7 @@ static inline long atomic_long_read##mo(atomic_long_t *l) \
}
ATOMIC_LONG_READ_OP()
ATOMIC_LONG_READ_OP(_acquire)
+ATOMIC_LONG_READ_OP(_ctrl)
#undef ATOMIC_LONG_READ_OP
@@ -112,19 +113,23 @@ static inline void atomic_long_dec(atomic_long_t *l)
ATOMIC_LONG_PFX(_dec)(v);
}
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
- ATOMIC_LONG_PFX(_add)(i, v);
+#define ATOMIC_LONG_OP(op) \
+static inline void \
+atomic_long_##op(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_##op)(i, v); \
}
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+ATOMIC_LONG_OP(andnot)
- ATOMIC_LONG_PFX(_sub)(i, v);
-}
+#undef ATOMIC_LONG_OP
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
@@ -154,19 +159,24 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
return ATOMIC_LONG_PFX(_add_negative)(i, v);
}
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
- return (long)ATOMIC_LONG_PFX(_inc_return)(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
- return (long)ATOMIC_LONG_PFX(_dec_return)(v);
+#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
}
+ATOMIC_LONG_INC_DEC_OP(inc,)
+ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_INC_DEC_OP(dec,)
+ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_INC_DEC_OP
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
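
For reference, a minimal sketch of what the new ATOMIC_LONG_OP() macro above expands to for one operation (expansion written out by hand, not part of the patch):

static inline void
atomic_long_add(long i, atomic_long_t *l)
{
	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;

	ATOMIC_LONG_PFX(_add)(i, v);
}

The same body is generated for sub, and, or, xor and andnot, each forwarding to the corresponding ATOMIC_LONG_PFX(_##op) primitive.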
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index d4d7e337fdcb..74f1a3704d7a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^)
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#endif
/**
@@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^)
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#include <linux/irqflags.h>
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_dec_return(count) < 0))
+ if (unlikely(atomic_dec_return_acquire(count) < 0))
fail_fn(count);
}
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
- if (unlikely(atomic_dec_return(count) < 0))
+ if (unlikely(atomic_dec_return_acquire(count) < 0))
return -1;
return 0;
}
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_inc_return(count) <= 0))
+ if (unlikely(atomic_inc_return_release(count) <= 0))
fail_fn(count);
}
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
return 1;
return 0;
}
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* to ensure that any waiting tasks are woken up by the
* unlock slow path.
*/
- if (likely(atomic_xchg(count, -1) != 1))
+ if (likely(atomic_xchg_acquire(count, -1) != 1))
fail_fn(count);
}
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
- if (unlikely(atomic_xchg(count, 0) != 1))
+ if (unlikely(atomic_xchg_acquire(count, 0) != 1))
if (likely(atomic_xchg(count, -1) != 1))
return -1;
return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_xchg(count, 1) != 0))
+ if (unlikely(atomic_xchg_release(count, 1) != 0))
fail_fn(count);
}
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- int prev = atomic_xchg(count, 0);
+ int prev = atomic_xchg_acquire(count, 0);
if (unlikely(prev < 0)) {
/*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
* owner's unlock path needlessly, but that's not a problem
* in practice. ]
*/
- prev = atomic_xchg(count, prev);
+ prev = atomic_xchg_acquire(count, prev);
if (prev < 0)
prev = 0;
}
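
A hedged illustration (not part of the patch) of the acquire/release pairing these fastpaths rely on, using the same atomic_xchg_acquire()/atomic_xchg_release() primitives on a toy lock word; names are purely illustrative:

/* 1 = unlocked, 0 = locked */
static atomic_t toy_lock = ATOMIC_INIT(1);

static inline int toy_trylock(void)
{
	/* ACQUIRE: critical-section accesses cannot be hoisted above this */
	return atomic_xchg_acquire(&toy_lock, 0) == 1;
}

static inline void toy_unlock(void)
{
	/* RELEASE: critical-section accesses cannot be reordered below this */
	atomic_xchg_release(&toy_lock, 1);
}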
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 4d76f24df518..0abc6b6062fb 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
typedef struct qrwlock {
atomic_t cnts;
- arch_spinlock_t lock;
+ arch_spinlock_t wait_lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { \
.cnts = ATOMIC_INIT(0), \
- .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d48bf5a95cc1..d6d5dc98d7da 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+ if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
rwsem_down_read_failed(sem);
}
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
long tmp;
while ((tmp = sem->count) >= 0) {
- if (tmp == cmpxchg(&sem->count, tmp,
+ if (tmp == cmpxchg_acquire(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
return 1;
}
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
long tmp;
- tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count);
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
{
long tmp;
- tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
- tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+ tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count) < 0))
rwsem_wake(sem);
}
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
{
long tmp;
- tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+ /*
+ * When downgrading from exclusive to shared ownership,
+ * anything inside the write-locked region cannot leak
+ * into the read side. In contrast, anything in the
+ * read-locked region is ok to be re-ordered into the
+ * write side. As such, rely on RELEASE semantics.
+ */
+ tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
(atomic_long_t *)&sem->count);
if (tmp < 0)
rwsem_downgrade_wake(sem);
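
By way of example, the __down_read() change above ends up calling into the new ATOMIC_LONG_INC_DEC_OP() family from atomic-long.h; a hand expansion of ATOMIC_LONG_INC_DEC_OP(inc, _acquire), not part of the patch, looks like:

static inline long
atomic_long_inc_return_acquire(atomic_long_t *l)
{
	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;

	return (long)ATOMIC_LONG_PFX(_inc_return_acquire)(v);
}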