author    Linus Torvalds <torvalds@linux-foundation.org>  2015-11-03 16:10:43 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-03 16:10:43 -0800
commit    d63a9788650fcd999b34584316afee6bd4378f19 (patch)
tree      32135409a61fab9365621ad01adb58a8dc7c536d  /include/linux
parent    281422869942c19f05a08d4017c633d08d390938 (diff)
parent    6e490b0106a2118ee4c37c37847454a5c2dc6e32 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking changes from Ingo Molnar:
 "The main changes in this cycle were:

  - More gradual enhancements to atomic ops: new atomic*_read_ctrl()
    ops, synchronize atomic_{read,set}() ordering requirements between
    architectures, add atomic_long_t bitops.  (Peter Zijlstra)

  - Add _{relaxed|acquire|release}() variants for inc/dec atomics and
    use them in various locking primitives: mutex, rtmutex, mcs, rwsem.
    This enables weakly ordered architectures (such as arm64) to make
    use of more locking related optimizations.  (Davidlohr Bueso)

  - Implement atomic[64]_{inc,dec}_relaxed() on ARM.  (Will Deacon)

  - Futex kernel data cache footprint micro-optimization.  (Rasmus Villemoes)

  - pvqspinlock runtime overhead micro-optimization.  (Waiman Long)

  - misc smaller fixlets"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ARM, locking/atomics: Implement _relaxed variants of atomic[64]_{inc,dec}
  locking/rwsem: Use acquire/release semantics
  locking/mcs: Use acquire/release semantics
  locking/rtmutex: Use acquire/release semantics
  locking/mutex: Use acquire/release semantics
  locking/asm-generic: Add _{relaxed|acquire|release}() variants for inc/dec atomics
  atomic: Implement atomic_read_ctrl()
  atomic, arch: Audit atomic_{read,set}()
  atomic: Add atomic_long_t bitops
  futex: Force hot variables into a single cache line
  locking/pvqspinlock: Kick the PV CPU unconditionally when _Q_SLOW_VAL
  locking/osq: Relax atomic semantics
  locking/qrwlock: Rename ->lock to ->wait_lock
  locking/Documentation/lockstat: Fix typo - lokcing -> locking
  locking/atomics, cmpxchg: Privatize the inclusion of asm/cmpxchg.h
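
The headline change is the _{relaxed|acquire|release}() family for the
inc/dec atomics. As a rough illustration of the idiom (this is not code
from the merge; struct toy_obj and toy_obj_put() are hypothetical), a
reference-count put path can pair a release-ordered decrement with a
full barrier on the final drop:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct toy_obj {
            atomic_t refcnt;
            /* ... payload ... */
    };

    static void toy_obj_put(struct toy_obj *o)
    {
            /*
             * RELEASE: all earlier stores to *o are ordered before the
             * decrement, so whichever CPU sees the count hit zero also
             * sees our last writes to the object.
             */
            if (atomic_dec_return_release(&o->refcnt) == 0) {
                    /* Pairs with the release decrements of other CPUs. */
                    smp_mb__after_atomic();
                    kfree(o);
            }
    }

On a strongly ordered architecture this costs the same as a plain
atomic_dec_return(); on a weakly ordered one such as arm64 it avoids
one of the two barriers a fully ordered op implies, which is the class
of optimization this series is after.
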
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atomic.h | 118
1 file changed, 117 insertions(+), 1 deletion(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 00a5763e850e..27e580d232ca 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -4,6 +4,15 @@
#include <asm/atomic.h>
#include <asm/barrier.h>
+#ifndef atomic_read_ctrl
+static inline int atomic_read_ctrl(const atomic_t *v)
+{
+ int val = atomic_read(v);
+ smp_read_barrier_depends(); /* Enforce control dependency. */
+ return val;
+}
+#endif
+
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
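
The new atomic_read_ctrl() above annotates a control dependency: the
returned value is meant to feed a conditional branch, and that branch
orders later stores after the load (smp_read_barrier_depends() is a
no-op everywhere except Alpha). A hypothetical usage sketch, with
made-up variable names:

    static atomic_t ready;
    static atomic_t ack;

    static void toy_consumer(void)
    {
            if (atomic_read_ctrl(&ready)) {
                    /*
                     * Control dependency: this store cannot be hoisted
                     * above the read of 'ready', because it only happens
                     * on this branch. Later LOADS are not ordered by
                     * this; load->load ordering needs smp_rmb().
                     */
                    atomic_set(&ack, 1);
            }
    }
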
@@ -81,6 +90,30 @@
#endif
#endif /* atomic_add_return_relaxed */
+/* atomic_inc_return_relaxed */
+#ifndef atomic_inc_return_relaxed
+#define atomic_inc_return_relaxed atomic_inc_return
+#define atomic_inc_return_acquire atomic_inc_return
+#define atomic_inc_return_release atomic_inc_return
+
+#else /* atomic_inc_return_relaxed */
+
+#ifndef atomic_inc_return_acquire
+#define atomic_inc_return_acquire(...) \
+ __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return_release
+#define atomic_inc_return_release(...) \
+ __atomic_op_release(atomic_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(...) \
+ __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic_inc_return_relaxed */
+
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
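
The pattern above leans on the __atomic_op_{acquire,release,fence}()
helpers defined near the top of atomic.h, outside this hunk. In this
version of the file they look roughly like the following (quoted from
memory, so treat it as a sketch): each builds the stronger ordering out
of the _relaxed op plus an smp_mb__{before,after}_atomic() barrier:

    #define __atomic_op_acquire(op, args...)                           \
    ({                                                                 \
            typeof(op##_relaxed(args)) __ret = op##_relaxed(args);     \
            smp_mb__after_atomic();                                    \
            __ret;                                                     \
    })

    #define __atomic_op_release(op, args...)                           \
    ({                                                                 \
            smp_mb__before_atomic();                                   \
            op##_relaxed(args);                                        \
    })

    #define __atomic_op_fence(op, args...)                             \
    ({                                                                 \
            typeof(op##_relaxed(args)) __ret;                          \
            smp_mb__before_atomic();                                   \
            __ret = op##_relaxed(args);                                \
            smp_mb__after_atomic();                                    \
            __ret;                                                     \
    })

So an architecture only has to provide atomic_inc_return_relaxed() and
the acquire, release and fully fenced forms fall out for free.
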
@@ -105,6 +138,30 @@
#endif
#endif /* atomic_sub_return_relaxed */
+/* atomic_dec_return_relaxed */
+#ifndef atomic_dec_return_relaxed
+#define atomic_dec_return_relaxed atomic_dec_return
+#define atomic_dec_return_acquire atomic_dec_return
+#define atomic_dec_return_release atomic_dec_return
+
+#else /* atomic_dec_return_relaxed */
+
+#ifndef atomic_dec_return_acquire
+#define atomic_dec_return_acquire(...) \
+ __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return_release
+#define atomic_dec_return_release(...) \
+ __atomic_op_release(atomic_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(...) \
+ __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic_dec_return_relaxed */
+
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
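
The dec side mirrors the inc side, and these are the variants the
"locking/mutex: Use acquire/release semantics" commit in this pull
consumes. A simplified fastpath sketch in the style of
asm-generic/mutex-dec.h (toy_mutex_*_slowpath() are hypothetical
stand-ins for the real slowpaths):

    /* count: 1 = unlocked, 0 = locked, <0 = locked with waiters. */
    static void toy_mutex_lock_slowpath(atomic_t *count);
    static void toy_mutex_unlock_slowpath(atomic_t *count);

    static inline void toy_mutex_lock(atomic_t *count)
    {
            /* ACQUIRE: the critical section cannot leak above this. */
            if (unlikely(atomic_dec_return_acquire(count) < 0))
                    toy_mutex_lock_slowpath(count);
    }

    static inline void toy_mutex_unlock(atomic_t *count)
    {
            /* RELEASE: the critical section cannot leak below this. */
            if (unlikely(atomic_inc_return_release(count) <= 0))
                    toy_mutex_unlock_slowpath(count);
    }
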
@@ -185,6 +242,31 @@
#endif
#endif /* atomic64_add_return_relaxed */
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define atomic64_inc_return_relaxed atomic64_inc_return
+#define atomic64_inc_return_acquire atomic64_inc_return
+#define atomic64_inc_return_release atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define atomic64_inc_return_acquire(...) \
+ __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define atomic64_inc_return_release(...) \
+ __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(...) \
+ __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
@@ -209,6 +291,30 @@
#endif
#endif /* atomic64_sub_return_relaxed */
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define atomic64_dec_return_relaxed atomic64_dec_return
+#define atomic64_dec_return_acquire atomic64_dec_return
+#define atomic64_dec_return_release atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define atomic64_dec_return_acquire(...) \
+ __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define atomic64_dec_return_release(...) \
+ __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(...) \
+ __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
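
The atomic64_t variants follow the same scheme and are useful where no
ordering is needed at all, e.g. a 64-bit statistics or sequence
counter; there the _relaxed form is the right tool. A hypothetical
example:

    static atomic64_t toy_event_seq = ATOMIC64_INIT(0);

    /* Hand out the next sequence number; a pure counter, no ordering. */
    static inline long long toy_next_seq(void)
    {
            return atomic64_inc_return_relaxed(&toy_event_seq);
    }
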
@@ -451,11 +557,19 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
+#ifndef atomic64_read_ctrl
+static inline long long atomic64_read_ctrl(const atomic64_t *v)
+{
+ long long val = atomic64_read(v);
+ smp_read_barrier_depends(); /* Enforce control dependency. */
+ return val;
+}
+#endif
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
@@ -463,4 +577,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v)
}
#endif
+#include <asm-generic/atomic-long.h>
+
#endif /* _LINUX_ATOMIC_H */
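
Note also that the asm-generic/atomic-long.h include moves from before
the generic fallbacks to the very end of the file, presumably so the
atomic_long_t wrappers can be generated on top of the operations
defined above (this is part of the "atomic: Add atomic_long_t bitops"
commit in the pull). Assuming that commit wires up the usual
atomic_long_{or,andnot}() wrappers, a hypothetical flag word would look
like:

    static atomic_long_t toy_flags = ATOMIC_LONG_INIT(0);
    #define TOY_DIRTY 1UL

    static inline void toy_mark_dirty(void)
    {
            atomic_long_or(TOY_DIRTY, &toy_flags);
    }

    static inline void toy_clear_dirty(void)
    {
            atomic_long_andnot(TOY_DIRTY, &toy_flags);
    }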