author    Linus Torvalds <torvalds@linux-foundation.org>    2013-09-03 08:08:21 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-03 08:08:21 -0700
commit    fc6d0b037678f50014ef409c92c5bedc01208fcd (patch)
tree      b7de25e97b03c31ea6c5f2540f641b0be2c91832 /include/linux/lockref.h
parent    6e4664525b1db28f8c4e1130957f70a94c19213e (diff)
parent    bc08b449ee14ace4d869adaa1bb35a44ce68d775 (diff)
Merge branch 'lockref' (locked reference counts)
Merge lockref infrastructure code by me and Waiman Long.

I already merged some of the preparatory patches that didn't actually do
any semantic changes earlier, but this merges the actual _reason_ for
those preparatory patches.

The "lockref" structure is a combination "spinlock and reference count"
that allows optimized reference count accesses.  In particular, it
guarantees that the reference count will be updated AS IF the spinlock
was held, but using atomic accesses that cover both the reference count
and the spinlock words, we can often do the update without actually
having to take the lock.

This allows us to avoid the nastiest cases of spinlock contention on
large machines under heavy pathname lookup loads.  When updating the
dentry reference counts on a large system, we'll still end up with the
cache line bouncing around, but that's much less noticeable than
actually having to spin waiting for the lock.

* lockref:
  lockref: implement lockless reference count updates using cmpxchg()
  lockref: uninline lockref helper functions
  vfs: reimplement d_rcu_to_refcount() using lockref_get_or_lock()
  vfs: use lockref_get_not_zero() for optimistic lockless dget_parent()
  lockref: add 'lockref_get_or_lock()' helper
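The "update AS IF the spinlock was held" fast path can be pictured with a small
standalone sketch.  This is not the kernel's implementation (which lives in
lib/lockref.c after the "uninline" patch); it is a hedged userspace
illustration using GCC __atomic builtins, and the names sketch_lockref and
try_lockless_get are invented for the example only.

	/*
	 * Illustrative only: userspace sketch of the lockless fast path,
	 * using GCC __atomic builtins instead of the kernel's spinlock and
	 * cmpxchg helpers.  Names are not from the patch.
	 */
	#include <stdint.h>
	#include <stdbool.h>

	struct sketch_lockref {
		union {
			uint64_t lock_count;	/* both fields in one 64-bit word */
			struct {
				uint32_t lock;	/* 0 == unlocked (stand-in for spinlock_t) */
				uint32_t count;	/* reference count */
			};
		};
	};

	/* Bump the count without taking the lock, as long as the lock looks free. */
	static bool try_lockless_get(struct sketch_lockref *ref)
	{
		struct sketch_lockref old, new;

		old.lock_count = __atomic_load_n(&ref->lock_count, __ATOMIC_RELAXED);
		if (old.lock != 0)
			return false;	/* lock is held: caller falls back to locking */

		new = old;
		new.count++;

		/*
		 * One cmpxchg over the combined word: if neither the lock nor
		 * the count changed underneath us, the count is updated "as
		 * if" we had taken and released the spinlock.
		 */
		return __atomic_compare_exchange_n(&ref->lock_count, &old.lock_count,
						   new.lock_count, false,
						   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
	}

A held lock or a failed cmpxchg simply means the caller drops back to the
ordinary spin_lock() path, which is why the plain spinlock semantics of the
structure stay intact.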
Diffstat (limited to 'include/linux/lockref.h')
-rw-r--r--  include/linux/lockref.h  | 61
1 files changed, 13 insertions, 48 deletions
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 01233e01627a..ca07b5028b01 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -17,55 +17,20 @@
 #include <linux/spinlock.h>
 
 struct lockref {
-	spinlock_t lock;
-	unsigned int count;
+	union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			unsigned int count;
+		};
+	};
 };
 
-/**
- * lockref_get - Increments reference count unconditionally
- * @lockcnt: pointer to lockref structure
- *
- * This operation is only valid if you already hold a reference
- * to the object, so you know the count cannot be zero.
- */
-static inline void lockref_get(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-}
-
-/**
- * lockref_get_not_zero - Increments count unless the count is 0
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count is 0
- */
-static inline int lockref_get_not_zero(struct lockref *lockref)
-{
-	int retval = 0;
-
-	spin_lock(&lockref->lock);
-	if (lockref->count) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-
-/**
- * lockref_put_or_lock - decrements count unless count <= 1 before decrement
- * @lockcnt: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
- */
-static inline int lockref_put_or_lock(struct lockref *lockref)
-{
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 1)
-		return 0;
-	lockref->count--;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
 
 #endif /* __LINUX_LOCKREF_H */
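For context, a caller of the new out-of-line API might look roughly like the
sketch below; struct my_obj, my_obj_tryget, my_obj_put and obj_free are
hypothetical names for illustration, not code from this merge.

	/* Hypothetical caller of the lockref API (names are illustrative only). */
	#include <linux/lockref.h>

	struct my_obj {
		struct lockref ref;
		/* ... payload ... */
	};

	static void obj_free(struct my_obj *obj);	/* hypothetical destructor */

	static int my_obj_tryget(struct my_obj *obj)
	{
		/* Usually succeeds without touching the spinlock. */
		return lockref_get_not_zero(&obj->ref);
	}

	static void my_obj_put(struct my_obj *obj)
	{
		/* Fast path: lockless decrement while the count stays above 1. */
		if (lockref_put_or_lock(&obj->ref))
			return;

		/*
		 * Slow path: lockref_put_or_lock() returned with the lock held
		 * and the count at (or below) 1, so drop the last reference and
		 * tear the object down under the lock.
		 */
		obj->ref.count = 0;
		spin_unlock(&obj->ref.lock);
		obj_free(obj);
	}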