author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-08 13:26:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-08 13:26:18 -0700
commit     8aab6a27332bbf2abfcb35224738394e784d940b (patch)
tree       2ab91f09238c948a18314487ec6a3574cd78ea32 /fs/dcache.c
parent     b409624ad5a99c2e84df6657bd0f7931ac470d2d (diff)
vfs: reorganize dput() memory accesses
This is me being a bit OCD after all the dentry optimization work this
merge window: profiles end up showing 'dput()' as a rather expensive
operation, and there were two unrelated bad reasons for that.

The first reason was reading d_lockref.count for debugging purposes,
which touches the lockref cacheline (for reads) before we really need
to. More importantly, the debugging test in question is _wrong_, and has
hidden bugs. It's true that we can only sleep when the count goes down
to zero, but the test as-is hides the much more subtle bug that happens
if we race with somebody else deleting the file.

Anyway, we _will_ touch that cacheline, but let's do it for a write, and
in the right routine (ie in "lockref_put_or_lock()"), which annotates
the costs better. So remove the misleading debug code.

The other was an unnecessary access to the cacheline that contains the
d_lru list, just to check whether we were already on the LRU list or
not. This is exactly what we have d_flags for, so that we can avoid
touching extra cache lines for the common case. So just add another bit
for "is this dentry on the LRU".

Finally, mark the tests properly likely/unlikely, so that the common
fast paths are dense in the instruction stream.

This makes the profiles look much saner.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
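To make the d_flags idea concrete outside the kernel, here is a minimal
standalone sketch of the pattern, assuming made-up names (toy_dentry,
TOY_LRU_LIST, toy_lru_add); the real change is in the diff that follows.
The point is that the membership test reads only the flags word, on a
cacheline the caller has already touched, and the list node's cacheline
is accessed only when membership actually changes.

#include <stdio.h>

struct list_node {
        struct list_node *next, *prev;
};

#define TOY_LRU_LIST 0x01               /* "is this entry on the LRU" */

struct toy_dentry {
        unsigned int flags;             /* hot: tested on every call */
        struct list_node lru;           /* cold: touched only on membership change */
};

static struct list_node lru_head = { &lru_head, &lru_head };

static void list_add_head(struct list_node *n, struct list_node *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void toy_lru_add(struct toy_dentry *d)
{
        /* A single read of d->flags decides everything; the slow path
         * that links the node runs only on the first call. */
        if (!(d->flags & TOY_LRU_LIST)) {
                d->flags |= TOY_LRU_LIST;
                list_add_head(&d->lru, &lru_head);
        }
}

int main(void)
{
        struct toy_dentry d = { 0 };
        toy_lru_add(&d);                /* slow path: sets bit, links node */
        toy_lru_add(&d);                /* fast path: flags test only */
        printf("on lru: %d\n", !!(d.flags & TOY_LRU_LIST));
        return 0;
}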
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--   fs/dcache.c   20
1 file changed, 10 insertions(+), 10 deletions(-)
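Context for the dput() hunk below: lockref_put_or_lock() decrements the
embedded reference count unless it would drop to zero; in that
last-reference case it returns 0 with the spinlock already held, so the
caller can decide under the lock whether to kill the dentry. Here is a
simplified, mutex-based sketch of that contract, using hypothetical
toy_ names; the real implementation in lib/lockref.c additionally
attempts a lockless cmpxchg decrement before falling back to the lock.

#include <stdbool.h>
#include <pthread.h>

struct toy_lockref {
        pthread_mutex_t lock;
        int count;
};

/* Returns true if the count was safely decremented. Returns false
 * *with the lock still held* when this is the last reference, leaving
 * the disposal decision to the caller. */
static bool toy_lockref_put_or_lock(struct toy_lockref *ref)
{
        pthread_mutex_lock(&ref->lock);
        if (ref->count <= 1)
                return false;
        ref->count--;
        pthread_mutex_unlock(&ref->lock);
        return true;
}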
diff --git a/fs/dcache.c b/fs/dcache.c
index 761e31bacbc2..bf3c4f9569eb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -308,8 +308,9 @@ static void dentry_unlink_inode(struct dentry * dentry)
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-        if (list_empty(&dentry->d_lru)) {
+        if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
                 spin_lock(&dcache_lru_lock);
+                dentry->d_flags |= DCACHE_LRU_LIST;
                 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
                 dentry_stat.nr_unused++;
@@ -320,7 +321,7 @@ static void dentry_lru_add(struct dentry *dentry)
 static void __dentry_lru_del(struct dentry *dentry)
 {
         list_del_init(&dentry->d_lru);
-        dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+        dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
         dentry->d_sb->s_nr_dentry_unused--;
         dentry_stat.nr_unused--;
 }
@@ -341,6 +342,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 {
         spin_lock(&dcache_lru_lock);
         if (list_empty(&dentry->d_lru)) {
+                dentry->d_flags |= DCACHE_LRU_LIST;
                 list_add_tail(&dentry->d_lru, list);
                 dentry->d_sb->s_nr_dentry_unused++;
                 dentry_stat.nr_unused++;
@@ -509,24 +511,22 @@ relock:
  */
 void dput(struct dentry *dentry)
 {
-        if (!dentry)
+        if (unlikely(!dentry))
                 return;
 
 repeat:
-        if (dentry->d_lockref.count == 1)
-                might_sleep();
         if (lockref_put_or_lock(&dentry->d_lockref))
                 return;
 
-        if (dentry->d_flags & DCACHE_OP_DELETE) {
+        /* Unreachable? Get rid of it */
+        if (unlikely(d_unhashed(dentry)))
+                goto kill_it;
+
+        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
                 if (dentry->d_op->d_delete(dentry))
                         goto kill_it;
         }
-        /* Unreachable? Get rid of it */
-        if (d_unhashed(dentry))
-                goto kill_it;
-
         dentry->d_flags |= DCACHE_REFERENCED;
         dentry_lru_add(dentry);
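A closing note on the likely()/unlikely() annotations used throughout
the hunks above: in the kernel these expand to GCC's __builtin_expect()
(see include/linux/compiler.h), which tells the compiler to lay out the
expected branch as straight-line fall-through code. That is what keeps
the common fast paths "dense in the instruction stream", as the commit
message puts it.

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)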