author    Benny Halevy <bhalevy@primarydata.com>  2014-05-30 09:09:27 -0400
committer J. Bruce Fields <bfields@redhat.com>    2014-06-04 15:41:52 -0400
commit    931ee56c67573eb4e51c8a4e78598d965b8b059e (patch)
tree      c9a2dd61a536acd1110ee611c1996cd6b57783ac /fs/nfsd
parent    a832e7ae8b4f71fc522089d83fd445693aa7dbc5 (diff)
nfsd4: use recall_lock for delegation hashing
This fixes a bug in the handling of the fi_delegations list. nfs4_setlease does not hold the recall_lock when adding to it. The client_mutex is held, which protects against concurrent list changes, but nfsd_break_deleg_cb does not hold it while walking the list. New delegations could theoretically creep onto the list while nfsd_break_deleg_cb is walking it.

Signed-off-by: Benny Halevy <bhalevy@primarydata.com>
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
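To see the pattern the patch adopts, here is a minimal, self-contained userspace sketch (pthreads, not kernel code; every name in it is hypothetical and only mirrors the patch): the helper that links a delegation into the per-file list asserts that the caller already holds the lock, and both the add path and the walk path take that same lock.

/*
 * Editorial illustration only -- a userspace analogue of "hash the
 * delegation under the recall_lock".  None of these names come from nfsd.
 * Build with: cc -pthread sketch.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct deleg {
	struct deleg *next;
	int id;
};

struct file_state {
	pthread_mutex_t lock;		/* stands in for the global recall_lock */
	bool locked;			/* poor man's lockdep_assert_held() */
	struct deleg *delegations;	/* stands in for fi_delegations */
};

/* Caller must hold fs->lock, mirroring hash_delegation_locked(). */
static void hash_delegation_locked(struct file_state *fs, struct deleg *dp)
{
	assert(fs->locked);		/* analogue of lockdep_assert_held() */
	dp->next = fs->delegations;
	fs->delegations = dp;
}

/* The "setlease" side: the list insertion now happens under the lock. */
static void add_delegation(struct file_state *fs, struct deleg *dp)
{
	pthread_mutex_lock(&fs->lock);
	fs->locked = true;
	hash_delegation_locked(fs, dp);
	fs->locked = false;
	pthread_mutex_unlock(&fs->lock);
}

/*
 * The "break_deleg" side: the walk takes the same lock, so it cannot race
 * with a concurrent insertion or observe a half-linked entry.
 */
static int count_delegations(struct file_state *fs)
{
	struct deleg *dp;
	int n = 0;

	pthread_mutex_lock(&fs->lock);
	for (dp = fs->delegations; dp != NULL; dp = dp->next)
		n++;
	pthread_mutex_unlock(&fs->lock);
	return n;
}

int main(void)
{
	struct file_state fs = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct deleg d1 = { .id = 1 }, d2 = { .id = 2 };

	add_delegation(&fs, &d1);
	add_delegation(&fs, &d2);
	printf("%d delegations hashed\n", count_delegations(&fs));
	return 0;
}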
Diffstat (limited to 'fs/nfsd')
-rw-r--r--  fs/nfsd/nfs4state.c  20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 01a0e4384500..05ca64c4599d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -433,12 +433,21 @@ static void unhash_stid(struct nfs4_stid *s)
s->sc_type = 0;
}
+static void
+hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+{
+ lockdep_assert_held(&recall_lock);
+
+ list_add(&dp->dl_perfile, &fp->fi_delegations);
+ list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+}
+
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
- list_del_init(&dp->dl_perclnt);
spin_lock(&recall_lock);
+ list_del_init(&dp->dl_perclnt);
list_del_init(&dp->dl_perfile);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&recall_lock);
@@ -3065,11 +3074,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
if (status)
goto out_free;
- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
fp->fi_lease = fl;
fp->fi_deleg_file = get_file(fl->fl_file);
atomic_set(&fp->fi_delegees, 1);
- list_add(&dp->dl_perfile, &fp->fi_delegations);
+ spin_lock(&recall_lock);
+ hash_delegation_locked(dp, fp);
+ spin_unlock(&recall_lock);
return 0;
out_free:
locks_free_lock(fl);
@@ -3090,9 +3100,8 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
spin_unlock(&recall_lock);
return -EAGAIN;
}
- list_add(&dp->dl_perfile, &fp->fi_delegations);
+ hash_delegation_locked(dp, fp);
spin_unlock(&recall_lock);
- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
return 0;
}
@@ -4903,6 +4912,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
struct nfs4_delegation *dp, *next;
u64 count = 0;
+ lockdep_assert_held(&recall_lock);
list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
if (victims)
list_move(&dp->dl_recall_lru, victims);
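A closing note (editorial, not part of the commit): the net effect of the patch is that both list_add() calls, for dl_perfile and dl_perclnt, now happen inside a single recall_lock critical section via hash_delegation_locked(), and unhash_delegation() removes the entries under the same lock. The lockdep_assert_held(&recall_lock) calls document the locking contract of the _locked helper and of nfsd_find_all_delegations(); with lockdep enabled they also warn at runtime if a caller ever reaches those functions without holding the spinlock.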