author    Trond Myklebust <Trond.Myklebust@netapp.com>  2006-11-13 16:23:44 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-06 10:46:26 -0500
commit    8aca67f0ae2d8811165c22326825a645cc8e1b48 (patch)
tree      19e82f4bc7b4f865a9dcf4744e7c224ea517ba10 /fs/nfs
parent    e6b3c4db6fbcd0d33720696f37790d6b8be12313 (diff)
SUNRPC: Fix a potential race in rpc_wake_up_task()
Use RCU to ensure that we can safely call rpc_finish_wakeup after we've
called __rpc_do_wake_up_task. Without this, there is a theoretical race in
which the rpc_task finishes executing and gets freed first.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
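The fix relies on the standard RCU deferred-free pattern. Below is a minimal,
hypothetical sketch of that pattern in kernel-style C (the demo_task names are
illustrative, not taken from this patch): the waker only touches the task
inside an rcu_read_lock_bh() section, and the owner hands the final kfree()
to call_rcu_bh(), so the callback cannot run until every such reader has left
its critical section.

/*
 * Illustrative sketch only -- "demo_task" and friends are made-up names,
 * not part of this patch.  The shape matches what the patch does: readers
 * dereference the object inside rcu_read_lock_bh(), and the owner frees it
 * through call_rcu_bh(), so the memory cannot be reclaimed while a reader
 * is still inside its read-side section.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_task {
	int		state;
	struct rcu_head	rcu;		/* plays the role of task.u.tk_rcu */
};

/* Runs only after all current rcu_read_lock_bh() readers have exited. */
static void demo_task_rcu_free(struct rcu_head *head)
{
	struct demo_task *t = container_of(head, struct demo_task, rcu);

	kfree(t);
}

/* Owner side: defer the free instead of kfree()ing immediately. */
static void demo_task_free(struct demo_task *t)
{
	call_rcu_bh(&t->rcu, demo_task_rcu_free);
}

/* Waker side: the task stays valid for the whole read-side section. */
static void demo_task_wake(struct demo_task *t)
{
	rcu_read_lock_bh();
	t->state = 1;		/* safe: any free is deferred past this point */
	rcu_read_unlock_bh();
}

The fs/nfs changes below are the deferred-free side of that pairing: each
direct mempool_free() of a read, write or commit data structure becomes an
RCU callback hung off task.u.tk_rcu.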
Diffstat (limited to 'fs/nfs')
-rw-r--r--   fs/nfs/read.c     8
-rw-r--r--   fs/nfs/write.c   20
2 files changed, 23 insertions, 5 deletions
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c2e49c397a27..8b58bbf6e39e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -65,13 +65,19 @@ struct nfs_read_data *nfs_readdata_alloc(size_t len)
 	return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_rdata_mempool);
 }
 
+static void nfs_readdata_free(struct nfs_read_data *rdata)
+{
+	call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
+}
+
 void nfs_readdata_release(void *data)
 {
 	nfs_readdata_free(data);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 883dd4a1c157..29d88209199d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -102,13 +102,19 @@ struct nfs_write_data *nfs_commit_alloc(void)
 	return p;
 }
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_commit_mempool);
 }
 
+void nfs_commit_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
+}
+
 struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -131,13 +137,19 @@ struct nfs_write_data *nfs_writedata_alloc(size_t len)
 	return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_wdata_mempool);
 }
 
+static void nfs_writedata_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
+}
+
 void nfs_writedata_release(void *wdata)
 {
 	nfs_writedata_free(wdata);
@@ -258,7 +270,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
 io_error:
 	nfs_end_data_update(inode);
 	end_page_writeback(page);
-	nfs_writedata_free(wdata);
+	nfs_writedata_release(wdata);
 	return written ? written : result;
 }
 
@@ -1043,7 +1055,7 @@ out_bad:
 	while (!list_empty(&list)) {
 		data = list_entry(list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_writedata_free(data);
+		nfs_writedata_release(data);
 	}
 	nfs_mark_request_dirty(req);
 	nfs_clear_page_writeback(req);