author    Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-20 13:44:04 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-20 13:44:04 -0500
commit    cd52ed35535ef443f08bf5cd3331d350272885b8 (patch)
tree      135c3a80b21ce478816229dc82586b12754eb49f /fs/nfs/pagelist.c
parent    b92dccf65bab3b6b7deb79ff3321dc256eb0f53b (diff)
NFS: Avoid races between writebacks and truncation
Currently there is no serialisation between NFS asynchronous writebacks and truncation at the page level, because nfs_sync_inode() cannot lock the pages that it is about to write out. This means that it is possible to be flushing out data (and calling something like set_page_writeback()) while the page cache is busy evicting the page. Oops...

Use the hooks provided in try_to_release_page() to ensure that dirty pages are always written back to storage before we evict them.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
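The fix has two halves. The diff to this file (below) tags every page that has an nfs_page request attached with PagePrivate; the other half, outside the diffstat shown here, is an address_space releasepage hook that the VM then consults before it may evict such a page. A minimal sketch of what that hook could look like, assuming the existing nfs_wb_page() writeback helper and hypothetical wiring into nfs_file_aops; this is an illustration, not the verbatim code added elsewhere by the commit:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/nfs_fs.h>

/*
 * Illustrative sketch only: try_to_release_page() invokes the
 * releasepage hook for pages with PagePrivate set, so dirty NFS
 * data gets flushed before the page cache is allowed to drop it.
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	/* Write back (and wait for) any request attached to this page. */
	if (nfs_wb_page(page->mapping->host, page) < 0)
		return 0;	/* flush failed: refuse to release the page */
	/* Releasable only once nfs_clear_request() has dropped PagePrivate. */
	return !PagePrivate(page);
}

/* Assumed registration: .releasepage = nfs_release_page in nfs_file_aops. */

Returning 0 tells the VM that the page is still busy, which is exactly the serialisation the old code lacked.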
Diffstat (limited to 'fs/nfs/pagelist.c')
-rw-r--r--	fs/nfs/pagelist.c	10
1 file changed, 8 insertions, 2 deletions
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d53857b148e2..d6e076c9dbe1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -85,6 +85,10 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 	atomic_set(&req->wb_complete, 0);
 	req->wb_index = page->index;
 	page_cache_get(page);
+	BUG_ON(PagePrivate(page));
+	BUG_ON(!PageLocked(page));
+	BUG_ON(page->mapping->host != inode);
+	SetPagePrivate(page);
 	req->wb_offset = offset;
 	req->wb_pgbase = offset;
 	req->wb_bytes = count;
@@ -147,8 +151,10 @@ void nfs_clear_page_writeback(struct nfs_page *req)
  */
 void nfs_clear_request(struct nfs_page *req)
 {
-	if (req->wb_page) {
-		page_cache_release(req->wb_page);
+	struct page *page = req->wb_page;
+	if (page != NULL) {
+		ClearPagePrivate(page);
+		page_cache_release(page);
 		req->wb_page = NULL;
 	}
 }
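Why SetPagePrivate() closes the race: the VM's eviction paths will not silently drop a page that carries private filesystem state; they funnel it through try_to_release_page(), which defers to the filesystem's releasepage hook. A condensed paraphrase of that VM-side check (see mm/filemap.c of this era for the real code):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Condensed paraphrase of the VM side; not a verbatim copy. */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;		/* still under I/O, cannot drop yet */
	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

With SetPagePrivate() taken in nfs_create_request() and ClearPagePrivate() in nfs_clear_request(), a page stays pinned to this path for as long as an NFS write request references it, so truncation can no longer race past an in-flight writeback.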