author	David Howells <dhowells@redhat.com>	2018-04-06 14:17:26 +0100
committer	David Howells <dhowells@redhat.com>	2018-04-09 21:54:48 +0100
commit	5a8132761609bd7e42db642d6f157140d5bf2ae8 (patch)
tree	70230e95b522728ccf37c67bc69b70abc62df003	/fs/afs/write.c
parent	76a5cb6fc1e22a2a316fb690fc4cdd5121d1c0ff (diff)
afs: Do better accretion of small writes on newly created content
Processes like ld that do lots of small writes that aren't necessarily
contiguous result in a lot of small StoreData operations to the server, the
idea being that if someone else changes the data on the server, we only
write our changes over that and not the space between.  Further, we don't
want to write back empty space if we can avoid it to make it easier for the
server to do sparse files.

However, making lots of tiny RPC ops is a lot less efficient for the server
than one big one because each op requires allocation of resources and the
taking of locks, so we want to compromise a bit.

Reduce the load by the following:

 (1) If a file is just created locally or has just been truncated with
     O_TRUNC locally, allow subsequent writes to the file to be merged with
     intervening space if that space doesn't cross an entire intervening
     page.

 (2) Don't flush the file on ->flush() but rather on ->release() if the
     file was open for writing.

Just linking vmlinux.o, without this patch, looking in /proc/fs/afs/stats:

	file-wr : n=441 nb=513581204

and after the patch:

	file-wr : n=62 nb=513668555

there were 379 fewer StoreData RPC operations at the expense of an extra
87K being written.

Signed-off-by: David Howells <dhowells@redhat.com>
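As a rough userspace model of point (1), the sketch below captures the merge
test that the first hunk adds to afs_write_begin(): an incoming write
[from, to) is normally folded into the page's existing dirty range [f, t)
only when the two ranges touch, but if the file's content is all locally
created the gap between them may be written back as well.  The function and
flag names here (can_merge_write, new_content) are invented for the example;
only the shape of the condition mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Model of the merge test: the incoming write [from, to) may be folded into
 * an existing dirty range [f, t) on the same page.  Normally the ranges must
 * touch; if the file's content is all locally created (AFS_VNODE_NEW_CONTENT
 * in the kernel, modelled here as a plain flag), the intervening space is
 * written back too, so any two ranges on the page may be merged.
 */
static bool can_merge_write(bool new_content,
			    unsigned f, unsigned t,     /* existing dirty range */
			    unsigned from, unsigned to) /* incoming write */
{
	if (!new_content && (to < f || from > t))
		return false;	/* disjoint and not newly created content */
	return true;
}

int main(void)
{
	/* Disjoint writes are merged only when the content is locally new. */
	printf("existing file: %d\n", can_merge_write(false, 0, 100, 200, 300));
	printf("new file     : %d\n", can_merge_write(true, 0, 100, 200, 300));
	return 0;
}

Compiled on its own, the disjoint example prints 0 for an existing file and
1 for freshly created content.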
Diffstat (limited to 'fs/afs/write.c')
-rw-r--r--	fs/afs/write.c	32
1 files changed, 13 insertions, 19 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c
index eccc16198f68..160f6cc26c00 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -125,7 +125,12 @@ try_again:
 					     page->index, priv);
 			goto flush_conflicting_write;
 		}
-		if (to < f || from > t)
+		/* If the file is being filled locally, allow inter-write
+		 * spaces to be merged into writes.  If it's not, only write
+		 * back what the user gives us.
+		 */
+		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
+		    (to < f || from > t))
 			goto flush_conflicting_write;
 		if (from < f)
 			f = from;
@@ -419,7 +424,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
 				     primary_page->index, priv);
 
-	if (start >= final_page || to < PAGE_SIZE)
+	if (start >= final_page ||
+	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
 		goto no_more;
 
 	start++;
@@ -440,9 +446,10 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 		}
 
 		for (loop = 0; loop < n; loop++) {
-			if (to != PAGE_SIZE)
-				break;
 			page = pages[loop];
+			if (to != PAGE_SIZE &&
+			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+				break;
 			if (page->index > final_page)
 				break;
 			if (!trylock_page(page))
@@ -455,7 +462,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
 			priv = page_private(page);
 			f = priv & AFS_PRIV_MAX;
 			t = priv >> AFS_PRIV_SHIFT;
-			if (f != 0) {
+			if (f != 0 &&
+			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
 				unlock_page(page);
 				break;
 			}
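The f and t values decoded in the hunk above come from the dirty-range
bookkeeping that afs keeps in page->private: the start of the dirty data
sits in the low bits (masked with AFS_PRIV_MAX) and the end is stored in the
high bits (shifted by AFS_PRIV_SHIFT).  Below is a minimal userspace model
of that packing; the DEMO_PRIV_* constants are made up for illustration and
are not the kernel's real AFS_PRIV_* values.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's AFS_PRIV_MAX/AFS_PRIV_SHIFT;
 * the real values depend on PAGE_SIZE and live in the afs headers.
 */
#define DEMO_PRIV_SHIFT	16
#define DEMO_PRIV_MAX	((1UL << DEMO_PRIV_SHIFT) - 1)

/* Pack a dirty range [f, t) into one word, mirroring how afs stores the
 * range in page->private.
 */
static unsigned long pack_dirty_range(unsigned long f, unsigned long t)
{
	return f | (t << DEMO_PRIV_SHIFT);
}

int main(void)
{
	unsigned long priv = pack_dirty_range(128, 4096);
	unsigned long f = priv & DEMO_PRIV_MAX;    /* start of dirty data */
	unsigned long t = priv >> DEMO_PRIV_SHIFT; /* end of dirty data */

	assert(f == 128 && t == 4096);
	printf("dirty range [%lu, %lu)\n", f, t);
	return 0;
}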
@@ -741,20 +749,6 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 }
 
 /*
- * Flush out all outstanding writes on a file opened for writing when it is
- * closed.
- */
-int afs_flush(struct file *file, fl_owner_t id)
-{
-	_enter("");
-
-	if ((file->f_mode & FMODE_WRITE) == 0)
-		return 0;
-
-	return vfs_fsync(file, 0);
-}
-
-/*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
  */