author     J. Bruce Fields <bfields@redhat.com>    2012-11-15 14:52:19 -0500
committer  J. Bruce Fields <bfields@redhat.com>    2012-11-26 09:08:15 -0500
commit     ffe1137ba743cdf1c2414d5a89690aec1daa6bba (patch)
tree       b774cb101e03779db6dd569a28b3e5d7d4bf06b1 /fs/nfsd/nfs4proc.c
parent     70cc7f75b1ee4161dfdea1012223db25712ab1a5 (diff)
nfsd4: delay filling in write iovec array till after xdr decoding
Our server rejects compounds containing more than one write operation. It's unclear whether this is really permitted by the spec; with 4.0, it's possibly OK, with 4.1 (which has clearer limits on compound parameters), it's probably not OK. No client that we're aware of has ever done this, but in theory it could be useful.

The source of the limitation: we need an array of iovecs to pass to the write operation. In the worst case that array of iovecs could have hundreds of elements (the maximum rwsize divided by the page size), so it's too big to put on the stack, or in each compound op. So we instead keep a single such array in the compound argument.

We fill in that array at the time we decode the xdr operation. But we decode every op in the compound before executing any of them. So once we've used that array we can't decode another write.

If we instead delay filling in that array till the time we actually perform the write, we can reuse it.

Another option might be to switch to decoding compound ops one at a time. I considered doing that, but it has a number of other side effects, and I'd rather fix just this one problem for now.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
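The standalone sketch below illustrates the buffer-splitting arithmetic described above: one vector entry covers the head buffer, then one page-sized entry covers each remaining page of the payload. It is a simplified userspace illustration under assumed names; the struct, the helper, and the 4096-byte page constant are stand-ins, not kernel interfaces, and the real helper (fill_in_write_vector) appears in the diff that follows.

/* Simplified, userspace-only sketch of the vector-filling arithmetic. */
#include <stddef.h>

#define SKETCH_PAGE_SIZE 4096		/* stand-in for PAGE_SIZE */

struct sketch_vec {
	void   *base;
	size_t  len;
};

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

/*
 * Split a payload of buflen bytes into one head entry plus page-sized
 * entries; returns the number of vector entries used.
 */
static int sketch_fill_vector(struct sketch_vec *vec,
			      void *head, size_t head_len,
			      void **pages, size_t buflen)
{
	int i = 1;

	/* First entry covers whatever part of the payload sits in the head. */
	vec[0].base = head;
	vec[0].len = min_sz(buflen, head_len);
	buflen -= vec[0].len;

	/* Remaining bytes are spread across whole pages, one entry each. */
	while (buflen) {
		vec[i].base = pages[i - 1];
		vec[i].len = min_sz(SKETCH_PAGE_SIZE, buflen);
		buflen -= vec[i].len;
		i++;
	}
	return i;
}

Dividing a maximum-size write into page-sized entries like this is what produces the "hundreds of elements" worst case mentioned above, which is why the array lives in the compound argument rather than on the stack.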
Diffstat (limited to 'fs/nfsd/nfs4proc.c')
-rw-r--r--   fs/nfsd/nfs4proc.c   24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 1d2396b79574..87d24e5f3ca4 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -881,6 +881,24 @@ out:
return status;
}
+static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
+{
+ int i = 1;
+ int buflen = write->wr_buflen;
+
+ vec[0].iov_base = write->wr_head.iov_base;
+ vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
+ buflen -= vec[0].iov_len;
+
+ while (buflen) {
+ vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
+ vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
+ buflen -= vec[i].iov_len;
+ i++;
+ }
+ return i;
+}
+
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_write *write)
@@ -889,6 +907,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file *filp = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
+ int nvecs;
/* no need to check permission - this will be done in nfsd_write() */
@@ -911,8 +930,11 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
write->wr_how_written = write->wr_stable_how;
gen_boot_verifier(&write->wr_verifier);
+ nvecs = fill_in_write_vector(rqstp->rq_vec, write);
+ WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
+
status = nfsd_write(rqstp, &cstate->current_fh, filp,
- write->wr_offset, rqstp->rq_vec, write->wr_vlen,
+ write->wr_offset, rqstp->rq_vec, nvecs,
&cnt, &write->wr_how_written);
if (filp)
fput(filp);