Diffstat (limited to 'fs/ntfs/file.c')
 fs/ntfs/file.c | 46 +++++++++++++++++++++-------------------------
 1 file changed, 21 insertions(+), 25 deletions(-)
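For context before the hunks: the change replaces the NTFS-private BOOL/TRUE/FALSE boolean with the kernel-wide bool/true/false, and switches ntfs_file_aio_write() to the vectored calling convention (an iovec array plus segment count rather than a single user buffer), dropping the separate .readv/.writev methods from ntfs_file_ops. A minimal sketch of the boolean side follows; the old typedef shown is an assumption about the NTFS-private definition (fs/ntfs/types.h or similar), not quoted from this diff:

#include <linux/types.h>	/* kernel-wide bool, true, false */

/*
 * Before the change the driver carried its own boolean, assumed to be
 * defined roughly as:
 *
 *	typedef enum { FALSE = 0, TRUE = 1 } BOOL;
 *	BOOL rl_write_locked = FALSE;
 *
 * which the hunks below rewrite to the standard kernel form:
 */
static bool rl_write_locked = false;	/* set to true once the runlist lock is taken for writing */

On the read side, the do_sync_read/generic_file_aio_read pair takes over from generic_file_read and generic_file_readv, so the synchronous entry point is implemented in terms of the iovec-based asynchronous one.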
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dcae12..ae2fe0016d2c 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -509,7 +509,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
u32 attr_rec_len = 0;
unsigned blocksize, u;
int err, mp_size;
- BOOL rl_write_locked, was_hole, is_retry;
+ bool rl_write_locked, was_hole, is_retry;
unsigned char blocksize_bits;
struct {
u8 runlist_merged:1;
@@ -543,13 +543,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
return -ENOMEM;
}
} while (++u < nr_pages);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
err = 0;
vcn = lcn = -1;
vcn_len = 0;
lcn_block = -1;
- was_hole = FALSE;
+ was_hole = false;
cpos = pos >> vol->cluster_size_bits;
end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -760,7 +760,7 @@ map_buffer_cached:
}
continue;
}
- is_retry = FALSE;
+ is_retry = false;
if (!rl) {
down_read(&ni->runlist.lock);
retry_remap:
@@ -776,7 +776,7 @@ retry_remap:
* Successful remap, setup the map cache and
* use that to deal with the buffer.
*/
- was_hole = FALSE;
+ was_hole = false;
vcn = bh_cpos;
vcn_len = rl[1].vcn - vcn;
lcn_block = lcn << (vol->cluster_size_bits -
@@ -792,7 +792,7 @@ retry_remap:
if (likely(vcn + vcn_len >= cend)) {
if (rl_write_locked) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else
up_read(&ni->runlist.lock);
rl = NULL;
@@ -818,13 +818,13 @@ retry_remap:
*/
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
err = ntfs_map_runlist_nolock(ni, bh_cpos,
NULL);
if (likely(!err)) {
- is_retry = TRUE;
+ is_retry = true;
goto retry_remap;
}
/*
@@ -903,7 +903,7 @@ rl_not_mapped_enoent:
if (!rl_write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
/* Find the previous last allocated cluster. */
@@ -917,7 +917,7 @@ rl_not_mapped_enoent:
}
}
rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
- FALSE);
+ false);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -1093,7 +1093,7 @@ rl_not_mapped_enoent:
status.mft_attr_mapped = 0;
status.mp_rebuilt = 0;
/* Setup the map cache and use that to deal with the buffer. */
- was_hole = TRUE;
+ was_hole = true;
vcn = bh_cpos;
vcn_len = 1;
lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1105,7 @@ rl_not_mapped_enoent:
*/
if (likely(vcn + vcn_len >= cend)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
}
goto map_buffer_cached;
@@ -1117,7 +1117,7 @@ rl_not_mapped_enoent:
if (likely(!err)) {
if (unlikely(rl_write_locked)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else if (unlikely(rl))
up_read(&ni->runlist.lock);
rl = NULL;
@@ -1528,19 +1528,19 @@ static inline int ntfs_commit_pages_after_non_resident_write(
do {
s64 bh_pos;
struct page *page;
- BOOL partial;
+ bool partial;
page = pages[u];
bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
- partial = FALSE;
+ partial = false;
do {
s64 bh_end;
bh_end = bh_pos + blocksize;
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh))
- partial = TRUE;
+ partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -1997,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
*/
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
- vol->cluster_size_bits, FALSE);
+ vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
status = -EIO;
@@ -2176,20 +2176,18 @@ out:
/**
* ntfs_file_aio_write -
*/
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
- size_t count, loff_t pos)
+static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
- struct iovec local_iov = { .iov_base = (void __user *)buf,
- .iov_len = count };
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
- ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+ ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = sync_page_range(inode, mapping, pos, ret);
@@ -2298,13 +2296,11 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
const struct file_operations ntfs_file_ops = {
.llseek = generic_file_llseek, /* Seek inside file. */
- .read = generic_file_read, /* Read from file. */
+ .read = do_sync_read, /* Read from file. */
.aio_read = generic_file_aio_read, /* Async read from file. */
- .readv = generic_file_readv, /* Read from file. */
#ifdef NTFS_RW
.write = ntfs_file_write, /* Write to file. */
.aio_write = ntfs_file_aio_write, /* Async write to file. */
- .writev = ntfs_file_writev, /* Write to file. */
/*.release = ,*/ /* Last file is closed. See
fs/ext2/file.c::
ext2_release_file() for