Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c                    2
-rw-r--r--  fs/Kconfig                         27
-rw-r--r--  fs/cifs/cifs_debug.c                8
-rw-r--r--  fs/cifs/cifsglob.h                  2
-rw-r--r--  fs/cifs/cifssmb.c                   5
-rw-r--r--  fs/cifs/connect.c                   9
-rw-r--r--  fs/cifs/inode.c                     9
-rw-r--r--  fs/ext3/dir.c                       3
-rw-r--r--  fs/ext3/inode.c                    32
-rw-r--r--  fs/jbd/journal.c                   26
-rw-r--r--  fs/jbd/transaction.c               68
-rw-r--r--  fs/jfs/acl.c                        4
-rw-r--r--  fs/nfs/client.c                    18
-rw-r--r--  fs/nfs/dir.c                        2
-rw-r--r--  fs/nfs/nfs4_fs.h                    6
-rw-r--r--  fs/nfs/nfs4proc.c                  40
-rw-r--r--  fs/nfs/nfs4state.c                  2
-rw-r--r--  fs/nilfs2/Kconfig                  25
-rw-r--r--  fs/notify/Kconfig                  12
-rw-r--r--  fs/notify/dnotify/Kconfig           2
-rw-r--r--  fs/notify/fsnotify.c                4
-rw-r--r--  fs/notify/inotify/Kconfig           2
-rw-r--r--  fs/notify/inotify/inotify_user.c  109
-rw-r--r--  fs/notify/notification.c           19
-rw-r--r--  fs/pipe.c                           4
25 files changed, 250 insertions, 190 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 6fcb1e7095cf..92828281a30b 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -57,7 +57,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
buffer = kmap(page);
offset = page_offset(page);
- retval = v9fs_file_readn(filp, buffer, NULL, offset, PAGE_CACHE_SIZE);
+ retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
if (retval < 0)
goto done;
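
Note on the fix: the new call passes PAGE_CACHE_SIZE as the byte count and the page offset as the file offset; the old call had them transposed, asking for 'offset' bytes at position PAGE_CACHE_SIZE. A sketch of the assumed prototype (the exact declaration lives in the 9p headers; the count-before-offset order is what the fix relies on):

    /* assumed prototype -- count precedes offset */
    ssize_t v9fs_file_readn(struct file *filp, char *data, char __user *udata,
                            u32 count, u64 offset);
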
diff --git a/fs/Kconfig b/fs/Kconfig
index a97263be6a91..0e7da7bb5d93 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -186,32 +186,7 @@ source "fs/romfs/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
-
-config NILFS2_FS
- tristate "NILFS2 file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- select CRC32
- help
- NILFS2 is a log-structured file system (LFS) supporting continuous
- snapshotting. In addition to versioning capability of the entire
- file system, users can even restore files mistakenly overwritten or
- destroyed just a few seconds ago. Since this file system can keep
- consistency like conventional LFS, it achieves quick recovery after
- system crashes.
-
- NILFS2 creates a number of checkpoints every few seconds or per
- synchronous write basis (unless there is no change). Users can
- select significant versions among continuously created checkpoints,
- and can change them into snapshots which will be preserved for long
- periods until they are changed back to checkpoints. Each
- snapshot is mountable as a read-only file system concurrently with
- its writable mount, and this feature is convenient for online backup.
-
- Some features including atime, extended attributes, and POSIX ACLs,
- are not supported yet.
-
- To compile this file system support as a module, choose M here: the
- module will be called nilfs2. If unsure, say N.
+source "fs/nilfs2/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 7f19fefd3d45..42cec2a7c0cf 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -261,6 +261,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&tcon->num_reads, 0);
atomic_set(&tcon->num_oplock_brks, 0);
atomic_set(&tcon->num_opens, 0);
+ atomic_set(&tcon->num_posixopens, 0);
+ atomic_set(&tcon->num_posixmkdirs, 0);
atomic_set(&tcon->num_closes, 0);
atomic_set(&tcon->num_deletes, 0);
atomic_set(&tcon->num_mkdirs, 0);
@@ -347,11 +349,15 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
atomic_read(&tcon->num_locks),
atomic_read(&tcon->num_hardlinks),
atomic_read(&tcon->num_symlinks));
- seq_printf(m, "\nOpens: %d Closes: %d"
+ seq_printf(m, "\nOpens: %d Closes: %d "
"Deletes: %d",
atomic_read(&tcon->num_opens),
atomic_read(&tcon->num_closes),
atomic_read(&tcon->num_deletes));
+ seq_printf(m, "\nPosix Opens: %d "
+ "Posix Mkdirs: %d",
+ atomic_read(&tcon->num_posixopens),
+ atomic_read(&tcon->num_posixmkdirs));
seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
atomic_read(&tcon->num_mkdirs),
atomic_read(&tcon->num_rmdirs));
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 63f6cdfa5638..6084d6379c03 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -260,6 +260,8 @@ struct cifsTconInfo {
atomic_t num_closes;
atomic_t num_deletes;
atomic_t num_mkdirs;
+ atomic_t num_posixopens;
+ atomic_t num_posixmkdirs;
atomic_t num_rmdirs;
atomic_t num_renames;
atomic_t num_t2renames;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 922f5fe2084c..1866bc2927d4 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1113,7 +1113,10 @@ PsxCreat:
psx_create_err:
cifs_buf_release(pSMB);
- cifs_stats_inc(&tcon->num_mkdirs);
+ if (posix_flags & SMB_O_DIRECTORY)
+ cifs_stats_inc(&tcon->num_posixmkdirs);
+ else
+ cifs_stats_inc(&tcon->num_posixopens);
if (rc == -EAGAIN)
goto PsxCreat;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e16d7592116a..fc44d316d0bb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2452,10 +2452,10 @@ try_mount_again:
tcon->local_lease = volume_info->local_lease;
}
if (pSesInfo) {
- if (pSesInfo->capabilities & CAP_LARGE_FILES) {
- sb->s_maxbytes = (u64) 1 << 63;
- } else
- sb->s_maxbytes = (u64) 1 << 31; /* 2 GB */
+ if (pSesInfo->capabilities & CAP_LARGE_FILES)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ else
+ sb->s_maxbytes = MAX_NON_LFS;
}
/* BB FIXME fix time_gran to be larger for LANMAN sessions */
@@ -2726,6 +2726,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
/* mostly informational -- no need to fail on error here */
+ kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
bytes_left, is_unicode,
nls_codepage);
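
For reference, the limits substituted in the s_maxbytes hunk are the generic VFS ones from include/linux/fs.h; roughly (exact form varies by kernel version):

    #define MAX_NON_LFS             ((1UL << 31) - 1)       /* largest non-LFS offset */
    #if BITS_PER_LONG == 32
    #define MAX_LFS_FILESIZE        (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
    #else
    #define MAX_LFS_FILESIZE        0x7fffffffffffffffUL
    #endif

Besides readability, this fixes an off-by-one: (u64)1 << 31 is 2 GiB, one byte past the largest offset a non-LFS client can represent.
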
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 18afe57b2461..82d83839655e 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -212,7 +212,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
* junction to the new submount (ie to setup the fake directory
* which represents a DFS referral).
*/
-void
+static void
cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -388,7 +388,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
}
/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
-void
+static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
@@ -513,9 +513,12 @@ int cifs_get_inode_info(struct inode **pinode,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc1) {
- /* BB EOPNOSUPP disable SERVER_INUM? */
cFYI(1, ("GetSrvInodeNum rc %d", rc1));
fattr.cf_uniqueid = iunique(sb, ROOT_I);
+ /* disable serverino if call not supported */
+ if (rc1 == -EINVAL)
+ cifs_sb->mnt_cifs_flags &=
+ ~CIFS_MOUNT_SERVER_INUM;
}
} else {
fattr.cf_uniqueid = iunique(sb, ROOT_I);
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 3d724a95882f..373fa90c796a 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -130,8 +130,7 @@ static int ext3_readdir(struct file * filp,
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
- err = ext3_get_blocks_handle(NULL, inode, blk, 1,
- &map_bh, 0, 0);
+ err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
if (err > 0) {
pgoff_t index = map_bh.b_blocknr >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5f51fed5c750..b49908a167ae 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -788,7 +788,7 @@ err_out:
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks,
struct buffer_head *bh_result,
- int create, int extend_disksize)
+ int create)
{
int err = -EIO;
int offsets[4];
@@ -911,13 +911,6 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
if (!err)
err = ext3_splice_branch(handle, inode, iblock,
partial, indirect_blks, count);
- /*
- * i_disksize growing is protected by truncate_mutex. Don't forget to
- * protect it if you're about to implement concurrent
- * ext3_get_block() -bzzz
- */
- if (!err && extend_disksize && inode->i_size > ei->i_disksize)
- ei->i_disksize = inode->i_size;
mutex_unlock(&ei->truncate_mutex);
if (err)
goto cleanup;
@@ -972,7 +965,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock,
}
ret = ext3_get_blocks_handle(handle, inode, iblock,
- max_blocks, bh_result, create, 0);
+ max_blocks, bh_result, create);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
@@ -1005,7 +998,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
err = ext3_get_blocks_handle(handle, inode, block, 1,
- &dummy, create, 1);
+ &dummy, create);
/*
* ext3_get_blocks_handle() returns number of blocks
* mapped. 0 in case of a HOLE.
@@ -1193,15 +1186,16 @@ write_begin_failed:
* i_size_read because we hold i_mutex.
*
* Add inode to orphan list in case we crash before truncate
- * finishes.
+ * finishes. Do this only if ext3_can_truncate() agrees so
+ * that orphan processing code is happy.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ext3_journal_stop(handle);
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
}
if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -1287,7 +1281,7 @@ static int ext3_ordered_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ret2 = ext3_journal_stop(handle);
if (!ret)
@@ -1296,7 +1290,7 @@ static int ext3_ordered_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
@@ -1315,14 +1309,14 @@ static int ext3_writeback_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
ret = ext3_journal_stop(handle);
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
@@ -1358,7 +1352,7 @@ static int ext3_journalled_write_end(struct file *file,
* There may be allocated blocks outside of i_size because
* we failed to copy some data. Prepare for truncate.
*/
- if (pos + len > inode->i_size)
+ if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
if (inode->i_size > EXT3_I(inode)->i_disksize) {
@@ -1375,7 +1369,7 @@ static int ext3_journalled_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
+ ext3_truncate(inode);
return ret ? ret : copied;
}
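
All four write paths above now converge on the same crash-safe cleanup; condensed for reference (not verbatim, names as in the hunks):

    /* shared error/cleanup pattern after a short copy */
    if (pos + len > inode->i_size && ext3_can_truncate(inode))
            ext3_orphan_add(handle, inode); /* recoverable if we crash here */
    ext3_journal_stop(handle);
    if (pos + len > inode->i_size)
            ext3_truncate(inode);   /* frees blocks past i_size, drops the orphan record */
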
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 737f7246a4b5..f96f85092d1c 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -287,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
struct page *new_page;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
+ journal_t *journal = transaction->t_journal;
/*
* The buffer really shouldn't be locked: only the current committing
@@ -300,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+ /* keep subsequent assertions sane */
+ new_bh->b_state = 0;
+ init_buffer(new_bh, NULL, NULL);
+ atomic_set(&new_bh->b_count, 1);
+ new_jh = journal_add_journal_head(new_bh); /* This sleeps */
/*
* If a new transaction has already done a buffer copy-out, then
@@ -361,14 +367,6 @@ repeat:
kunmap_atomic(mapped_data, KM_USER0);
}
- /* keep subsequent assertions sane */
- new_bh->b_state = 0;
- init_buffer(new_bh, NULL, NULL);
- atomic_set(&new_bh->b_count, 1);
- jbd_unlock_bh_state(bh_in);
-
- new_jh = journal_add_journal_head(new_bh); /* This sleeps */
-
set_bh_page(new_bh, new_page, new_offset);
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -385,7 +383,11 @@ repeat:
* copying is moved to the transaction's shadow queue.
*/
JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
- journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_lock(&journal->j_list_lock);
+ __journal_file_buffer(jh_in, transaction, BJ_Shadow);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh_in);
+
JBUFFER_TRACE(new_jh, "file as BJ_IO");
journal_file_buffer(new_jh, transaction, BJ_IO);
@@ -848,6 +850,12 @@ static int journal_reset(journal_t *journal)
first = be32_to_cpu(sb->s_first);
last = be32_to_cpu(sb->s_maxlen);
+ if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
+ printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n",
+ first, last);
+ journal_fail_superblock(journal);
+ return -EINVAL;
+ }
journal->j_first = first;
journal->j_last = last;
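
The reordering in journal_write_metadata_buffer() moves the sleeping allocations (alloc_buffer_head, journal_add_journal_head) in front of the bh_state lock and files the buffer on the BJ_Shadow list before that lock is dropped, so journal_get_write_access() can never observe the buffer between copy-out and filing. The resulting critical section, sketched:

    jbd_lock_bh_state(bh_in);
    /* ... copy out or escape the buffer data ... */
    spin_lock(&journal->j_list_lock);
    __journal_file_buffer(jh_in, transaction, BJ_Shadow);
    spin_unlock(&journal->j_list_lock);
    jbd_unlock_bh_state(bh_in);     /* BJ_Shadow is visible before anyone else can look */
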
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 73242ba7c7b1..c03ac11f74be 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -489,34 +489,15 @@ void journal_unlock_updates (journal_t *journal)
wake_up(&journal->j_wait_transaction_locked);
}
-/*
- * Report any unexpected dirty buffers which turn up. Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible. #
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+static void warn_dirty_buffer(struct buffer_head *bh)
{
- int jlist;
-
- /* If this buffer is one which might reasonably be dirty
- * --- ie. data, or not part of this journal --- then
- * we're OK to leave it alone, but otherwise we need to
- * move the dirty bit to the journal's own internal
- * JBDDirty bit. */
- jlist = jh->b_jlist;
+ char b[BDEVNAME_SIZE];
- if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
- jlist == BJ_Shadow || jlist == BJ_Forget) {
- struct buffer_head *bh = jh2bh(jh);
-
- if (test_clear_buffer_dirty(bh))
- set_buffer_jbddirty(bh);
- }
+ printk(KERN_WARNING
+ "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+ "There's a risk of filesystem corruption in case of system "
+ "crash.\n",
+ bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}
/*
@@ -583,14 +564,16 @@ repeat:
if (jh->b_next_transaction)
J_ASSERT_JH(jh, jh->b_next_transaction ==
transaction);
+ warn_dirty_buffer(bh);
}
/*
* In any case we need to clean the dirty flag and we must
* do it under the buffer lock to be sure we don't race
* with running write-out.
*/
- JBUFFER_TRACE(jh, "Unexpected dirty buffer");
- jbd_unexpected_dirty_buffer(jh);
+ JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ clear_buffer_dirty(bh);
+ set_buffer_jbddirty(bh);
}
unlock_buffer(bh);
@@ -826,6 +809,15 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
if (jh->b_transaction == NULL) {
+ /*
+ * Previous journal_forget() could have left the buffer
+ * with jbddirty bit set because it was being committed. When
+ * the commit finished, we've filed the buffer for
+ * checkpointing and marked it dirty. Now we are reallocating
+ * the buffer so the transaction freeing it must have
+ * committed and so it's safe to clear the dirty bit.
+ */
+ clear_buffer_dirty(jh2bh(jh));
jh->b_transaction = transaction;
/* first access by this transaction */
@@ -1782,8 +1774,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
if (jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "on running+cp transaction");
+ /*
+ * We don't want to write the buffer anymore, clear the
+ * bit so that we don't confuse checks in
+ * __journal_file_buffer
+ */
+ clear_buffer_dirty(bh);
__journal_file_buffer(jh, transaction, BJ_Forget);
- clear_buffer_jbddirty(bh);
may_free = 0;
} else {
JBUFFER_TRACE(jh, "on running transaction");
@@ -2041,12 +2038,17 @@ void __journal_file_buffer(struct journal_head *jh,
if (jh->b_transaction && jh->b_jlist == jlist)
return;
- /* The following list of buffer states needs to be consistent
- * with __jbd_unexpected_dirty_buffer()'s handling of dirty
- * state. */
-
if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
jlist == BJ_Shadow || jlist == BJ_Forget) {
+ /*
+ * For metadata buffers, we track dirty bit in buffer_jbddirty
+ * instead of buffer_dirty. We should not see a dirty bit set
+ * here because we clear it in do_get_write_access but e.g.
+ * tune2fs can modify the sb and set the dirty bit at any time
+ * so we try to gracefully handle that.
+ */
+ if (buffer_dirty(bh))
+ warn_dirty_buffer(bh);
if (test_clear_buffer_dirty(bh) ||
test_clear_buffer_jbddirty(bh))
was_dirty = 1;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 91fa3ad6e8c2..a29c7c3e3fb8 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -67,10 +67,8 @@ static struct posix_acl *jfs_get_acl(struct inode *inode, int type)
acl = posix_acl_from_xattr(value, size);
}
kfree(value);
- if (!IS_ERR(acl)) {
+ if (!IS_ERR(acl))
set_cached_acl(inode, type, acl);
- posix_acl_release(acl);
- }
return acl;
}
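
With this change jfs_get_acl() hands its reference to the caller (set_cached_acl() takes its own internal reference for the cache), instead of releasing it and returning a pointer the caller does not own. A sketch of the caller side under that assumption:

    struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);

    if (IS_ERR(acl))
            return PTR_ERR(acl);
    if (acl) {
            /* ... consult the ACL ... */
            posix_acl_release(acl); /* drop the reference we were handed */
    }
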
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c2d061675d80..8d25ccb2d51d 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1242,20 +1242,6 @@ error:
return error;
}
-/*
- * Initialize a session.
- * Note: save the mount rsize and wsize for create_server negotiation.
- */
-static void nfs4_init_session(struct nfs_client *clp,
- unsigned int wsize, unsigned int rsize)
-{
-#if defined(CONFIG_NFS_V4_1)
- if (nfs4_has_session(clp)) {
- clp->cl_session->fc_attrs.max_rqst_sz = wsize;
- clp->cl_session->fc_attrs.max_resp_sz = rsize;
- }
-#endif /* CONFIG_NFS_V4_1 */
-}
/*
* Session has been established, and the client marked ready.
@@ -1350,7 +1336,9 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
- nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+ error = nfs4_init_session(server);
+ if (error < 0)
+ goto error;
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 38d42c29fb92..32062c33c859 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
- case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
+ /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 61bc3a32e1e2..6ea07a3c75d4 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -220,6 +220,7 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *, int reset);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_init_session(struct nfs_server *server);
#else /* CONFIG_NFS_v4_1 */
static inline int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -227,6 +228,11 @@ static inline int nfs4_setup_sequence(struct nfs_client *clp,
{
return 0;
}
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff0c080db59b..6917311f201c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2040,15 +2040,9 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status;
nfs_fattr_init(info->fattr);
- status = nfs4_recover_expired_lease(server);
- if (!status)
- status = nfs4_check_client_ready(server->nfs_client);
- if (!status)
- status = nfs4_call_sync(server, &msg, &args, &res, 0);
- return status;
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -4099,15 +4093,23 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
- if (IS_GETLK(cmd))
- return nfs4_proc_getlk(state, F_GETLK, request);
+ if (IS_GETLK(cmd)) {
+ if (state != NULL)
+ return nfs4_proc_getlk(state, F_GETLK, request);
+ return 0;
+ }
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
- if (request->fl_type == F_UNLCK)
- return nfs4_proc_unlck(state, cmd, request);
+ if (request->fl_type == F_UNLCK) {
+ if (state != NULL)
+ return nfs4_proc_unlck(state, cmd, request);
+ return 0;
+ }
+ if (state == NULL)
+ return -ENOLCK;
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4793,6 +4795,22 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
return status;
}
+int nfs4_init_session(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ int ret;
+
+ if (!nfs4_has_session(clp))
+ return 0;
+
+ clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
+ clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+ ret = nfs4_recover_expired_lease(server);
+ if (!ret)
+ ret = nfs4_check_client_ready(clp);
+ return ret;
+}
+
/*
* Renew the cl_session lease.
*/
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b73c5a728655..65ca8c18476f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -553,6 +553,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
INIT_LIST_HEAD(&lsp->ls_sequence.list);
lsp->ls_seqid.sequence = &lsp->ls_sequence;
atomic_set(&lsp->ls_count, 1);
+ lsp->ls_state = state;
lsp->ls_owner = fl_owner;
spin_lock(&clp->cl_lock);
nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
@@ -587,7 +588,6 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
if (lsp != NULL)
break;
if (new != NULL) {
- new->ls_state = state;
list_add(&new->ls_locks, &state->lock_states);
set_bit(LK_STATE_IN_USE, &state->flags);
lsp = new;
diff --git a/fs/nilfs2/Kconfig b/fs/nilfs2/Kconfig
new file mode 100644
index 000000000000..72da095d4009
--- /dev/null
+++ b/fs/nilfs2/Kconfig
@@ -0,0 +1,25 @@
+config NILFS2_FS
+ tristate "NILFS2 file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ select CRC32
+ help
+ NILFS2 is a log-structured file system (LFS) supporting continuous
+ snapshotting. In addition to versioning capability of the entire
+ file system, users can even restore files mistakenly overwritten or
+ destroyed just a few seconds ago. Since this file system can keep
+ consistency like conventional LFS, it achieves quick recovery after
+ system crashes.
+
+ NILFS2 creates a number of checkpoints every few seconds or per
+ synchronous write basis (unless there is no change). Users can
+ select significant versions among continuously created checkpoints,
+ and can change them into snapshots which will be preserved for long
+ periods until they are changed back to checkpoints. Each
+ snapshot is mountable as a read-only file system concurrently with
+ its writable mount, and this feature is convenient for online backup.
+
+ Some features including atime, extended attributes, and POSIX ACLs,
+ are not supported yet.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called nilfs2. If unsure, say N.
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 31dac7e3b0f1..dffbb0911d02 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,15 +1,5 @@
config FSNOTIFY
- bool "Filesystem notification backend"
- default y
- ---help---
- fsnotify is a backend for filesystem notification. fsnotify does
- not provide any userspace interface but does provide the basis
- needed for other notification schemes such as dnotify, inotify,
- and fanotify.
-
- Say Y here to enable fsnotify suport.
-
- If unsure, say Y.
+ def_bool n
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig
index 904ff8d5405a..f9c1ca139d8f 100644
--- a/fs/notify/dnotify/Kconfig
+++ b/fs/notify/dnotify/Kconfig
@@ -1,6 +1,6 @@
config DNOTIFY
bool "Dnotify support"
- depends on FSNOTIFY
+ select FSNOTIFY
default y
help
Dnotify is a directory-based per-fd file change notification system
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ec2f7bd76818..037e878e03fc 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -159,7 +159,9 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
if (!group->ops->should_send_event(group, to_tell, mask))
continue;
if (!event) {
- event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie);
+ event = fsnotify_create_event(to_tell, mask, data,
+ data_is, file_name, cookie,
+ GFP_KERNEL);
/* shit, we OOM'd and now we can't tell, maybe
* someday someone else will want to do something
* here */
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 5356884289a1..3e56dbffe729 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -15,7 +15,7 @@ config INOTIFY
config INOTIFY_USER
bool "Inotify support for userspace"
- depends on FSNOTIFY
+ select FSNOTIFY
default y
---help---
Say Y here to enable inotify support for userspace, including the
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff27a2965844..f30d9bbc2e1b 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -57,7 +57,6 @@ int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
-static struct fsnotify_event *inotify_ignored_event;
/*
* When inotify registers a new group it increments this and uses that
@@ -365,6 +364,17 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+ struct inotify_inode_mark_entry *ientry)
+{
+ struct idr *idr;
+
+ spin_lock(&group->inotify_data.idr_lock);
+ idr = &group->inotify_data.idr;
+ idr_remove(idr, ientry->wd);
+ spin_unlock(&group->inotify_data.idr_lock);
+ ientry->wd = -1;
+}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
* internal reference help on the mark because it is in the idr.
@@ -373,13 +383,19 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group)
{
struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_event *ignored_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
- struct idr *idr;
+
+ ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
+ FSNOTIFY_EVENT_NONE, NULL, 0,
+ GFP_NOFS);
+ if (!ignored_event)
+ return;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
- event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+ event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
@@ -388,7 +404,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv->group = group;
event_priv->wd = ientry->wd;
- fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
+ fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
/* did the private data get added? */
if (list_empty(&fsn_event_priv->event_list))
@@ -396,14 +412,16 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
skip_send_ignore:
+ /* matches the reference taken when the event was created */
+ fsnotify_put_event(ignored_event);
+
/* remove this entry from the idr */
- spin_lock(&group->inotify_data.idr_lock);
- idr = &group->inotify_data.idr;
- idr_remove(idr, ientry->wd);
- spin_unlock(&group->inotify_data.idr_lock);
+ inotify_remove_from_idr(group, ientry);
/* removed from idr, drop that reference */
fsnotify_put_mark(entry);
+
+ atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
@@ -418,6 +436,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
{
struct fsnotify_mark_entry *entry = NULL;
struct inotify_inode_mark_entry *ientry;
+ struct inotify_inode_mark_entry *tmp_ientry;
int ret = 0;
int add = (arg & IN_MASK_ADD);
__u32 mask;
@@ -428,54 +447,66 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
if (unlikely(!mask))
return -EINVAL;
- ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
- if (unlikely(!ientry))
+ tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_ientry))
return -ENOMEM;
/* we set the mask at the end after attaching it */
- fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
- ientry->wd = 0;
+ fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+ tmp_ientry->wd = -1;
find_entry:
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
- kmem_cache_free(inotify_inode_mark_cachep, ientry);
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
} else {
- if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
- ret = -ENOSPC;
- goto out_err;
- }
-
- ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
- if (ret == -EEXIST)
- goto find_entry;
- else if (ret)
+ ret = -ENOSPC;
+ if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
-
- entry = &ientry->fsn_entry;
retry:
ret = -ENOMEM;
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
spin_lock(&group->inotify_data.idr_lock);
- /* if entry is added to the idr we keep the reference obtained
- * through fsnotify_mark_add. remember to drop this reference
- * when entry is removed from idr */
- ret = idr_get_new_above(&group->inotify_data.idr, entry,
- ++group->inotify_data.last_wd,
- &ientry->wd);
+ ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+ group->inotify_data.last_wd,
+ &tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
+
+ ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+ if (ret) {
+ inotify_remove_from_idr(group, tmp_ientry);
+ if (ret == -EEXIST)
+ goto find_entry;
+ goto out_err;
+ }
+
+ /* tmp_ientry has been added to the inode, so we are all set up.
+ * now we just need to make sure tmp_ientry doesn't get freed and
+ * we need to set up entry and ientry so the generic code can
+ * do its thing. */
+ ientry = tmp_ientry;
+ entry = &ientry->fsn_entry;
+ tmp_ientry = NULL;
+
atomic_inc(&group->inotify_data.user->inotify_watches);
+
+ /* update the idr hint */
+ group->inotify_data.last_wd = ientry->wd;
+
+ /* we put the mark on the idr, take a reference */
+ fsnotify_get_mark(entry);
}
+ ret = ientry->wd;
+
spin_lock(&entry->lock);
old_mask = entry->mask;
@@ -506,14 +537,19 @@ retry:
fsnotify_recalc_group_mask(group);
}
- return ientry->wd;
+ /* this either matches fsnotify_find_mark_entry, or init_mark_entry
+ * depending on which path we took... */
+ fsnotify_put_mark(entry);
out_err:
- /* see this isn't supposed to happen, just kill the watch */
- if (entry) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ /* could be an error, could be that we found an existing mark */
+ if (tmp_ientry) {
+ /* on the idr but didn't make it on the inode */
+ if (tmp_ientry->wd != -1)
+ inotify_remove_from_idr(group, tmp_ientry);
+ kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
}
+
return ret;
}
@@ -721,9 +757,6 @@ static int __init inotify_user_setup(void)
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
- inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
- if (!inotify_ignored_event)
- panic("unable to allocate the inotify ignored event\n");
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
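
The retry loop in inotify_update_watch() uses the old two-step idr API (this predates idr_alloc()); the idiom, for reference, with placeholder names outside the idr calls themselves:

    int ret, wd;
    again:
    if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
            return -ENOMEM;         /* preallocate nodes outside the spinlock */
    spin_lock(lock);
    ret = idr_get_new_above(idr, entry, start_id, &wd);
    spin_unlock(lock);
    if (ret == -EAGAIN)             /* another CPU consumed the preallocation */
            goto again;

Allocating the wd before fsnotify_add_mark(), and unwinding with inotify_remove_from_idr() on failure, is what lets the error path at out_err distinguish "on the idr but not on the inode" (wd != -1) from a mark that was never published.
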
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 959b73e756fd..521368574e97 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -136,18 +136,24 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
{
if ((old->mask == new->mask) &&
(old->to_tell == new->to_tell) &&
- (old->data_type == new->data_type)) {
+ (old->data_type == new->data_type) &&
+ (old->name_len == new->name_len)) {
switch (old->data_type) {
case (FSNOTIFY_EVENT_INODE):
- if (old->inode == new->inode)
+ /* remember, after old was put on the wait_q we aren't
+ * allowed to look at the inode any more, only thing
+ * left to check was if the file_name is the same */
+ if (old->name_len &&
+ !strcmp(old->file_name, new->file_name))
return true;
break;
case (FSNOTIFY_EVENT_PATH):
if ((old->path.mnt == new->path.mnt) &&
(old->path.dentry == new->path.dentry))
return true;
+ break;
case (FSNOTIFY_EVENT_NONE):
- return true;
+ return false;
};
}
return false;
@@ -339,18 +345,19 @@ static void initialize_event(struct fsnotify_event *event)
* @name the filename, if available
*/
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
- int data_type, const char *name, u32 cookie)
+ int data_type, const char *name, u32 cookie,
+ gfp_t gfp)
{
struct fsnotify_event *event;
- event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+ event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
initialize_event(event);
if (name) {
- event->file_name = kstrdup(name, GFP_KERNEL);
+ event->file_name = kstrdup(name, gfp);
if (!event->file_name) {
kmem_cache_free(fsnotify_event_cachep, event);
return NULL;
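
With the gfp_t parameter, each caller of fsnotify_create_event() now picks an allocation context appropriate to where it runs; both call sites from the hunks above, for reference:

    /* ordinary event delivery, plain process context */
    event = fsnotify_create_event(to_tell, mask, data, data_is,
                                  file_name, cookie, GFP_KERNEL);

    /* mark teardown can run under fs reclaim; GFP_NOFS avoids re-entering the fs */
    event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
                                  FSNOTIFY_EVENT_NONE, NULL, 0, GFP_NOFS);

This is also what lets the statically preallocated inotify_ignored_event (removed above) go away: the IN_IGNORED event is now allocated on demand at teardown time.
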
diff --git a/fs/pipe.c b/fs/pipe.c
index f7dd21ad85a6..52c415114838 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -68,8 +68,8 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
} else {
- pipe_lock_nested(pipe2, I_MUTEX_CHILD);
- pipe_lock_nested(pipe1, I_MUTEX_PARENT);
+ pipe_lock_nested(pipe2, I_MUTEX_PARENT);
+ pipe_lock_nested(pipe1, I_MUTEX_CHILD);
}
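
Reconstructed from the hunk (the BUG_ON is assumed from context), the corrected function always annotates the first-taken lock as I_MUTEX_PARENT, so the lockdep annotation matches the by-address acquisition order in both branches:

    void pipe_double_lock(struct pipe_inode_info *pipe1,
                          struct pipe_inode_info *pipe2)
    {
            BUG_ON(pipe1 == pipe2);

            if (pipe1 < pipe2) {
                    pipe_lock_nested(pipe1, I_MUTEX_PARENT);
                    pipe_lock_nested(pipe2, I_MUTEX_CHILD);
            } else {
                    pipe_lock_nested(pipe2, I_MUTEX_PARENT);
                    pipe_lock_nested(pipe1, I_MUTEX_CHILD);
            }
    }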
}