Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--    fs/f2fs/data.c    215
1 file changed, 141 insertions, 74 deletions
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 36fe82012a33..87c1f4150c64 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -282,29 +282,32 @@ static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
nid_t ino, pgoff_t idx, enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
- struct f2fs_bio_info *io = &sbi->write_io[btype];
- bool ret;
+ enum temp_type temp;
+ struct f2fs_bio_info *io;
+ bool ret = false;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+ io = sbi->write_io[btype] + temp;
+
+ down_read(&io->io_rwsem);
+ ret = __has_merged_page(io, inode, ino, idx);
+ up_read(&io->io_rwsem);
- down_read(&io->io_rwsem);
- ret = __has_merged_page(io, inode, ino, idx);
- up_read(&io->io_rwsem);
+ /* TODO: use HOT temp only for meta pages now. */
+ if (ret || btype == META)
+ break;
+ }
return ret;
}
-static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type, int rw)
+static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ enum page_type type, enum temp_type temp)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
- struct f2fs_bio_info *io;
-
- io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
+ struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
down_write(&io->io_rwsem);
- if (!__has_merged_page(io, inode, ino, idx))
- goto out;
-
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
@@ -314,29 +317,45 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
}
__submit_merged_bio(io);
-out:
up_write(&io->io_rwsem);
}
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
- int rw)
+static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, nid_t ino, pgoff_t idx,
+ enum page_type type, bool force)
{
- __f2fs_submit_merged_bio(sbi, NULL, 0, 0, type, rw);
+ enum temp_type temp;
+
+ if (!force && !has_merged_page(sbi, inode, ino, idx, type))
+ return;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+
+ __f2fs_submit_merged_write(sbi, type, temp);
+
+ /* TODO: use HOT temp only for meta pages now. */
+ if (type >= META)
+ break;
+ }
}
-void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
+{
+ __submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
+}
+
+void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type, int rw)
+ enum page_type type)
{
- if (has_merged_page(sbi, inode, ino, idx, type))
- __f2fs_submit_merged_bio(sbi, inode, ino, idx, type, rw);
+ __submit_merged_write_cond(sbi, inode, ino, idx, type, false);
}
-void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- f2fs_submit_merged_bio(sbi, META, WRITE);
+ f2fs_submit_merged_write(sbi, DATA);
+ f2fs_submit_merged_write(sbi, NODE);
+ f2fs_submit_merged_write(sbi, META);
}
/*
@@ -368,16 +387,29 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
return 0;
}
-int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
+int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
- struct f2fs_bio_info *io;
- bool is_read = is_read_io(fio->op);
+ struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
int err = 0;
- io = is_read ? &sbi->read_io : &sbi->write_io[btype];
+ f2fs_bug_on(sbi, is_read_io(fio->op));
+
+ down_write(&io->io_rwsem);
+next:
+ if (fio->in_list) {
+ spin_lock(&io->io_lock);
+ if (list_empty(&io->io_list)) {
+ spin_unlock(&io->io_lock);
+ goto out_fail;
+ }
+ fio = list_first_entry(&io->io_list,
+ struct f2fs_io_info, list);
+ list_del(&fio->list);
+ spin_unlock(&io->io_lock);
+ }
if (fio->old_blkaddr != NEW_ADDR)
verify_block_addr(sbi, fio->old_blkaddr);
@@ -388,10 +420,7 @@ int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
/* set submitted = 1 as a return value */
fio->submitted = 1;
- if (!is_read)
- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
-
- down_write(&io->io_rwsem);
+ inc_page_count(sbi, WB_DATA_TYPE(bio_page));
if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
@@ -402,26 +431,28 @@ alloc_new:
if ((fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
err = -EAGAIN;
- if (!is_read)
- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+ dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr,
- BIO_MAX_PAGES, is_read);
+ BIO_MAX_PAGES, false);
io->fio = *fio;
}
- if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
- PAGE_SIZE) {
+ if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
+
+ trace_f2fs_submit_page_write(fio->page, fio);
+
+ if (fio->in_list)
+ goto next;
out_fail:
up_write(&io->io_rwsem);
- trace_f2fs_submit_page_mbio(fio->page, fio);
return err;
}
@@ -460,14 +491,15 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ int err;
if (!count)
return 0;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
- return -ENOSPC;
+ if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ return err;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
dn->ofs_in_node, count);
@@ -718,6 +750,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
struct node_info ni;
pgoff_t fofs;
blkcnt_t count = 1;
+ int err;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
@@ -726,15 +759,15 @@ static int __allocate_data_block(struct dnode_of_data *dn)
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
- return -ENOSPC;
+ if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ return err;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, CURSEG_WARM_DATA);
+ &sum, CURSEG_WARM_DATA, NULL, false);
set_data_blkaddr(dn);
/* update i_size */
@@ -1321,7 +1354,7 @@ retry_encrypt:
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
- f2fs_flush_merged_bios(fio->sbi);
+ f2fs_flush_merged_writes(fio->sbi);
congestion_wait(BLK_RW_ASYNC, HZ/50);
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
@@ -1368,13 +1401,14 @@ int do_write_data_page(struct f2fs_io_info *fio)
if (valid_ipu_blkaddr(fio)) {
ipu_force = true;
- fio->need_lock = false;
+ fio->need_lock = LOCK_DONE;
goto got_it;
}
}
- if (fio->need_lock)
- f2fs_lock_op(fio->sbi);
+ /* Avoid a deadlock between page->lock and f2fs_lock_op */
+ if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
+ return -EAGAIN;
err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
if (err)
@@ -1388,19 +1422,18 @@ int do_write_data_page(struct f2fs_io_info *fio)
goto out_writepage;
}
got_it:
- err = encrypt_one_page(fio);
- if (err)
- goto out_writepage;
-
- set_page_writeback(page);
-
/*
* If current allocation needs SSR,
* it had better do in-place writes for updated data.
*/
if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
+ err = encrypt_one_page(fio);
+ if (err)
+ goto out_writepage;
+
+ set_page_writeback(page);
f2fs_put_dnode(&dn);
- if (fio->need_lock)
+ if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
err = rewrite_data_page(fio);
trace_f2fs_do_write_data_page(fio->page, IPU);
@@ -1408,6 +1441,20 @@ got_it:
return err;
}
+ if (fio->need_lock == LOCK_RETRY) {
+ if (!f2fs_trylock_op(fio->sbi)) {
+ err = -EAGAIN;
+ goto out_writepage;
+ }
+ fio->need_lock = LOCK_REQ;
+ }
+
+ err = encrypt_one_page(fio);
+ if (err)
+ goto out_writepage;
+
+ set_page_writeback(page);
+
/* LFS mode write path */
write_data_page(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
@@ -1417,7 +1464,7 @@ got_it:
out_writepage:
f2fs_put_dnode(&dn);
out:
- if (fio->need_lock)
+ if (fio->need_lock == LOCK_REQ)
f2fs_unlock_op(fio->sbi);
return err;
}
@@ -1443,11 +1490,14 @@ static int __write_data_page(struct page *page, bool *submitted,
.page = page,
.encrypted_page = NULL,
.submitted = false,
- .need_lock = true,
+ .need_lock = LOCK_RETRY,
};
trace_f2fs_writepage(page, DATA);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto redirty_out;
+
if (page->index < end_index)
goto write;
@@ -1461,8 +1511,6 @@ static int __write_data_page(struct page *page, bool *submitted,
zero_user_segment(page, offset, PAGE_SIZE);
write:
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
- goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
/* we should not write 0'th page having journal header */
@@ -1479,7 +1527,7 @@ write:
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
- fio.need_lock = false;
+ fio.need_lock = LOCK_DONE;
err = do_write_data_page(&fio);
goto done;
}
@@ -1498,8 +1546,13 @@ write:
goto out;
}
- if (err == -EAGAIN)
+ if (err == -EAGAIN) {
err = do_write_data_page(&fio);
+ if (err == -EAGAIN) {
+ fio.need_lock = LOCK_REQ;
+ err = do_write_data_page(&fio);
+ }
+ }
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
@@ -1513,8 +1566,7 @@ out:
ClearPageUptodate(page);
if (wbc->for_reclaim) {
- f2fs_submit_merged_bio_cond(sbi, inode, 0, page->index,
- DATA, WRITE);
+ f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
clear_inode_flag(inode, FI_HOT_DATA);
remove_dirty_inode(inode);
submitted = NULL;
@@ -1525,7 +1577,7 @@ out:
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ f2fs_submit_merged_write(sbi, DATA);
submitted = NULL;
}
@@ -1618,7 +1670,7 @@ retry:
}
done_index = page->index;
-
+retry_write:
lock_page(page);
if (unlikely(page->mapping != mapping)) {
@@ -1654,6 +1706,15 @@ continue_unlock:
unlock_page(page);
ret = 0;
continue;
+ } else if (ret == -EAGAIN) {
+ ret = 0;
+ if (wbc->sync_mode == WB_SYNC_ALL) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC,
+ HZ/50);
+ goto retry_write;
+ }
+ continue;
}
done_index = page->index + 1;
done = 1;
@@ -1684,8 +1745,8 @@ continue_unlock:
mapping->writeback_index = done_index;
if (last_idx != ULONG_MAX)
- f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
- 0, last_idx, DATA, WRITE);
+ f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
+ 0, last_idx, DATA);
return ret;
}
@@ -1706,6 +1767,10 @@ static int f2fs_write_data_pages(struct address_space *mapping,
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
+ /* during POR, we don't need to trigger writepage at all. */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
+
if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
available_free_memory(sbi, DIRTY_DENTS))
@@ -1715,10 +1780,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
if (is_inode_flag_set(inode, FI_DO_DEFRAG))
goto skip_write;
- /* during POR, we don't need to trigger writepage at all. */
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
- goto skip_write;
-
trace_f2fs_writepages(mapping->host, wbc, DATA);
/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
@@ -1753,8 +1814,10 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
loff_t i_size = i_size_read(inode);
if (to > i_size) {
+ down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
truncate_blocks(inode, i_size, true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
}
}
@@ -2152,8 +2215,12 @@ int f2fs_migrate_page(struct address_space *mapping,
BUG_ON(PageWriteback(page));
/* migrating an atomic written page is safe with the inmem_lock held */
- if (atomic_written && !mutex_trylock(&fi->inmem_lock))
- return -EAGAIN;
+ if (atomic_written) {
+ if (mode != MIGRATE_SYNC)
+ return -EBUSY;
+ if (!mutex_trylock(&fi->inmem_lock))
+ return -EAGAIN;
+ }
/*
* A reference is expected if PagePrivate set when move mapping,