author	Qu Wenruo <wqu@suse.com>	2026-01-29 13:53:45 +1030
committer	David Sterba <dsterba@suse.com>	2026-02-03 07:59:07 +0100
commit	e1bc83f8b157689e5de4f651b6fbb9dcdccd33c1 (patch)
tree	6a0bddcf13944434d37b1c6a4d29cb610fd3b696
parent	dafcfa1c8e377a3d8e2e1d72a76435b57ed1ac7d (diff)
btrfs: get rid of compressed_folios[] usage for encoded writes
Currently only encoded writes utilize btrfs_submit_compressed_write(), which
relies on the compressed_bio::compressed_folios[] array.

Change the only call site to call the new helper,
btrfs_alloc_compressed_write(), to allocate a compressed bio, then queue the
needed folios into that bio, and finally call btrfs_submit_compressed_write()
to submit the compressed bio.

This change has one hidden benefit: previously we used
btrfs_alloc_folio_array() for the folios of btrfs_submit_compressed_write(),
which doesn't utilize the compression page pool for bs == ps cases. Now we
call btrfs_alloc_compr_folio(), which benefits from the page pool.

The other obvious benefit is that we no longer need to allocate an array to
hold all those folios, so there is one less error path.

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
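For reference, the new caller pattern reduces to the condensed sketch below.
It is illustrative only: the real code lives inside btrfs_do_encoded_write()
in the inode.c hunk further down, the helper name encoded_write_sketch() is
invented for this example, and all of the space reservation, extent locking
and ordered extent setup that surrounds this in the real function is elided.

/*
 * Condensed sketch of the new encoded-write flow after this patch:
 * allocate the compressed_bio first, queue folios straight into its
 * embedded bio, then hand the whole thing to btrfs_submit_compressed_write().
 */
static int encoded_write_sketch(struct btrfs_inode *inode,
				struct btrfs_ordered_extent *ordered,
				struct iov_iter *from,
				u64 start, u64 num_bytes, u64 disk_num_bytes)
{
	const u32 min_folio_size = btrfs_min_folio_size(inode->root->fs_info);
	struct compressed_bio *cb;
	int ret = 0;

	/* One allocation replaces the old kvcalloc()'d folio array. */
	cb = btrfs_alloc_compressed_write(inode, start, num_bytes);

	for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
		size_t bytes = min(min_folio_size, iov_iter_count(from));
		struct folio *folio;
		char *kaddr;

		/* Comes from the compression page pool for bs == ps cases. */
		folio = btrfs_alloc_compr_folio(inode->root->fs_info);
		if (!folio) {
			ret = -ENOMEM;
			goto out_cb;
		}

		kaddr = kmap_local_folio(folio, 0);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap_local(kaddr);
			folio_put(folio);
			ret = -EFAULT;
			goto out_cb;
		}
		kunmap_local(kaddr);

		/* Zero the tail of the last folio, as the real code does. */
		if (bytes < min_folio_size)
			folio_zero_range(folio, bytes, min_folio_size - bytes);

		/* Queue the folio directly into the compressed bio. */
		if (!bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0)) {
			folio_put(folio);
			ret = -EINVAL;
			goto out_cb;
		}
	}

	/* Submission consumes the cb and the folios queued into it. */
	btrfs_submit_compressed_write(ordered, cb);
	return 0;

out_cb:
	/* Single cleanup path for the cb and any folios already queued. */
	cleanup_compressed_bio(cb);
	return ret;
}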
-rw-r--r--	fs/btrfs/compression.c	52
-rw-r--r--	fs/btrfs/compression.h	6
-rw-r--r--	fs/btrfs/inode.c	61
3 files changed, 57 insertions, 62 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 8501a5e4132d..dcd7bc60107d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -304,25 +304,6 @@ static void end_bbio_compressed_write(struct btrfs_bio *bbio)
bio_put(&cb->bbio.bio);
}
-static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
-{
- struct bio *bio = &cb->bbio.bio;
- u32 offset = 0;
- unsigned int findex = 0;
-
- while (offset < cb->compressed_len) {
- struct folio *folio = cb->compressed_folios[findex];
- u32 len = min_t(u32, cb->compressed_len - offset, folio_size(folio));
- int ret;
-
- /* Maximum compressed extent is smaller than bio size limit. */
- ret = bio_add_folio(bio, folio, len, 0);
- ASSERT(ret);
- offset += len;
- findex++;
- }
-}
-
/*
* worker function to build and submit bios for previously compressed pages.
* The corresponding pages in the inode should be marked for writeback
@@ -333,35 +314,44 @@ static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
* the end io hooks.
*/
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios,
- blk_opf_t write_flags,
- bool writeback)
+ struct compressed_bio *cb)
{
struct btrfs_inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct compressed_bio *cb;
ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
+ ASSERT(cb->writeback);
- cb = alloc_compressed_bio(inode, ordered->file_offset,
- REQ_OP_WRITE | write_flags,
- end_bbio_compressed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
- cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
- cb->writeback = writeback;
- cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
- btrfs_add_compressed_bio_folios(cb);
btrfs_submit_bbio(&cb->bbio, 0);
}
/*
+ * Allocate a compressed write bio for @inode file offset @start length @len.
+ *
+ * The caller still needs to properly queue all folios and populate involved
+ * members.
+ */
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len)
+{
+ struct compressed_bio *cb;
+
+ cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE, end_bbio_compressed_write);
+ cb->start = start;
+ cb->len = len;
+ cb->writeback = true;
+
+ return cb;
+}
+
+/*
* Add extra pages in the same compressed file extent so that we don't need to
* re-read the same extent again and again.
*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 7dc48e556313..2d3a28b26997 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -96,10 +96,10 @@ int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
+struct compressed_bio *btrfs_alloc_compressed_write(struct btrfs_inode *inode,
+ u64 start, u64 len);
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct folio **compressed_folios,
- unsigned int nr_folios, blk_opf_t write_flags,
- bool writeback);
+ struct compressed_bio *cb);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
int btrfs_compress_str2level(unsigned int type, const char *str, int *level_ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 279e04892288..1aebd2ee2704 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9828,12 +9828,12 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
struct extent_state *cached_state = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_file_extent file_extent;
+ struct compressed_bio *cb = NULL;
int compression;
size_t orig_count;
+ const u32 min_folio_size = btrfs_min_folio_size(fs_info);
u64 start, end;
u64 num_bytes, ram_bytes, disk_num_bytes;
- unsigned long nr_folios, i;
- struct folio **folios;
struct btrfs_key ins;
bool extent_reserved = false;
struct extent_map *em;
@@ -9922,39 +9922,46 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
* isn't.
*/
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
- nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
- folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
- if (!folios)
- return -ENOMEM;
- for (i = 0; i < nr_folios; i++) {
- size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
+
+ cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
+ for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
+ struct folio *folio;
+ size_t bytes = min(min_folio_size, iov_iter_count(from));
char *kaddr;
- folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
- if (!folios[i]) {
+ folio = btrfs_alloc_compr_folio(fs_info);
+ if (!folio) {
ret = -ENOMEM;
- goto out_folios;
+ goto out_cb;
}
- kaddr = kmap_local_folio(folios[i], 0);
- if (copy_from_iter(kaddr, bytes, from) != bytes) {
- kunmap_local(kaddr);
+ kaddr = kmap_local_folio(folio, 0);
+ ret = copy_from_iter(kaddr, bytes, from);
+ kunmap_local(kaddr);
+ if (ret != bytes) {
+ folio_put(folio);
ret = -EFAULT;
- goto out_folios;
+ goto out_cb;
+ }
+ if (bytes < min_folio_size)
+ folio_zero_range(folio, bytes, min_folio_size - bytes);
+ ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
+ if (unlikely(!ret)) {
+ folio_put(folio);
+ ret = -EINVAL;
+ goto out_cb;
}
- if (bytes < PAGE_SIZE)
- memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
- kunmap_local(kaddr);
}
+ ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
for (;;) {
ret = btrfs_wait_ordered_range(inode, start, num_bytes);
if (ret)
- goto out_folios;
+ goto out_cb;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
if (ret)
- goto out_folios;
+ goto out_cb;
btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
@@ -9986,7 +9993,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
encoded->unencoded_offset == 0 &&
can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
ret = __cow_file_range_inline(inode, encoded->len,
- orig_count, compression, folios[0],
+ orig_count, compression,
+ bio_first_folio_all(&cb->bbio.bio),
true);
if (ret <= 0) {
if (ret == 0)
@@ -10031,7 +10039,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
btrfs_delalloc_release_extents(inode, num_bytes);
- btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
+ btrfs_submit_compressed_write(ordered, cb);
ret = orig_count;
goto out;
@@ -10053,12 +10061,9 @@ out_free_data_space:
btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
btrfs_unlock_extent(io_tree, start, end, &cached_state);
-out_folios:
- for (i = 0; i < nr_folios; i++) {
- if (folios[i])
- folio_put(folios[i]);
- }
- kvfree(folios);
+out_cb:
+ if (cb)
+ cleanup_compressed_bio(cb);
out:
if (ret >= 0)
iocb->ki_pos += encoded->len;