| author | Qu Wenruo <wqu@suse.com> | 2026-01-20 10:30:08 +1030 |
|---|---|---|
| committer | David Sterba <dsterba@suse.com> | 2026-02-03 07:56:17 +0100 |
| commit | 37cc07cab7dc311f2b7aeaaa7598face53eddcab | |
| tree | 5a23b410466a216b825b62f6673e817c31bfcf65 | |
| parent | 4b7ecd0984e34262430b9db7efbfaf293b4b4d3c | |
btrfs: lzo: use folio_iter to handle lzo_decompress_bio()
Currently lzo_decompress_bio() uses the
compressed_bio->compressed_folios[] array to grab each compressed folio.
This keeps the code easy to read, as we only need to maintain a single
iterator, @cur_in, and can grab any random folio using
@cur_in >> min_folio_shift as the index.
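As a quick illustration of that index arithmetic (the 4K folio size and
the byte offset below are assumptions for this sketch, not values taken
from the patch):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assume 4K folios, i.e. min_folio_shift == PAGE_SHIFT + block_min_order == 12. */
	const uint32_t min_folio_shift = 12;
	const uint32_t cur_in = 5000;	/* hypothetical byte offset into the compressed data */

	uint32_t folio_index = cur_in >> min_folio_shift;		 /* 5000 / 4096 == 1 */
	uint32_t folio_offset = cur_in & ((1u << min_folio_shift) - 1); /* 5000 % 4096 == 904 */

	printf("offset %u -> folio %u, offset-in-folio %u\n",
	       cur_in, folio_index, folio_offset);
	return 0;
}
```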
However lzo_decompress_bio() itself is guaranteed to only advance one
folio at a time, and compressed_folios[] is just an array of pointers to
the folios of the compressed bio, so lzo_decompress_bio() has no real
need for random access.
Replace the compressed_folios[] access with a helper,
get_current_folio(), which uses a folio_iter and an external folio
counter to switch to the next folio when needed.
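For context, a minimal sketch of the folio_iter traversal pattern the
helper builds on, using bio_first_folio()/bio_next_folio() from
<linux/bio.h>; the function itself is hypothetical and only illustrates
the one-folio-at-a-time walk:

```c
#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical helper: walk every folio of a bio strictly in order. */
static void dump_bio_folios(struct bio *bio)
{
	struct folio_iter fi;

	/* Position the iterator at the first folio of the bio. */
	bio_first_folio(&fi, bio, 0);

	/* fi.folio becomes NULL once the bio is exhausted. */
	while (fi.folio) {
		pr_info("folio size=%zu offset=%zu length=%zu\n",
			folio_size(fi.folio), fi.offset, fi.length);
		/* Advance exactly one folio, as get_current_folio() does on a boundary. */
		bio_next_folio(&fi, bio);
	}
}
```

Because the iterator can only move forward, the helper keeps
*cur_folio_index alongside it and asserts that @cur_in never skips ahead
by more than one folio.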
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
| -rw-r--r-- | fs/btrfs/lzo.c | 48 |
1 file changed, 38 insertions(+), 10 deletions(-)
```diff
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 4758f66da449..4024ce416971 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -310,23 +310,43 @@ out:
 	return ret;
 }
 
+static struct folio *get_current_folio(struct compressed_bio *cb, struct folio_iter *fi,
+				       u32 *cur_folio_index, u32 cur_in)
+{
+	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
+	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+
+	ASSERT(cur_folio_index);
+
+	/* Need to switch to the next folio. */
+	if (cur_in >> min_folio_shift != *cur_folio_index) {
+		/* We can only do the switch one folio a time. */
+		ASSERT(cur_in >> min_folio_shift == *cur_folio_index + 1);
+
+		bio_next_folio(fi, &cb->bbio.bio);
+		(*cur_folio_index)++;
+	}
+	return fi->folio;
+}
+
 /*
  * Copy the compressed segment payload into @dest.
  *
  * For the payload there will be no padding, just need to do page switching.
  */
 static void copy_compressed_segment(struct compressed_bio *cb,
+				    struct folio_iter *fi, u32 *cur_folio_index,
 				    char *dest, u32 len, u32 *cur_in)
 {
-	struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
-	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
 	u32 orig_in = *cur_in;
 
 	while (*cur_in < orig_in + len) {
-		struct folio *cur_folio = cb->compressed_folios[*cur_in >> min_folio_shift];
-		u32 copy_len = min_t(u32, orig_in + len - *cur_in,
-				     folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
+		struct folio *cur_folio = get_current_folio(cb, fi, cur_folio_index, *cur_in);
+		u32 copy_len;
 
+		ASSERT(cur_folio);
+		copy_len = min_t(u32, orig_in + len - *cur_in,
+				 folio_size(cur_folio) - offset_in_folio(cur_folio, *cur_in));
 		ASSERT(copy_len);
 
 		memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
@@ -341,7 +361,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
 	const u32 sectorsize = fs_info->sectorsize;
-	const u32 min_folio_shift = PAGE_SHIFT + fs_info->block_min_order;
+	struct folio_iter fi;
 	char *kaddr;
 	int ret;
 	/* Compressed data length, can be unaligned */
@@ -350,8 +370,15 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	u32 cur_in = 0;
 	/* Bytes decompressed so far */
 	u32 cur_out = 0;
-
-	kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
+	/* The current folio index number inside the bio. */
+	u32 cur_folio_index = 0;
+
+	bio_first_folio(&fi, &cb->bbio.bio, 0);
+	/* There must be a compressed folio and matches the sectorsize. */
+	if (unlikely(!fi.folio))
+		return -EINVAL;
+	ASSERT(folio_size(fi.folio) == sectorsize);
+	kaddr = kmap_local_folio(fi.folio, 0);
 	len_in = read_compress_length(kaddr);
 	kunmap_local(kaddr);
 	cur_in += LZO_LEN;
@@ -388,7 +415,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		 */
 		ASSERT(cur_in / sectorsize == (cur_in + LZO_LEN - 1) / sectorsize);
 
-		cur_folio = cb->compressed_folios[cur_in >> min_folio_shift];
+		cur_folio = get_current_folio(cb, &fi, &cur_folio_index, cur_in);
 		ASSERT(cur_folio);
 		kaddr = kmap_local_folio(cur_folio, 0);
 		seg_len = read_compress_length(kaddr + offset_in_folio(cur_folio, cur_in));
@@ -410,7 +437,8 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		}
 
 		/* Copy the compressed segment payload into workspace */
-		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
+		copy_compressed_segment(cb, &fi, &cur_folio_index, workspace->cbuf,
+					seg_len, &cur_in);
 
 		/* Decompress the data */
 		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
```
