path: root/fs/btrfs/extent_io.c
author    Qu Wenruo <wqu@suse.com>          2025-04-04 10:20:21 +1030
committer David Sterba <dsterba@suse.com>   2025-05-15 14:30:43 +0200
commit    e1fcad644b405928b4bf74b1cfd9322008992019 (patch)
tree      b3b42a458a937eaa36a45066ad082e5daeecb04f  /fs/btrfs/extent_io.c
parent    0f987c099d22c3b8c7d94fd13f957792e46f79c9 (diff)
btrfs: remove unnecessary early exits in delalloc folio lock and unlock
Inside the functions unlock_delalloc_folio() and lock_delalloc_folios() we have the following early exits:

	if (index == locked_folio->index && end_index == index)
		return;

This allows us to exit early if the range is inside the same locked folio.

However, the check relies on page-sized folios: if a large folio contains @index but does not start at @index, the early exit no longer triggers.

Furthermore, without the above early check the existing code handles this case well, as both __process_folios_contig() and lock_delalloc_folios() skip any folio lock/unlock if it is on the locked folio.

Remove the early exits and let the existing code handle the same-index case, making the code a little simpler.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
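To make the large-folio problem concrete, here is a small standalone sketch. It uses a hypothetical mock_folio type (not the kernel's struct folio) purely to illustrate why an equality check on the starting page index misses ranges that sit inside, but not at the start of, a large folio:

	/*
	 * Illustrative sketch only, with made-up types: shows why the
	 * removed equality check fails for ranges inside a large folio.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct mock_folio {
		unsigned long index;	/* first page index covered by the folio */
		unsigned long nr_pages;	/* number of pages the folio spans */
	};

	/* The removed check: only true when the range starts exactly at the folio. */
	static bool old_early_exit(const struct mock_folio *locked,
				   unsigned long index, unsigned long end_index)
	{
		return index == locked->index && end_index == index;
	}

	/* What actually matters for a large folio: is the range inside it at all? */
	static bool range_inside_folio(const struct mock_folio *locked,
				       unsigned long index, unsigned long end_index)
	{
		return index >= locked->index &&
		       end_index < locked->index + locked->nr_pages;
	}

	int main(void)
	{
		/* A 4-page folio starting at page index 8. */
		struct mock_folio locked = { .index = 8, .nr_pages = 4 };

		/* Range covering only page 10: inside the folio, but not at its start. */
		unsigned long index = 10, end_index = 10;

		printf("old early exit fires: %d\n",
		       old_early_exit(&locked, index, end_index));		/* 0 */
		printf("range is inside the locked folio: %d\n",
		       range_inside_folio(&locked, index, end_index));	/* 1 */
		return 0;
	}

With page-sized folios the two functions agree, so the early exit was harmless; with large folios only the containment test reflects reality, which is why the equality check can no longer be relied on.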
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	|  8 --------
1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 791a9972b2e6..12a668dd5e5e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -224,12 +224,7 @@ static noinline void unlock_delalloc_folio(const struct inode *inode,
const struct folio *locked_folio,
u64 start, u64 end)
{
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
-
ASSERT(locked_folio);
- if (index == locked_folio->index && end_index == index)
- return;
__process_folios_contig(inode->i_mapping, locked_folio, start, end,
PAGE_UNLOCK);
@@ -246,9 +241,6 @@ static noinline int lock_delalloc_folios(struct inode *inode,
u64 processed_end = start;
struct folio_batch fbatch;
- if (index == locked_folio->index && index == end_index)
- return 0;
-
folio_batch_init(&fbatch);
while (index <= end_index) {
unsigned int found_folios, i;
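The removals above are safe because the per-folio loops already skip the locked folio, so a range that lies entirely inside it simply results in a pass that touches nothing. The following standalone sketch (again with a hypothetical mock_folio type and a simplified loop, not the kernel source) illustrates that behaviour:

	/*
	 * Standalone sketch: a range walk that skips the locked folio does
	 * nothing when the whole range lies inside that folio, so an explicit
	 * early exit adds no value.
	 */
	#include <stdio.h>

	struct mock_folio {
		unsigned long index;	/* first page index of the folio */
		unsigned long nr_pages;	/* pages spanned by the folio */
	};

	/* "Unlock" every folio overlapping [index, end_index], skipping @locked. */
	static unsigned long process_range(struct mock_folio *folios, int nr,
					   const struct mock_folio *locked,
					   unsigned long index, unsigned long end_index)
	{
		unsigned long unlocked = 0;

		for (int i = 0; i < nr; i++) {
			struct mock_folio *folio = &folios[i];
			unsigned long last = folio->index + folio->nr_pages - 1;

			if (last < index || folio->index > end_index)
				continue;	/* folio does not overlap the range */
			if (folio == locked)
				continue;	/* never touch the locked folio */
			unlocked++;
		}
		return unlocked;
	}

	int main(void)
	{
		/* A large 4-page folio at index 8 and a single-page folio at 12. */
		struct mock_folio folios[] = {
			{ .index = 8, .nr_pages = 4 },
			{ .index = 12, .nr_pages = 1 },
		};
		const struct mock_folio *locked = &folios[0];

		/* Range entirely inside the locked folio: nothing is unlocked. */
		printf("%lu\n", process_range(folios, 2, locked, 10, 10));	/* 0 */

		/* Range spilling past the locked folio: only the next folio is unlocked. */
		printf("%lu\n", process_range(folios, 2, locked, 10, 12));	/* 1 */
		return 0;
	}

The first call mirrors the case the deleted early exits were guarding: the loop runs, skips the locked folio, and performs no lock/unlock operations, which is exactly the behaviour the commit message relies on.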