| author | Carlos Maiolino <cem@kernel.org> | 2024-11-12 10:59:34 +0100 |
|---|---|---|
| committer | Carlos Maiolino <cem@kernel.org> | 2024-11-12 10:59:34 +0100 |
| commit | 6b3582aca37180fa1270867d7964e4023a59302f (patch) | |
| tree | 1d9cbe4a3d0acd8b586ead0307adc6db350d9e7c /fs/xfs/xfs_discard.c | |
| parent | d7a5b69bf07e06b4096ab00fa620e603b9961746 (diff) | |
| parent | f220f6da5f4ad7da538c39075cf57e829d5202f7 (diff) | |
Merge tag 'incore-rtgroups-6.13_2024-11-05' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into staging-merge
xfs: create incore rt allocation groups [v5.5 04/10]
Add in-memory data structures for sharding the realtime volume into
independent allocation groups. For existing filesystems, the entire rt
volume is modelled as a single large group with (potentially) more than
2^32 rt extents, though such filesystems are unlikely to exist in the
wild because the codebase has been a bit broken for decades. The
next series fills in the ondisk format and other supporting structures.
With a bit of luck, this should all go splendidly.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
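The diff below replaces the mount-wide rt bitmap walk with a per-group walk driven by the new incore helpers (xfs_rtgroup_next_range(), xfs_rtgroup_rele(), rtg_rgno()). As a rough sketch of that iteration idiom, assuming the iterator hands the group reference along on each step (so the caller releases it explicitly only on early exit); do_per_group() is a hypothetical placeholder, not a real kernel function:

```c
/*
 * Sketch of the per-group walk idiom (not kernel code as-is).
 * xfs_rtgroup_next_range() is assumed to drop the reference on the
 * group passed in and return the next referenced group in the range,
 * or NULL at the end; hence the loop only calls xfs_rtgroup_rele()
 * when it bails out early.
 */
static int
walk_rtgroups(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		start_rgno,
	xfs_rgnumber_t		end_rgno)
{
	struct xfs_rtgroup	*rtg = NULL;
	int			last_error = 0, error;

	while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rgno, end_rgno))) {
		/* do_per_group() stands in for real per-group work. */
		error = do_per_group(rtg);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_rtgroup_rele(rtg);
			break;
		}
	}
	return last_error;
}
```

Accumulating last_error instead of returning on the first failure matches how the new xfs_trim_rtdev_extents() below keeps trimming the remaining groups after one group fails.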
Diffstat (limited to 'fs/xfs/xfs_discard.c')
| -rw-r--r-- | fs/xfs/xfs_discard.c | 100 |

1 file changed, 65 insertions(+), 35 deletions(-)
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 019371c865d2..4f3e4736f13e 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -21,6 +21,7 @@
 #include "xfs_ag.h"
 #include "xfs_health.h"
 #include "xfs_rtbitmap.h"
+#include "xfs_rtgroup.h"
 
 /*
  * Notes on an efficient, low latency fstrim algorithm
@@ -506,7 +507,7 @@ xfs_discard_rtdev_extents(
 
 static int
 xfs_trim_gather_rtextent(
-	struct xfs_mount		*mp,
+	struct xfs_rtgroup		*rtg,
 	struct xfs_trans		*tp,
 	const struct xfs_rtalloc_rec	*rec,
 	void				*priv)
@@ -525,12 +526,12 @@ xfs_trim_gather_rtextent(
 		return -ECANCELED;
 	}
 
-	rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
-	rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
+	rbno = xfs_rtx_to_rtb(rtg, rec->ar_startext);
+	rlen = xfs_rtbxlen_to_blen(rtg_mount(rtg), rec->ar_extcount);
 
 	/* Ignore too small. */
 	if (rlen < tr->minlen_fsb) {
-		trace_xfs_discard_rttoosmall(mp, rbno, rlen);
+		trace_xfs_discard_rttoosmall(rtg_mount(rtg), rbno, rlen);
 		return 0;
 	}
 
@@ -548,69 +549,49 @@ xfs_trim_gather_rtextent(
 }
 
 static int
-xfs_trim_rtdev_extents(
-	struct xfs_mount	*mp,
-	xfs_daddr_t		start,
-	xfs_daddr_t		end,
+xfs_trim_rtextents(
+	struct xfs_rtgroup	*rtg,
+	xfs_rtxnum_t		low,
+	xfs_rtxnum_t		high,
 	xfs_daddr_t		minlen)
 {
+	struct xfs_mount	*mp = rtg_mount(rtg);
 	struct xfs_trim_rtdev	tr = {
 		.minlen_fsb	= XFS_BB_TO_FSB(mp, minlen),
+		.extent_list	= LIST_HEAD_INIT(tr.extent_list),
 	};
-	xfs_rtxnum_t		low, high;
 	struct xfs_trans	*tp;
-	xfs_daddr_t		rtdev_daddr;
 	int			error;
 
-	INIT_LIST_HEAD(&tr.extent_list);
-
-	/* Shift the start and end downwards to match the rt device. */
-	rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
-	if (start > rtdev_daddr)
-		start -= rtdev_daddr;
-	else
-		start = 0;
-
-	if (end <= rtdev_daddr)
-		return 0;
-	end -= rtdev_daddr;
-
 	error = xfs_trans_alloc_empty(mp, &tp);
 	if (error)
 		return error;
 
-	end = min_t(xfs_daddr_t, end,
-			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);
-
-	/* Convert the rt blocks to rt extents */
-	low = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
-	high = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));
-
 	/*
 	 * Walk the free ranges between low and high.  The query_range function
 	 * trims the extents returned.
 	 */
 	do {
 		tr.stop_rtx = low + (mp->m_sb.sb_blocksize * NBBY);
-		xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
-		error = xfs_rtalloc_query_range(mp, tp, low, high,
+		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
+		error = xfs_rtalloc_query_range(rtg, tp, low, high,
 				xfs_trim_gather_rtextent, &tr);
 
 		if (error == -ECANCELED)
 			error = 0;
 		if (error) {
-			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+			xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
 			xfs_discard_free_rtdev_extents(&tr);
 			break;
 		}
 
 		if (list_empty(&tr.extent_list)) {
-			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+			xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
 			break;
 		}
 
 		error = xfs_discard_rtdev_extents(mp, &tr);
-		xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_BITMAP_SHARED);
 		if (error)
 			break;
@@ -620,6 +601,55 @@ xfs_trim_rtdev_extents(
 	xfs_trans_cancel(tp);
 	return error;
 }
+
+static int
+xfs_trim_rtdev_extents(
+	struct xfs_mount	*mp,
+	xfs_daddr_t		start,
+	xfs_daddr_t		end,
+	xfs_daddr_t		minlen)
+{
+	xfs_rtblock_t		start_rtbno, end_rtbno;
+	xfs_rtxnum_t		start_rtx, end_rtx;
+	xfs_rgnumber_t		start_rgno, end_rgno;
+	int			last_error = 0, error;
+	struct xfs_rtgroup	*rtg = NULL;
+
+	/* Shift the start and end downwards to match the rt device. */
+	start_rtbno = xfs_daddr_to_rtb(mp, start);
+	if (start_rtbno > mp->m_sb.sb_dblocks)
+		start_rtbno -= mp->m_sb.sb_dblocks;
+	else
+		start_rtbno = 0;
+	start_rtx = xfs_rtb_to_rtx(mp, start_rtbno);
+	start_rgno = xfs_rtb_to_rgno(mp, start_rtbno);
+
+	end_rtbno = xfs_daddr_to_rtb(mp, end);
+	if (end_rtbno <= mp->m_sb.sb_dblocks)
+		return 0;
+	end_rtbno -= mp->m_sb.sb_dblocks;
+	end_rtx = xfs_rtb_to_rtx(mp, end_rtbno + mp->m_sb.sb_rextsize - 1);
+	end_rgno = xfs_rtb_to_rgno(mp, end_rtbno);
+
+	while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rgno, end_rgno))) {
+		xfs_rtxnum_t	rtg_end = rtg->rtg_extents;
+
+		if (rtg_rgno(rtg) == end_rgno)
+			rtg_end = min(rtg_end, end_rtx);
+
+		error = xfs_trim_rtextents(rtg, start_rtx, rtg_end, minlen);
+		if (error)
+			last_error = error;
+
+		if (xfs_trim_should_stop()) {
+			xfs_rtgroup_rele(rtg);
+			break;
+		}
+		start_rtx = 0;
+	}
+
+	return last_error;
+}
 #else
 # define xfs_trim_rtdev_extents(...)	(-EOPNOTSUPP)
 #endif /* CONFIG_XFS_RT */
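One step worth making concrete is the shift at the top of the new xfs_trim_rtdev_extents(): an FITRIM range addresses the data device's blocks first and the rt device's blocks after them, so rt block numbers fall out by subtracting the data device size. A standalone toy model of that arithmetic, with invented numbers (this is not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the range shift in xfs_trim_rtdev_extents().  The trim
 * range covers the data device's blocks first and the rt device's
 * blocks after them, so rt block numbers are recovered by subtracting
 * the data device size.  All numbers are invented for illustration.
 */
int main(void)
{
	uint64_t sb_dblocks = 1000;		/* data device size, fs blocks */
	uint64_t start = 900, end = 1500;	/* requested trim range */

	/* A start that falls inside the data device clamps to rt block 0. */
	uint64_t start_rtbno = start > sb_dblocks ? start - sb_dblocks : 0;

	/* A range that ends inside the data device touches no rt blocks. */
	if (end <= sb_dblocks) {
		puts("nothing to trim on the rt device");
		return 0;
	}
	uint64_t end_rtbno = end - sb_dblocks;

	/* Prints: trim rt blocks [0, 500] */
	printf("trim rt blocks [%llu, %llu]\n",
	       (unsigned long long)start_rtbno,
	       (unsigned long long)end_rtbno);
	return 0;
}
```

With these numbers, a request for blocks 900-1500 clamps to rt blocks [0, 500]: the first 100 blocks of the request lie on the data device and are ignored by the rt path. The kernel code then converts those rt block bounds to rt extents and group numbers before walking the groups.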