author     Christoph Hellwig <hch@lst.de>       2024-11-03 20:19:29 -0800
committer  Darrick J. Wong <djwong@kernel.org>  2024-11-05 13:38:42 -0800
commit     d162491c5459f4dd72e65b72a2c864591668ec07
tree       eaab16b130ccdb2ed5b62bd1285be87fcec7d85d /fs/xfs/xfs_rtalloc.c
parent     b91afef724710e3dc7d65a28105ffd7a4e861d69
xfs: make the RT allocator rtgroup aware
Make the allocator rtgroup aware by either picking a specific group if
there is a hint, or by looping over all groups otherwise. A simple rotor
is provided to pick the placement for initial allocations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
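[Editor's note] The "simple rotor" mentioned in the commit message is just a shared
counter: each initial allocation bumps it atomically and takes the result modulo the
number of realtime groups, so successive new files start in successive groups. Below is
a minimal userspace sketch of that pattern, not the kernel code; the fake_mount structure
and pick_start_rgno() are illustrative stand-ins for the m_rtgrotor and sb_rgcount fields
used in the diff further down.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the two xfs_mount fields the rotor needs. */
struct fake_mount {
	atomic_uint	rotor;		/* plays the role of m_rtgrotor */
	unsigned int	rgcount;	/* plays the role of sb_rgcount */
};

/*
 * Pick a starting realtime group for an initial allocation: bump the
 * shared rotor and wrap it into the valid group range.  This mirrors
 * (atomic_inc_return(&mp->m_rtgrotor) - 1) % mp->m_sb.sb_rgcount from
 * the diff; atomic_fetch_add() returns the pre-increment value, which
 * is exactly atomic_inc_return() minus one.
 */
static unsigned int
pick_start_rgno(struct fake_mount *mp)
{
	return atomic_fetch_add(&mp->rotor, 1) % mp->rgcount;
}

int
main(void)
{
	struct fake_mount	mp = { .rotor = 0, .rgcount = 4 };

	/* Successive initial allocations rotate through groups 0..3. */
	for (int i = 0; i < 8; i++)
		printf("allocation %d starts in rtgroup %u\n", i,
				pick_start_rgno(&mp));
	return 0;
}

Because the counter only ever increases, the modulo keeps the result in range even after
the unsigned counter wraps around.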
Diffstat (limited to 'fs/xfs/xfs_rtalloc.c')
-rw-r--r--  fs/xfs/xfs_rtalloc.c  98
1 file changed, 89 insertions, 9 deletions
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 1f53d5e07a91..b27d23bcc064 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1662,8 +1662,9 @@ xfs_rtalloc_align_minmax(
 }
 
 static int
-xfs_rtallocate(
+xfs_rtallocate_rtg(
 	struct xfs_trans	*tp,
+	xfs_rgnumber_t		rgno,
 	xfs_rtblock_t		bno_hint,
 	xfs_rtxlen_t		minlen,
 	xfs_rtxlen_t		maxlen,
@@ -1683,16 +1684,33 @@ xfs_rtallocate(
 	xfs_rtxlen_t		len = 0;
 	int			error = 0;
 
-	args.rtg = xfs_rtgroup_grab(args.mp, 0);
+	args.rtg = xfs_rtgroup_grab(args.mp, rgno);
 	if (!args.rtg)
 		return -ENOSPC;
 
 	/*
-	 * Lock out modifications to both the RT bitmap and summary inodes.
+	 * We need to lock out modifications to both the RT bitmap and summary
+	 * inodes for finding free space in xfs_rtallocate_extent_{near,size}
+	 * and join the bitmap and summary inodes for the actual allocation
+	 * down in xfs_rtallocate_range.
+	 *
+	 * For RTG-enabled file systems we don't want to join the inodes to the
+	 * transaction until we are committed to allocate from this RTG so that
+	 * only one inode of each type is locked at a time.
+	 *
+	 * But for pre-RTG file systems we already need to join the bitmap
+	 * inode to the transaction for xfs_rtpick_extent, which bumps the
+	 * sequence number in it, so we'll have to join the inode to the
+	 * transaction early here.
+	 *
+	 * This is all a bit messy, but at least the mess is contained in
+	 * this function.
 	 */
 	if (!*rtlocked) {
 		xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP);
-		xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
+		if (!xfs_has_rtgroups(args.mp))
+			xfs_rtgroup_trans_join(tp, args.rtg,
+					XFS_RTGLOCK_BITMAP);
 		*rtlocked = true;
 	}
 
@@ -1702,7 +1720,7 @@ xfs_rtallocate(
 	 */
 	if (bno_hint)
 		start = xfs_rtb_to_rtx(args.mp, bno_hint);
-	else if (initial_user_data)
+	else if (!xfs_has_rtgroups(args.mp) && initial_user_data)
 		start = xfs_rtpick_extent(args.rtg, tp, maxlen);
 
 	if (start) {
@@ -1723,8 +1741,16 @@ xfs_rtallocate(
 				prod, &rtx);
 	}
 
-	if (error)
+	if (error) {
+		if (xfs_has_rtgroups(args.mp)) {
+			xfs_rtgroup_unlock(args.rtg, XFS_RTGLOCK_BITMAP);
+			*rtlocked = false;
+		}
 		goto out_release;
+	}
+
+	if (xfs_has_rtgroups(args.mp))
+		xfs_rtgroup_trans_join(tp, args.rtg, XFS_RTGLOCK_BITMAP);
 
 	error = xfs_rtallocate_range(&args, rtx, len);
 	if (error)
@@ -1743,6 +1769,53 @@ out_release:
 }
 
 static int
+xfs_rtallocate_rtgs(
+	struct xfs_trans	*tp,
+	xfs_fsblock_t		bno_hint,
+	xfs_rtxlen_t		minlen,
+	xfs_rtxlen_t		maxlen,
+	xfs_rtxlen_t		prod,
+	bool			wasdel,
+	bool			initial_user_data,
+	xfs_rtblock_t		*bno,
+	xfs_extlen_t		*blen)
+{
+	struct xfs_mount	*mp = tp->t_mountp;
+	xfs_rgnumber_t		start_rgno, rgno;
+	int			error;
+
+	/*
+	 * For now this just blindly iterates over the RTGs for an initial
+	 * allocation.  We could try to keep an in-memory rtg_longest member
+	 * to avoid the locking when just looking for big enough free space,
+	 * but for now this keeps things simple.
+	 */
+	if (bno_hint != NULLFSBLOCK)
+		start_rgno = xfs_rtb_to_rgno(mp, bno_hint);
+	else
+		start_rgno = (atomic_inc_return(&mp->m_rtgrotor) - 1) %
+				mp->m_sb.sb_rgcount;
+
+	rgno = start_rgno;
+	do {
+		bool rtlocked = false;
+
+		error = xfs_rtallocate_rtg(tp, rgno, bno_hint, minlen, maxlen,
+				prod, wasdel, initial_user_data, &rtlocked,
+				bno, blen);
+		if (error != -ENOSPC)
+			return error;
+		ASSERT(!rtlocked);
+
+		if (++rgno == mp->m_sb.sb_rgcount)
+			rgno = 0;
+		bno_hint = NULLFSBLOCK;
+	} while (rgno != start_rgno);
+
+	return -ENOSPC;
+}
+
+static int
 xfs_rtallocate_align(
 	struct xfs_bmalloca	*ap,
 	xfs_rtxlen_t		*ralen,
@@ -1836,9 +1909,16 @@ retry:
 	if (xfs_bmap_adjacent(ap))
 		bno_hint = ap->blkno;
 
-	error = xfs_rtallocate(ap->tp, bno_hint, raminlen, ralen, prod,
-			ap->wasdel, initial_user_data, &rtlocked,
-			&ap->blkno, &ap->length);
+	if (xfs_has_rtgroups(ap->ip->i_mount)) {
+		error = xfs_rtallocate_rtgs(ap->tp, bno_hint, raminlen, ralen,
+				prod, ap->wasdel, initial_user_data,
+				&ap->blkno, &ap->length);
+	} else {
+		error = xfs_rtallocate_rtg(ap->tp, 0, bno_hint, raminlen, ralen,
+				prod, ap->wasdel, initial_user_data,
+				&rtlocked, &ap->blkno, &ap->length);
+	}
+
 	if (error == -ENOSPC) {
 		if (!noalign) {
 			/*
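[Editor's note] For readers skimming the diff, here is the shape of the group walk that
xfs_rtallocate_rtgs() performs, reduced to a standalone sketch: start at the hinted (or
rotor-chosen) group, try each group in turn with wrap-around, keep going only while a
group reports -ENOSPC, and give up once the walk comes back to the starting group. The
names try_alloc_in_group(), alloc_from_any_group(), and RGCOUNT are made-up stand-ins
for xfs_rtallocate_rtg() and sb_rgcount, not kernel interfaces.

#include <errno.h>
#include <stdio.h>

#define RGCOUNT	4	/* stand-in for mp->m_sb.sb_rgcount */

/*
 * Hypothetical stand-in for xfs_rtallocate_rtg(): pretend only group 2
 * has free space, so every other group fails with -ENOSPC.
 */
static int
try_alloc_in_group(unsigned int rgno)
{
	return rgno == 2 ? 0 : -ENOSPC;
}

/*
 * Mirror the control flow of xfs_rtallocate_rtgs(): walk the groups
 * starting at start_rgno, wrap at RGCOUNT, and stop early on success or
 * on any error other than -ENOSPC.
 */
static int
alloc_from_any_group(unsigned int start_rgno, unsigned int *out_rgno)
{
	unsigned int	rgno = start_rgno;

	do {
		int error = try_alloc_in_group(rgno);

		if (error != -ENOSPC) {
			*out_rgno = rgno;
			return error;	/* success or a hard error */
		}
		if (++rgno == RGCOUNT)
			rgno = 0;	/* wrap to the first group */
	} while (rgno != start_rgno);

	return -ENOSPC;			/* every group was full */
}

int
main(void)
{
	unsigned int	rgno;

	if (!alloc_from_any_group(3, &rgno))
		printf("allocated from rtgroup %u\n", rgno);
	return 0;
}

In the kernel function the block-number hint is also cleared after the first group is
tried (bno_hint = NULLFSBLOCK), so later groups search from the start of their own
bitmap rather than from a block that belongs to a different group.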