-rw-r--r--   fs/xfs/libxfs/xfs_rtgroup.h    6
-rw-r--r--   fs/xfs/xfs_zone_alloc.c       26
-rw-r--r--   fs/xfs/xfs_zone_gc.c           2
-rw-r--r--   fs/xfs/xfs_zone_priv.h         1
-rw-r--r--   fs/xfs/xfs_zone_space_resv.c   2
5 files changed, 25 insertions, 12 deletions
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index a94e925ae67c..03f1e2493334 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -64,12 +64,6 @@ struct xfs_rtgroup {
  */
 #define XFS_RTG_FREE		XA_MARK_0
 
-/*
- * For zoned RT devices this is set on groups that are fully written and that
- * have unused blocks. Used by the garbage collection to pick targets.
- */
-#define XFS_RTG_RECLAIMABLE	XA_MARK_1
-
 static inline struct xfs_rtgroup *to_rtg(struct xfs_group *xg)
 {
 	return container_of(xg, struct xfs_rtgroup, rtg_group);
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index ef7a931ebde5..0a118376c57c 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -103,9 +103,6 @@ xfs_zone_account_reclaimable(
 		 */
 		trace_xfs_zone_emptied(rtg);
 
-		if (!was_full)
-			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
-
 		spin_lock(&zi->zi_used_buckets_lock);
 		if (!was_full)
 			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
@@ -127,7 +124,6 @@ xfs_zone_account_reclaimable(
 		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
 		spin_unlock(&zi->zi_used_buckets_lock);
 
-		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
 		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
 			wake_up_process(zi->zi_gc_thread);
 	} else if (to_bucket != from_bucket) {
@@ -142,6 +138,28 @@ xfs_zone_account_reclaimable(
 	}
 }
 
+/*
+ * Check if we have any zones that can be reclaimed by looking at the entry
+ * counters for the zone buckets.
+ */
+bool
+xfs_zoned_have_reclaimable(
+	struct xfs_zone_info	*zi)
+{
+	int			i;
+
+	spin_lock(&zi->zi_used_buckets_lock);
+	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
+		if (zi->zi_used_bucket_entries[i]) {
+			spin_unlock(&zi->zi_used_buckets_lock);
+			return true;
+		}
+	}
+	spin_unlock(&zi->zi_used_buckets_lock);
+
+	return false;
+}
+
 static void
 xfs_open_zone_mark_full(
 	struct xfs_open_zone	*oz)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index a98939aba7b9..d786162ada1c 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -175,7 +175,7 @@ xfs_zoned_need_gc(
 	s64			available, free, threshold;
 	s32			remainder;
 
-	if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE))
+	if (!xfs_zoned_have_reclaimable(mp->m_zone_info))
 		return false;
 
 	available = xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE);
diff --git a/fs/xfs/xfs_zone_priv.h b/fs/xfs/xfs_zone_priv.h
index 4322e26dd99a..ce7f0e2f4598 100644
--- a/fs/xfs/xfs_zone_priv.h
+++ b/fs/xfs/xfs_zone_priv.h
@@ -113,6 +113,7 @@ struct xfs_open_zone *xfs_open_zone(struct xfs_mount *mp,
 
 int xfs_zone_gc_reset_sync(struct xfs_rtgroup *rtg);
 bool xfs_zoned_need_gc(struct xfs_mount *mp);
+bool xfs_zoned_have_reclaimable(struct xfs_zone_info *zi);
 
 int xfs_zone_gc_mount(struct xfs_mount *mp);
 void xfs_zone_gc_unmount(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_zone_space_resv.c b/fs/xfs/xfs_zone_space_resv.c
index 0e54e557a585..fc1a4d1ce10c 100644
--- a/fs/xfs/xfs_zone_space_resv.c
+++ b/fs/xfs/xfs_zone_space_resv.c
@@ -172,7 +172,7 @@ xfs_zoned_reserve_available(
 		 * processing a pending GC request give up as we're fully out
 		 * of space.
 		 */
-		if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE) &&
+		if (!xfs_zoned_have_reclaimable(mp->m_zone_info) &&
 		    !xfs_is_zonegc_running(mp))
 			break;
 
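For context on the replaced check: the new xfs_zoned_have_reclaimable() answers "is any zone reclaimable?" from the per-bucket entry counters that xfs_zone_account_reclaimable() already maintains, instead of a separate XFS_RTG_RECLAIMABLE xarray mark that had to be set and cleared alongside them. The standalone sketch below models that counter-based check outside the kernel; the bucket count, the zone_info struct, and the pthread mutex standing in for zi_used_buckets_lock are illustrative assumptions, not kernel API.

/* Standalone model of the counter-based "any reclaimable zones?" check. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define ZONE_USED_BUCKETS	4	/* stand-in for XFS_ZONE_USED_BUCKETS */

struct zone_info {
	pthread_mutex_t	used_buckets_lock;	/* models zi_used_buckets_lock */
	unsigned int	used_bucket_entries[ZONE_USED_BUCKETS];
};

/*
 * Mirror of the new helper: walk the bucket entry counters under the lock
 * and report whether any bucket currently holds a reclaimable zone.
 */
static bool zoned_have_reclaimable(struct zone_info *zi)
{
	bool found = false;
	int i;

	pthread_mutex_lock(&zi->used_buckets_lock);
	for (i = 0; i < ZONE_USED_BUCKETS; i++) {
		if (zi->used_bucket_entries[i]) {
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&zi->used_buckets_lock);

	return found;
}

int main(void)
{
	struct zone_info zi = {
		.used_buckets_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("reclaimable: %d\n", zoned_have_reclaimable(&zi));	/* 0 */

	/* Account one fully written zone into bucket 2, as the allocator would. */
	pthread_mutex_lock(&zi.used_buckets_lock);
	zi.used_bucket_entries[2]++;
	pthread_mutex_unlock(&zi.used_buckets_lock);

	printf("reclaimable: %d\n", zoned_have_reclaimable(&zi));	/* 1 */
	return 0;
}

Because the counters are kept exact under zi_used_buckets_lock, the check stays a short walk over XFS_ZONE_USED_BUCKETS entries, and the redundant per-group mark bookkeeping can be dropped, which is what the xfs_zone_alloc.c hunks above do.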
