author     Carlos Maiolino <cem@kernel.org>    2025-09-05 20:26:42 +0200
committer  Carlos Maiolino <cem@kernel.org>    2025-09-05 20:26:42 +0200
commit     482c57805c722d420bce02b0942b4e15911ec115 (patch)
tree       4429ddd4b96c47c9fc52bd21a879c5525dd842e7 /fs/nfs/write.c
parent     33ddc796ecbd50cd6211aa9e9eddbf4567038b49 (diff)
parent     07c34f8cef69cb8eeef69c18d6cf0c04fbee3cb3 (diff)
Merge tag 'fix-scrub-reap-calculations_2025-09-05' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.18-merge
xfs: improve online repair reap calculations [6.18 v2 1/2]

A few months ago, the multi-fsblock untorn writes patchset added a bunch of log intent item helper functions to estimate the number of intent items that could be added to a particular transaction. Those helpers enabled us to compute a safe upper bound on the number of blocks that could be written in an untorn fashion with filesystem-provided out-of-place writes.

Currently, the online fsck code employs static limits on the number of intent items that it's willing to accrue to a single transaction when it's trying to reap what it thinks are the old blocks from a corrupt structure. There have been no problems reported with this approach after years of testing, but static limits are scary and gross: overestimating the intent item limit could result in transaction overflows and dead filesystems, while underestimating it causes unnecessary overhead.

This series uses the new log intent item size helpers to estimate the limits dynamically, based on the worst-case per-block repair work versus the size of the scrub transaction. After several months of testing this, there don't seem to be any problems here either.

v2: rearrange patches, add review tags

This has been running on the djcloud for months with no problems. Enjoy!

Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
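To make the dynamic limit concrete, here is a minimal sketch of the arithmetic the series describes: bound the number of blocks reaped per transaction by the transaction's log reservation divided by the worst-case intent-item cost of reaping one block. The function and parameter names below are hypothetical stand-ins for illustration only, not the actual helpers added by the untorn writes patchset.

/*
 * Illustrative sketch, not the real XFS API: "logres" is the scrub
 * transaction's log reservation in bytes; "intent_bytes_per_block" is
 * the worst-case log space consumed by the intent items needed to reap
 * a single block.
 */
static inline unsigned int
reap_max_blocks_per_tx(unsigned int logres, unsigned int intent_bytes_per_block)
{
        unsigned int max_blocks;

        /* Guard against a bogus zero estimate. */
        if (intent_bytes_per_block == 0)
                return 1;

        max_blocks = logres / intent_bytes_per_block;

        /* Allow at least one block so repair always makes forward progress. */
        return max_blocks ? max_blocks : 1;
}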
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--    fs/nfs/write.c    29
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index fa5c41d0989a..8b7c04737967 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -153,20 +153,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
         }
 }
 
-static int
-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
 {
-        int ret;
-
-        if (!test_bit(PG_REMOVE, &req->wb_flags))
-                return 0;
-        ret = nfs_page_group_lock(req);
-        if (ret)
-                return ret;
         if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
                 nfs_page_set_inode_ref(req, inode);
-        nfs_page_group_unlock(req);
-        return 0;
 }
 
 /**
@@ -585,19 +575,18 @@ retry:
                 }
         }
 
+        ret = nfs_page_group_lock(head);
+        if (ret < 0)
+                goto out_unlock;
+
         /* Ensure that nobody removed the request before we locked it */
         if (head != folio->private) {
+                nfs_page_group_unlock(head);
                 nfs_unlock_and_release_request(head);
                 goto retry;
         }
 
-        ret = nfs_cancel_remove_inode(head, inode);
-        if (ret < 0)
-                goto out_unlock;
-
-        ret = nfs_page_group_lock(head);
-        if (ret < 0)
-                goto out_unlock;
+        nfs_cancel_remove_inode(head, inode);
 
         /* lock each request in the page group */
         for (subreq = head->wb_this_page;
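Pieced together from the two hunks above, the flow in nfs_lock_and_join_requests() now takes the page group lock before revalidating the head request, so the PG_REMOVE test in nfs_cancel_remove_inode() runs entirely under the lock instead of taking and dropping it internally (surrounding code elided):

        /* Take the group lock before checking that the head is still live. */
        ret = nfs_page_group_lock(head);
        if (ret < 0)
                goto out_unlock;

        /* Ensure that nobody removed the request before we locked it */
        if (head != folio->private) {
                nfs_page_group_unlock(head);
                nfs_unlock_and_release_request(head);
                goto retry;
        }

        /* PG_REMOVE is now tested and cleared while the lock is held. */
        nfs_cancel_remove_inode(head, inode);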
@@ -786,7 +775,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 {
         struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
 
-        if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
+        nfs_page_group_lock(req);
+        if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
                 struct folio *folio = nfs_page_to_folio(req->wb_head);
                 struct address_space *mapping = folio->mapping;
 
@@ -798,6 +788,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                 }
                 spin_unlock(&mapping->i_private_lock);
         }
+        nfs_page_group_unlock(req);
 
         if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
                 atomic_long_dec(&nfsi->nrequests);
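For reference, this is roughly what nfs_inode_remove_request() looks like after the last two hunks, with the unchanged body elided as comments: the group lock now brackets the PG_REMOVE synchronization via the _locked variant of the sync-on-bit helper, serializing it against the cancellation path above.

static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

        nfs_page_group_lock(req);
        if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
                struct folio *folio = nfs_page_to_folio(req->wb_head);
                struct address_space *mapping = folio->mapping;

                spin_lock(&mapping->i_private_lock);
                /* ... detach the folio from the request, as in the full code ... */
                spin_unlock(&mapping->i_private_lock);
        }
        nfs_page_group_unlock(req);

        if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
                atomic_long_dec(&nfsi->nrequests);
                /* ... drop the inode's reference, as in the full code ... */
        }
}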