summaryrefslogtreecommitdiff
path: root/fs/xfs/xfs_file.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2024-10-29 16:11:58 +0100
committerCarlos Maiolino <cem@kernel.org>2024-11-05 13:52:57 +0100
commit1eb6fc044752d0ec1a637956519787e5f6ce8760 (patch)
tree7297d5e9ccbab4fe356c1d49b5dc5addd8aa8d98 /fs/xfs/xfs_file.c
parent1171de329692163470f1cde7cc99f89487ca555a (diff)
xfs: split write fault handling out of __xfs_filemap_fault
Only two of the callers of __xfs_filemap_fault ever handle read faults. Split the write_fault handling out of __xfs_filemap_fault so that all callers call that directly either conditionally or unconditionally and only leave the read fault handling in __xfs_filemap_fault. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Signed-off-by: Carlos Maiolino <cem@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_file.c')
-rw-r--r--fs/xfs/xfs_file.c41
1 file changed, 19 insertions, 22 deletions
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 20f7f92b8867..0b8e36f8703c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1434,6 +1434,16 @@ xfs_dax_read_fault(
return ret;
}
+/*
+ * Locking for serialisation of IO during page faults. This results in a lock
+ * ordering of:
+ *
+ * mmap_lock (MM)
+ * sb_start_pagefault(vfs, freeze)
+ * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
+ * page_lock (MM)
+ * i_lock (XFS - extent map serialisation)
+ */
static vm_fault_t
xfs_write_fault(
struct vm_fault *vmf,
@@ -1471,26 +1481,13 @@ xfs_write_fault(
return ret;
}
-/*
- * Locking for serialisation of IO during page faults. This results in a lock
- * ordering of:
- *
- * mmap_lock (MM)
- * sb_start_pagefault(vfs, freeze)
- * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
- * page_lock (MM)
- * i_lock (XFS - extent map serialisation)
- */
static vm_fault_t
__xfs_filemap_fault(
struct vm_fault *vmf,
- unsigned int order,
- bool write_fault)
+ unsigned int order)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- if (write_fault)
- return xfs_write_fault(vmf, order);
if (IS_DAX(inode))
return xfs_dax_read_fault(vmf, order);
@@ -1511,9 +1508,9 @@ xfs_filemap_fault(
struct vm_fault *vmf)
{
/* DAX can shortcut the normal fault path on write faults! */
- return __xfs_filemap_fault(vmf, 0,
- IS_DAX(file_inode(vmf->vma->vm_file)) &&
- xfs_is_write_fault(vmf));
+ if (IS_DAX(file_inode(vmf->vma->vm_file)) && xfs_is_write_fault(vmf))
+ return xfs_write_fault(vmf, 0);
+ return __xfs_filemap_fault(vmf, 0);
}
static vm_fault_t
@@ -1525,15 +1522,16 @@ xfs_filemap_huge_fault(
return VM_FAULT_FALLBACK;
/* DAX can shortcut the normal fault path on write faults! */
- return __xfs_filemap_fault(vmf, order,
- xfs_is_write_fault(vmf));
+ if (xfs_is_write_fault(vmf))
+ return xfs_write_fault(vmf, order);
+ return __xfs_filemap_fault(vmf, order);
}
static vm_fault_t
xfs_filemap_page_mkwrite(
struct vm_fault *vmf)
{
- return __xfs_filemap_fault(vmf, 0, true);
+ return xfs_write_fault(vmf, 0);
}
/*
@@ -1545,8 +1543,7 @@ static vm_fault_t
xfs_filemap_pfn_mkwrite(
struct vm_fault *vmf)
{
-
- return __xfs_filemap_fault(vmf, 0, true);
+ return xfs_write_fault(vmf, 0);
}
static const struct vm_operations_struct xfs_file_vm_ops = {