author	Mateusz Guzik <mjguzik@gmail.com>	2024-08-15 10:33:10 +0200
committer	Christian Brauner <brauner@kernel.org>	2024-08-30 08:22:37 +0200
commit	b381fbbccb4cb60aefa1ab7f8e3766ddb4a461db (patch)
tree	57edc85b86716eceaa8a68045dd2f142221c2f18 /fs/libfs.c
parent	433f9d76a01056dfeaefc15167b11e514e56f956 (diff)
vfs: elide smp_mb in iversion handling in the common case
According to bpftrace on these routines, most calls result in a cmpxchg, which already provides the same ordering guarantee.

In inode_maybe_inc_iversion the elision is possible because even if the wrong value was read due to the now-missing smp_mb fence, the issue is going to correct itself after the cmpxchg. If it appears the cmpxchg won't be issued, the fence + reload are there, bringing back the previous behavior.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://lore.kernel.org/r/20240815083310.3865-1-mjguzik@gmail.com
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
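To make the pattern concrete, here is a minimal user-space C11 sketch of the writer side of this technique. It is illustrative only: QUERIED, INCREMENT, version and maybe_inc_version() are hypothetical stand-ins for the kernel's I_VERSION_QUERIED, I_VERSION_INCREMENT, inode->i_version and inode_maybe_inc_iversion(), and C11 seq_cst atomics stand in for the kernel's atomic primitives. The point it demonstrates is the same as the patch: only pay for an explicit full fence on the path where the compare-exchange (and its implied ordering) will likely be skipped.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* The low bit of the version word doubles as the QUERIED flag,
 * as I_VERSION_QUERIED does in the kernel. */
#define QUERIED   1ULL
#define INCREMENT (1ULL << 1)

static _Atomic uint64_t version;

/* Writer side, mirroring the post-patch inode_maybe_inc_iversion():
 * bump the counter only if someone queried it since the last bump. */
static bool maybe_inc_version(bool force)
{
	uint64_t cur, new;

	cur = atomic_load_explicit(&version, memory_order_relaxed);

	if (!force && !(cur & QUERIED)) {
		/*
		 * The cmpxchg below will likely be skipped, so its implied
		 * ordering is unavailable: fence explicitly and reload.
		 */
		atomic_thread_fence(memory_order_seq_cst);
		cur = atomic_load_explicit(&version, memory_order_relaxed);
	}

	do {
		/* If the flag is clear we needn't do anything. */
		if (!force && !(cur & QUERIED))
			return false;
		new = (cur & ~QUERIED) + INCREMENT; /* bump, clear the flag */
	} while (!atomic_compare_exchange_weak(&version, &cur, new));

	return true;
}

On the common path (flag set, or force), this issues no explicit fence at all; the seq_cst compare-exchange supplies the ordering, which is exactly the elision the commit message describes.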
Diffstat (limited to 'fs/libfs.c')
-rw-r--r--	fs/libfs.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/fs/libfs.c b/fs/libfs.c
index 02602d00939e..7874b23364e1 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -2003,13 +2003,19 @@ bool inode_maybe_inc_iversion(struct inode *inode, bool force)
* information, but the legacy inode_inc_iversion code used a spinlock
* to serialize increments.
*
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
+ * We add a full memory barrier to ensure that any de facto ordering
+ * with other state is preserved (either implicitly coming from cmpxchg
+ * or explicitly from smp_mb if we don't know upfront if we will execute
+ * the former).
*
- * This barrier pairs with the barrier in inode_query_iversion()
+ * These barriers pair with inode_query_iversion().
*/
- smp_mb();
cur = inode_peek_iversion_raw(inode);
+ if (!force && !(cur & I_VERSION_QUERIED)) {
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ }
+
do {
/* If flag is clear then we needn't do anything */
if (!force && !(cur & I_VERSION_QUERIED))
@@ -2038,20 +2044,22 @@ EXPORT_SYMBOL(inode_maybe_inc_iversion);
u64 inode_query_iversion(struct inode *inode)
{
u64 cur, new;
+ bool fenced = false;
+ /*
+ * Memory barriers (implicit in cmpxchg, explicit in smp_mb) pair with
+ * inode_maybe_inc_iversion(), see that routine for more details.
+ */
cur = inode_peek_iversion_raw(inode);
do {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
- /*
- * This barrier (and the implicit barrier in the
- * cmpxchg below) pairs with the barrier in
- * inode_maybe_inc_iversion().
- */
- smp_mb();
+ if (!fenced)
+ smp_mb();
break;
}
+ fenced = true;
new = cur | I_VERSION_QUERIED;
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return cur >> I_VERSION_QUERIED_SHIFT;
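For contrast, the matching reader side can be sketched the same way, reusing the QUERIED flag and version word from the writer sketch above; query_version() is a hypothetical stand-in for inode_query_iversion(). Note one hedge: the sketch leans on the kernel's reliance on cmpxchg ordering, whereas portable C11 only guarantees full seq_cst ordering for the success case of a compare-exchange.

/* Reader side, mirroring the post-patch inode_query_iversion():
 * set QUERIED so the next writer knows it must bump the counter. */
static uint64_t query_version(void)
{
	bool fenced = false;
	uint64_t cur, new;

	cur = atomic_load_explicit(&version, memory_order_relaxed);
	do {
		/* If the flag is already set, there is no need to swap. */
		if (cur & QUERIED) {
			/*
			 * Only fence if no cmpxchg (with its implied
			 * ordering) was attempted on the way here.
			 */
			if (!fenced)
				atomic_thread_fence(memory_order_seq_cst);
			break;
		}
		fenced = true;
		new = cur | QUERIED;
	} while (!atomic_compare_exchange_weak(&version, &cur, new));

	return cur >> 1; /* strip the flag bit, like I_VERSION_QUERIED_SHIFT */
}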