author	Kent Overstreet <kent.overstreet@linux.dev>	2025-05-22 16:54:31 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2025-05-23 07:59:43 -0400
commit	66782b2acbc3291faba7e14d9b22b77a4f3f94e4 (patch)
tree	de6e328ce55961f78fc69fb0adacda0dee08d526
parent	5b7b342c402df2cfb1d9a8ea79613742d61d1293 (diff)
bcachefs: Fix btree_path_get_locks when not doing trans restart
btree_path_get_locks, on failure, shouldn't unlock if we're not issuing a
transaction restart: we might drop locks we're not supposed to (if
path->should_be_locked is set).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--	fs/bcachefs/btree_iter.c	2
-rw-r--r--	fs/bcachefs/btree_locking.c	108
-rw-r--r--	fs/bcachefs/btree_locking.h	13
3 files changed, 72 insertions, 51 deletions
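
For readers skimming the patch, the crux is the new error path in btree_path_get_locks(): on failure it now only drops locks when a transaction restart is actually being issued, or when the path isn't marked should_be_locked. The stand-alone sketch below models just that decision; the struct names and helper are simplified illustrative stand-ins, not the kernel's own types, and the real logic is in the btree_locking.c hunk further down.

#include <stdbool.h>

/*
 * Stand-alone illustration of the fixed error path in btree_path_get_locks().
 * trans_s, path_s and get_locks_err_path() are simplified stand-ins, not the
 * kernel's structures or API.
 */
struct trans_s { bool restarted; };
struct path_s  { bool should_be_locked; unsigned locks_want; };

/*
 * On lock failure at level l: only unlock if we're issuing a transaction
 * restart, or if the path isn't should_be_locked -- otherwise we'd drop
 * locks the caller is relying on.
 */
static int get_locks_err_path(struct trans_s *trans, struct path_s *path,
			      bool upgrade, unsigned l, int restart_err)
{
	if (restart_err) {
		trans->restarted = true;	/* models btree_trans_restart() */
	} else if (path->should_be_locked && !trans->restarted) {
		if (upgrade)
			path->locks_want = l;	/* stop asking for locks we couldn't get */
		return -1;			/* report failure, keep locks held */
	}

	/* here the real code unlocks the path and marks it NEED_TRAVERSE */
	return restart_err ? -restart_err : -1;
}

This mirrors the "-restart_err ?: -1" return in the patch: callers that pass a restart error (relock, upgrade) get that error back after the restart has been issued, while the norestart variants pass 0 and simply see the failure.
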
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index b366407878d0..831275f8e79f 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1799,7 +1799,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > path->locks_want)
- bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
+ bch2_btree_path_upgrade_norestart(trans, path, locks_want);
return path_idx;
}
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 4745c2035d24..6e43269a9c47 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -451,13 +451,13 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
/* relock */
-static inline bool btree_path_get_locks(struct btree_trans *trans,
- struct btree_path *path,
- bool upgrade,
- struct get_locks_fail *f)
+static int btree_path_get_locks(struct btree_trans *trans,
+ struct btree_path *path,
+ bool upgrade,
+ struct get_locks_fail *f,
+ int restart_err)
{
unsigned l = path->level;
- int fail_idx = -1;
do {
if (!btree_path_node(path, l))
@@ -465,39 +465,49 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
if (!(upgrade
? bch2_btree_node_upgrade(trans, path, l)
- : bch2_btree_node_relock(trans, path, l))) {
- fail_idx = l;
-
- if (f) {
- f->l = l;
- f->b = path->l[l].b;
- }
- }
+ : bch2_btree_node_relock(trans, path, l)))
+ goto err;
l++;
} while (l < path->locks_want);
+ if (path->uptodate == BTREE_ITER_NEED_RELOCK)
+ path->uptodate = BTREE_ITER_UPTODATE;
+
+ return path->uptodate < BTREE_ITER_NEED_RELOCK ? 0 : -1;
+err:
+ if (f) {
+ f->l = l;
+ f->b = path->l[l].b;
+ }
+
+ /*
+ * Do transaction restart before unlocking, so we don't pop
+ * should_be_locked asserts
+ */
+ if (restart_err) {
+ btree_trans_restart(trans, restart_err);
+ } else if (path->should_be_locked && !trans->restarted) {
+ if (upgrade)
+ path->locks_want = l;
+ return -1;
+ }
+
+ __bch2_btree_path_unlock(trans, path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+
/*
* When we fail to get a lock, we have to ensure that any child nodes
* can't be relocked so bch2_btree_path_traverse has to walk back up to
* the node that we failed to relock:
*/
- if (fail_idx >= 0) {
- __bch2_btree_path_unlock(trans, path);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-
- do {
- path->l[fail_idx].b = upgrade
- ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
- : ERR_PTR(-BCH_ERR_no_btree_node_relock);
- --fail_idx;
- } while (fail_idx >= 0);
- }
-
- if (path->uptodate == BTREE_ITER_NEED_RELOCK)
- path->uptodate = BTREE_ITER_UPTODATE;
+ do {
+ path->l[l].b = upgrade
+ ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
+ : ERR_PTR(-BCH_ERR_no_btree_node_relock);
+ } while (l--);
- return path->uptodate < BTREE_ITER_NEED_RELOCK;
+ return -restart_err ?: -1;
}
bool __bch2_btree_node_relock(struct btree_trans *trans,
@@ -596,9 +606,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
- struct get_locks_fail f;
-
- bool ret = btree_path_get_locks(trans, path, false, &f);
+ bool ret = !btree_path_get_locks(trans, path, false, NULL, 0);
bch2_trans_verify_locks(trans);
return ret;
}
@@ -614,15 +622,16 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
return 0;
}
-bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
- struct btree_path *path,
- unsigned new_locks_want,
- struct get_locks_fail *f)
+bool __bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
{
- path->locks_want = max_t(unsigned, path->locks_want, new_locks_want);
+ path->locks_want = new_locks_want;
- bool ret = btree_path_get_locks(trans, path, true, f);
- bch2_trans_verify_locks(trans);
+ struct get_locks_fail f = {};
+ bool ret = !btree_path_get_locks(trans, path, true, &f, 0);
+
+ bch2_btree_path_verify_locks(path);
return ret;
}
@@ -630,12 +639,15 @@ int __bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
- struct get_locks_fail f = {};
unsigned old_locks = path->nodes_locked;
unsigned old_locks_want = path->locks_want;
- int ret = 0;
- if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, &f))
+ path->locks_want = max_t(unsigned, path->locks_want, new_locks_want);
+
+ struct get_locks_fail f = {};
+ int ret = btree_path_get_locks(trans, path, true, &f,
+ BCH_ERR_transaction_restart_upgrade);
+ if (!ret)
goto out;
/*
@@ -667,7 +679,7 @@ int __bch2_btree_path_upgrade(struct btree_trans *trans,
linked->btree_id == path->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true, NULL);
+ btree_path_get_locks(trans, linked, true, NULL, 0);
}
}
@@ -691,7 +703,6 @@ int __bch2_btree_path_upgrade(struct btree_trans *trans,
trace_trans_restart_upgrade(trans->c, buf.buf);
printbuf_exit(&buf);
}
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
out:
bch2_trans_verify_locks(trans);
return ret;
@@ -752,7 +763,7 @@ static inline void __bch2_trans_unlock(struct btree_trans *trans)
__bch2_btree_path_unlock(trans, path);
}
-static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
+static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
struct get_locks_fail *f, bool trace)
{
if (!trace)
@@ -786,7 +797,6 @@ static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, str
out:
__bch2_trans_unlock(trans);
bch2_trans_verify_locks(trans);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
@@ -803,10 +813,14 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
trans_for_each_path(trans, path, i) {
struct get_locks_fail f;
+ int ret;
if (path->should_be_locked &&
- !btree_path_get_locks(trans, path, false, &f))
- return bch2_trans_relock_fail(trans, path, &f, trace);
+ (ret = btree_path_get_locks(trans, path, false, &f,
+ BCH_ERR_transaction_restart_relock))) {
+ bch2_trans_relock_fail(trans, path, &f, trace);
+ return ret;
+ }
}
trans_set_locked(trans, true);
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 7e162982de17..63d7e5fb77c8 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -385,9 +385,16 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
/* upgrade */
-bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
- struct btree_path *, unsigned,
- struct get_locks_fail *);
+bool __bch2_btree_path_upgrade_norestart(struct btree_trans *, struct btree_path *, unsigned);
+
+static inline bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
+{
+ return new_locks_want > path->locks_want
+ ? __bch2_btree_path_upgrade_norestart(trans, path, new_locks_want)
+ : true;
+}
int __bch2_btree_path_upgrade(struct btree_trans *,
struct btree_path *, unsigned);