Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--  fs/bcachefs/btree_iter.c | 301
1 file changed, 189 insertions(+), 112 deletions(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 59fa527ac685..b4bf4217a3fa 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -16,6 +16,7 @@
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
+#include "super.h"
#include "trace.h"
#include <linux/random.h>
@@ -114,11 +115,9 @@ static inline bool btree_path_pos_in_node(struct btree_path *path,
!btree_path_pos_after_node(path, b);
}
-/* Btree iterator: */
+/* Debug: */
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-static void bch2_btree_path_verify_cached(struct btree_trans *trans,
+static void __bch2_btree_path_verify_cached(struct btree_trans *trans,
struct btree_path *path)
{
struct bkey_cached *ck;
@@ -135,7 +134,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
btree_node_unlock(trans, path, 0);
}
-static void bch2_btree_path_verify_level(struct btree_trans *trans,
+static void __bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned level)
{
struct btree_path_level *l;
@@ -147,16 +146,13 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
struct printbuf buf3 = PRINTBUF;
const char *msg;
- if (!bch2_debug_check_iterators)
- return;
-
l = &path->l[level];
tmp = l->iter;
locked = btree_node_locked(path, level);
if (path->cached) {
if (!level)
- bch2_btree_path_verify_cached(trans, path);
+ __bch2_btree_path_verify_cached(trans, path);
return;
}
@@ -217,7 +213,7 @@ err:
msg, level, buf1.buf, buf2.buf, buf3.buf);
}
-static void bch2_btree_path_verify(struct btree_trans *trans,
+static void __bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path)
{
struct bch_fs *c = trans->c;
@@ -229,22 +225,22 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
break;
}
- bch2_btree_path_verify_level(trans, path, i);
+ __bch2_btree_path_verify_level(trans, path, i);
}
- bch2_btree_path_verify_locks(path);
+ bch2_btree_path_verify_locks(trans, path);
}
-void bch2_trans_verify_paths(struct btree_trans *trans)
+void __bch2_trans_verify_paths(struct btree_trans *trans)
{
struct btree_path *path;
unsigned iter;
trans_for_each_path(trans, path, iter)
- bch2_btree_path_verify(trans, path);
+ __bch2_btree_path_verify(trans, path);
}
-static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
+static void __bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
{
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
@@ -256,11 +252,11 @@ static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter
!btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
- bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
- bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
+ __bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
+ __bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}
-static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
+static void __bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
!iter->pos.snapshot);
@@ -274,16 +270,13 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
-static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
- struct btree_iter *iter, struct bkey_s_c k)
+static int __bch2_btree_iter_verify_ret(struct btree_trans *trans,
+ struct btree_iter *iter, struct bkey_s_c k)
{
struct btree_iter copy;
struct bkey_s_c prev;
int ret = 0;
- if (!bch2_debug_check_iterators)
- return 0;
-
if (!(iter->flags & BTREE_ITER_filter_snapshots))
return 0;
@@ -324,7 +317,7 @@ out:
return ret;
}
-void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
+void __bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
struct bpos pos)
{
bch2_trans_verify_not_unlocked_or_in_restart(trans);
@@ -357,19 +350,40 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}
-#else
-
static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
- struct btree_path *path, unsigned l) {}
+ struct btree_path *path, unsigned l)
+{
+ if (static_branch_unlikely(&bch2_debug_check_iterators))
+ __bch2_btree_path_verify_level(trans, path, l);
+}
+
static inline void bch2_btree_path_verify(struct btree_trans *trans,
- struct btree_path *path) {}
+ struct btree_path *path)
+{
+ if (static_branch_unlikely(&bch2_debug_check_iterators))
+ __bch2_btree_path_verify(trans, path);
+}
+
static inline void bch2_btree_iter_verify(struct btree_trans *trans,
- struct btree_iter *iter) {}
-static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
-static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k) { return 0; }
+ struct btree_iter *iter)
+{
+ if (static_branch_unlikely(&bch2_debug_check_iterators))
+ __bch2_btree_iter_verify(trans, iter);
+}
-#endif
+static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
+{
+ if (static_branch_unlikely(&bch2_debug_check_iterators))
+ __bch2_btree_iter_verify_entry_exit(iter);
+}
+
+static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ return static_branch_unlikely(&bch2_debug_check_iterators)
+ ? __bch2_btree_iter_verify_ret(trans, iter, k)
+ : 0;
+}
/* Btree path: fixups after btree updates */
@@ -523,7 +537,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans,
__bch2_btree_node_iter_fix(path, b, node_iter, t,
where, clobber_u64s, new_u64s);
- if (bch2_debug_check_iterators)
+ if (static_branch_unlikely(&bch2_debug_check_iterators))
bch2_btree_node_iter_verify(node_iter, b);
}
@@ -977,7 +991,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
path->level = level;
bch2_btree_path_level_init(trans, path, b);
- bch2_btree_path_verify_locks(path);
+ bch2_btree_path_verify_locks(trans, path);
err:
bch2_bkey_buf_exit(&tmp, c);
return ret;
@@ -1089,7 +1103,7 @@ static void btree_path_set_level_down(struct btree_trans *trans,
if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
btree_node_unlock(trans, path, l);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
bch2_btree_path_verify(trans, path);
}
@@ -1162,7 +1176,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
}
if (path->cached) {
- ret = bch2_btree_path_traverse_cached(trans, path, flags);
+ ret = bch2_btree_path_traverse_cached(trans, path_idx, flags);
goto out;
}
@@ -1287,7 +1301,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
if (unlikely(path->cached)) {
btree_node_unlock(trans, path, 0);
path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
goto out;
}
@@ -1316,7 +1330,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
}
if (unlikely(level != path->level)) {
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
__bch2_btree_path_unlock(trans, path);
}
out:
@@ -1385,45 +1399,45 @@ static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_p
void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
- struct btree_path *path = trans->paths + path_idx, *dup;
+ struct btree_path *path = trans->paths + path_idx, *dup = NULL;
if (!__btree_path_put(trans, path, intent))
return;
+ if (!path->preserve && !path->should_be_locked)
+ goto free;
+
dup = path->preserve
? have_path_at_pos(trans, path)
: have_node_at_pos(trans, path);
-
- trace_btree_path_free(trans, path_idx, dup);
-
- if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
+ if (!dup)
return;
- if (path->should_be_locked && !trans->restarted) {
- if (!dup)
- return;
-
+ /*
+ * If we need this path locked, the duplicate also has to be locked
+ * before we free this one:
+ */
+ if (path->should_be_locked &&
+ !dup->should_be_locked &&
+ !trans->restarted) {
if (!(trans->locked
? bch2_btree_path_relock_norestart(trans, dup)
: bch2_btree_path_can_relock(trans, dup)))
return;
- }
- if (dup) {
- dup->preserve |= path->preserve;
- dup->should_be_locked |= path->should_be_locked;
+ dup->should_be_locked = true;
}
- __bch2_path_free(trans, path_idx);
-}
+ BUG_ON(path->should_be_locked &&
+ !trans->restarted &&
+ trans->locked &&
+ !btree_node_locked(dup, dup->level));
-static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
- bool intent)
-{
- if (!__btree_path_put(trans, trans->paths + path, intent))
- return;
-
- __bch2_path_free(trans, path);
+ path->should_be_locked = false;
+ dup->preserve |= path->preserve;
+free:
+ trace_btree_path_free(trans, path_idx, dup);
+ __bch2_path_free(trans, path_idx);
}
void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
@@ -1485,7 +1499,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
prt_newline(buf);
}
- for (struct jset_entry *e = trans->journal_entries;
+ for (struct jset_entry *e = btree_trans_journal_entries_start(trans);
e != btree_trans_journal_entries_top(trans);
e = vstruct_next(e)) {
bch2_journal_entry_to_text(buf, trans->c, e);
@@ -1591,7 +1605,7 @@ void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
__bch2_trans_paths_to_text(&buf, trans, nosort);
bch2_trans_updates_to_text(&buf, trans);
- bch2_print_str(trans->c, buf.buf);
+ bch2_print_str(trans->c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
@@ -1735,6 +1749,10 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
btree_trans_sort_paths(trans);
+ if (intent)
+ locks_want = max(locks_want, level + 1);
+ locks_want = min(locks_want, BTREE_MAX_DEPTH);
+
trans_for_each_path_inorder(trans, path, iter) {
if (__btree_path_cmp(path,
btree_id,
@@ -1749,7 +1767,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
if (path_pos &&
trans->paths[path_pos].cached == cached &&
trans->paths[path_pos].btree_id == btree_id &&
- trans->paths[path_pos].level == level) {
+ trans->paths[path_pos].level == level &&
+ bch2_btree_path_upgrade_norestart(trans, trans->paths + path_pos, locks_want)) {
trace_btree_path_get(trans, trans->paths + path_pos, &pos);
__btree_path_get(trans, trans->paths + path_pos, intent);
@@ -1781,9 +1800,6 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
if (!(flags & BTREE_ITER_nopreserve))
path->preserve = true;
- if (path->intent_ref)
- locks_want = max(locks_want, level + 1);
-
/*
* If the path has locks_want greater than requested, we don't downgrade
* it here - on transaction restart because btree node split needs to
@@ -1792,10 +1808,6 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
* a successful transaction commit.
*/
- locks_want = min(locks_want, BTREE_MAX_DEPTH);
- if (locks_want > path->locks_want)
- bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
-
return path_idx;
}
@@ -1967,17 +1979,24 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_
/* got to end? */
if (!btree_path_node(path, path->level + 1)) {
+ path->should_be_locked = false;
btree_path_set_level_up(trans, path);
return NULL;
}
+ /*
+ * We don't correctly handle nodes with extra intent locks here:
+ * downgrade so we don't violate locking invariants
+ */
+ bch2_btree_path_downgrade(trans, path);
+
if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
+ trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
__bch2_btree_path_unlock(trans, path);
path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
- btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
- trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
goto err;
}
@@ -2338,8 +2357,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree
}
if (iter->update_path) {
- bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
+ bch2_path_put(trans, iter->update_path, iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}
@@ -2368,8 +2386,8 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree
if (iter->update_path &&
!bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
- bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_intent);
+ bch2_path_put(trans, iter->update_path,
+ iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}
@@ -2628,7 +2646,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
* the last possible snapshot overwrite, return
* it:
*/
- bch2_path_put_nokeep(trans, iter->path,
+ bch2_path_put(trans, iter->path,
iter->flags & BTREE_ITER_intent);
iter->path = saved_path;
saved_path = 0;
@@ -2658,8 +2676,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
* our previous saved candidate:
*/
if (saved_path) {
- bch2_path_put_nokeep(trans, saved_path,
- iter->flags & BTREE_ITER_intent);
+ bch2_path_put(trans, saved_path,
+ iter->flags & BTREE_ITER_intent);
saved_path = 0;
}
@@ -2702,7 +2720,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
iter->pos.snapshot = iter->snapshot;
out_no_locked:
if (saved_path)
- bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
+ bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(trans, iter);
@@ -2743,7 +2761,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
ret = trans_maybe_inject_restart(trans, _RET_IP_);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
- goto out_no_locked;
+ goto out;
}
/* extents can't span inode numbers: */
@@ -2763,13 +2781,15 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
- goto out_no_locked;
+ goto out;
}
struct btree_path *path = btree_iter_path(trans, iter);
if (unlikely(!btree_path_node(path, path->level)))
return bkey_s_c_null;
+ btree_path_set_should_be_locked(trans, path);
+
if ((iter->flags & BTREE_ITER_cached) ||
!(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
k = bkey_s_c_null;
@@ -2790,12 +2810,12 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
if (!bkey_err(k))
iter->k = *k.k;
/* We're not returning a key from iter->path: */
- goto out_no_locked;
+ goto out;
}
- k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
+ k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
if (unlikely(!k.k))
- goto out_no_locked;
+ goto out;
if (unlikely(k.k->type == KEY_TYPE_whiteout &&
(iter->flags & BTREE_ITER_filter_snapshots) &&
@@ -2833,7 +2853,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
}
if (unlikely(bkey_err(k)))
- goto out_no_locked;
+ goto out;
next = k.k ? bkey_start_pos(k.k) : POS_MAX;
@@ -2855,8 +2875,6 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
}
}
out:
- btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
-out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_iter_verify_ret(trans, iter, k);
@@ -2923,7 +2941,7 @@ static void btree_trans_verify_sorted(struct btree_trans *trans)
struct btree_path *path, *prev = NULL;
struct trans_for_each_path_inorder_iter iter;
- if (!bch2_debug_check_iterators)
+ if (!static_branch_unlikely(&bch2_debug_check_iterators))
return;
trans_for_each_path_inorder(trans, path, iter) {
@@ -3025,7 +3043,7 @@ static inline void btree_path_list_add(struct btree_trans *trans,
void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
if (iter->update_path)
- bch2_path_put_nokeep(trans, iter->update_path,
+ bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
if (iter->path)
bch2_path_put(trans, iter->path,
@@ -3089,7 +3107,19 @@ void bch2_trans_copy_iter(struct btree_trans *trans,
dst->key_cache_path = 0;
}
-void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+void bch2_trans_kmalloc_trace_to_text(struct printbuf *out,
+ darray_trans_kmalloc_trace *trace)
+{
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 60);
+
+ darray_for_each(*trace, i)
+ prt_printf(out, "%pS\t%zu\n", (void *) i->ip, i->bytes);
+}
+#endif
+
+void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long ip)
{
struct bch_fs *c = trans->c;
unsigned new_top = trans->mem_top + size;
@@ -3099,14 +3129,35 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
void *new_mem;
void *p;
- WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+ if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) {
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+ struct printbuf buf = PRINTBUF;
+ bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace);
+ bch2_print_str(c, KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+#endif
+ }
ret = trans_maybe_inject_restart(trans, _RET_IP_);
if (ret)
return ERR_PTR(ret);
struct btree_transaction_stats *s = btree_trans_stats(trans);
- s->max_mem = max(s->max_mem, new_bytes);
+ if (new_bytes > s->max_mem) {
+ mutex_lock(&s->lock);
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+ darray_resize(&s->trans_kmalloc_trace, trans->trans_kmalloc_trace.nr);
+ s->trans_kmalloc_trace.nr = min(s->trans_kmalloc_trace.size,
+ trans->trans_kmalloc_trace.nr);
+
+ memcpy(s->trans_kmalloc_trace.data,
+ trans->trans_kmalloc_trace.data,
+ sizeof(s->trans_kmalloc_trace.data[0]) *
+ s->trans_kmalloc_trace.nr);
+#endif
+ s->max_mem = new_bytes;
+ mutex_unlock(&s->lock);
+ }
if (trans->used_mempool) {
if (trans->mem_bytes >= new_bytes)
@@ -3166,6 +3217,8 @@ out_new_mem:
BCH_ERR_transaction_restart_mem_realloced, _RET_IP_));
}
out_change_top:
+ bch2_trans_kmalloc_trace(trans, size, ip);
+
p = trans->mem + trans->mem_top;
trans->mem_top += size;
memset(p, 0, size);
@@ -3225,7 +3278,6 @@ u32 bch2_trans_begin(struct btree_trans *trans)
trans->restart_count++;
trans->mem_top = 0;
- trans->journal_entries = NULL;
trans_for_each_path(trans, path, i) {
path->should_be_locked = false;
@@ -3279,6 +3331,10 @@ u32 bch2_trans_begin(struct btree_trans *trans)
}
#endif
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+ trans->trans_kmalloc_trace.nr = 0;
+#endif
+
trans_set_locked(trans, false);
if (trans->restarted) {
@@ -3379,7 +3435,6 @@ got_trans:
}
trans->nr_paths_max = s->nr_max_paths;
- trans->journal_entries_size = s->journal_entries_size;
}
trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
@@ -3391,28 +3446,44 @@ got_trans:
return trans;
}
-static void check_btree_paths_leaked(struct btree_trans *trans)
-{
#ifdef CONFIG_BCACHEFS_DEBUG
- struct bch_fs *c = trans->c;
+
+static bool btree_paths_leaked(struct btree_trans *trans)
+{
struct btree_path *path;
unsigned i;
trans_for_each_path(trans, path, i)
if (path->ref)
- goto leaked;
- return;
-leaked:
- bch_err(c, "btree paths leaked from %s!", trans->fn);
- trans_for_each_path(trans, path, i)
- if (path->ref)
- printk(KERN_ERR " btree %s %pS\n",
- bch2_btree_id_str(path->btree_id),
- (void *) path->ip_allocated);
- /* Be noisy about this: */
- bch2_fatal_error(c);
-#endif
+ return true;
+ return false;
+}
+
+static void check_btree_paths_leaked(struct btree_trans *trans)
+{
+ if (btree_paths_leaked(trans)) {
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+ unsigned i;
+
+ struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+
+ prt_printf(&buf, "btree paths leaked from %s!\n", trans->fn);
+ trans_for_each_path(trans, path, i)
+ if (path->ref)
+ prt_printf(&buf, "btree %s %pS\n",
+ bch2_btree_id_str(path->btree_id),
+ (void *) path->ip_allocated);
+
+ bch2_fs_emergency_read_only2(c, &buf);
+ bch2_print_str(c, KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+ }
}
+#else
+static inline void check_btree_paths_leaked(struct btree_trans *trans) {}
+#endif
void bch2_trans_put(struct btree_trans *trans)
__releases(&c->btree_trans_barrier)
@@ -3448,6 +3519,9 @@ void bch2_trans_put(struct btree_trans *trans)
#ifdef CONFIG_BCACHEFS_DEBUG
darray_exit(&trans->last_restarted_trace);
#endif
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+ darray_exit(&trans->trans_kmalloc_trace);
+#endif
unsigned long *paths_allocated = trans->paths_allocated;
trans->paths_allocated = NULL;
@@ -3602,6 +3676,9 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
for (s = c->btree_transaction_stats;
s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
s++) {
+#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
+ darray_exit(&s->trans_kmalloc_trace);
+#endif
kfree(s->max_paths_text);
bch2_time_stats_exit(&s->lock_hold_times);
}
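
The central change in this diff converts the iterator debug checks from being compiled only under CONFIG_BCACHEFS_DEBUG to being always compiled, with the check bodies renamed to a __bch2_* prefix and thin static inline wrappers gated on a static key, so a disabled check costs one patched-out branch. Below is a minimal sketch of that pattern using the kernel's jump-label API; the names debug_check_foo, struct foo, and __foo_verify are hypothetical illustrations, not bcachefs symbols.

#include <linux/jump_label.h>

/* Off by default; flipped at runtime, e.g. from a debugfs/module knob. */
DEFINE_STATIC_KEY_FALSE(debug_check_foo);

struct foo;	/* hypothetical object to verify */

/* Expensive verification, deliberately kept out of line. */
static noinline void __foo_verify(struct foo *f)
{
	/* ... walk the structure, BUG_ON() on inconsistencies ... */
}

/* Always compiled in; a no-op branch until the key is enabled. */
static inline void foo_verify(struct foo *f)
{
	if (static_branch_unlikely(&debug_check_foo))
		__foo_verify(f);
}

Enabling the checks is then static_branch_enable(&debug_check_foo), which live-patches the branch sites; this is why the patch can drop the early "if (!bch2_debug_check_iterators) return;" tests and the #else stubs, replacing both with the wrapper form shown above.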