Skip to content

Commit

Permalink
bcachefs: Btree path tracepoints
Browse files Browse the repository at this point in the history
Fastpath tracepoints, rarely needed, only enabled with
CONFIG_BCACHEFS_PATH_TRACEPOINTS.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
  • Loading branch information
koverstreet authored and Kent Overstreet committed Sep 9, 2024
1 parent abbfc4d commit 32ed4a6
Show file tree
Hide file tree
Showing 7 changed files with 508 additions and 22 deletions.
7 changes: 7 additions & 0 deletions fs/bcachefs/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,13 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN
is held by another thread, spin for a short while, as long as the
thread owning the lock is running.

config BCACHEFS_PATH_TRACEPOINTS
bool "Extra btree_path tracepoints"
depends on BCACHEFS_FS
help
Enable extra tracepoints for debugging btree_path operations; we don't
normally want these enabled because they happen at very high rates.

config MEAN_AND_VARIANCE_UNIT_TEST
tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
depends on KUNIT
Expand Down
38 changes: 26 additions & 12 deletions fs/bcachefs/btree_iter.c
Original file line number Diff line number Diff line change
Expand Up @@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
if (unlikely(!trans->srcu_held))
bch2_trans_srcu_lock(trans);

trace_btree_path_traverse_start(trans, path);

/*
* Ensure we obey path->should_be_locked: if it's set, we can't unlock
* and re-traverse the path without a transaction restart:
Expand Down Expand Up @@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,

out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
trace_btree_path_traverse_end(trans, path);
out:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
panic("ret %s (%i) trans->restarted %s (%i)\n",
Expand Down Expand Up @@ -1236,8 +1239,10 @@ __flatten
/*
 * Return a mutable version of @path: drop our reference on the current
 * path and clone it; the clone does not inherit @preserve.
 *
 * NOTE(review): btree_path_clone() may allocate a new path slot, which
 * can reallocate the trans->paths array — so a struct btree_path *
 * taken before the call is not safe to use afterwards.  Hold on to the
 * (stable) path index instead and recompute the pointer for the
 * tracepoint after the clone.
 */
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	btree_path_idx_t old = path;

	__btree_path_put(trans, trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
	trace_btree_path_clone(trans, trans->paths + old, trans->paths + path);
	/* the clone is a scratch copy: don't keep it around on behalf of the iterator */
	trans->paths[path].preserve = false;
	return path;
}
Expand All @@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
bch2_trans_verify_not_in_restart(trans);
EBUG_ON(!trans->paths[path_idx].ref);

trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);

path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

struct btree_path *path = trans->paths + path_idx;
Expand Down Expand Up @@ -1368,6 +1375,8 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
? have_path_at_pos(trans, path)
: have_node_at_pos(trans, path);

trace_btree_path_free(trans, path_idx, dup);

if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
return;

Expand Down Expand Up @@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
prt_printf(buf, "transaction updates for %s journal seq %llu\n",
trans->fn, trans->journal_res.seq);
prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
trans->nr_updates, trans->fn, trans->journal_res.seq);
printbuf_indent_add(buf, 2);

trans_for_each_update(trans, i) {
Expand Down Expand Up @@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
{
struct btree_path *path = trans->paths + path_idx;

prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
Expand Down Expand Up @@ -1716,6 +1725,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
trans->paths[path_pos].cached == cached &&
trans->paths[path_pos].btree_id == btree_id &&
trans->paths[path_pos].level == level) {
trace_btree_path_get(trans, trans->paths + path_pos, &pos);

__btree_path_get(trans, trans->paths + path_pos, intent);
path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
path = trans->paths + path_idx;
Expand All @@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
path->ip_allocated = ip;
#endif
trans->paths_sorted = false;

trace_btree_path_alloc(trans, path);
}

if (!(flags & BTREE_ITER_nopreserve))
Expand Down Expand Up @@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)

struct btree_path *path = btree_iter_path(trans, iter);
if (btree_path_node(path, path->level))
btree_path_set_should_be_locked(path);
btree_path_set_should_be_locked(trans, path);
return 0;
}

Expand Down Expand Up @@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
Expand Down Expand Up @@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
Expand Down Expand Up @@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
if (unlikely(ret))
return bkey_s_c_err(ret);

btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);

k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (k.k && !bkey_err(k)) {
Expand Down Expand Up @@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
goto out;
}

btree_path_set_should_be_locked(path);
btree_path_set_should_be_locked(trans, path);

k = btree_path_level_peek_all(trans->c, l, &iter->k);

Expand Down Expand Up @@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

btree_path_set_should_be_locked(btree_iter_path(trans, iter));
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
if (iter->update_path) {
ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
if (unlikely(ret))
k = bkey_s_c_err(ret);
else
btree_path_set_should_be_locked(trans->paths + iter->update_path);
btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
}

if (!(iter->flags & BTREE_ITER_all_snapshots))
Expand Down Expand Up @@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
_THIS_IP_);
path = btree_iter_path(trans, iter);
trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
saved_k = *k.k;
saved_v = k.v;
}
Expand All @@ -2527,7 +2541,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
continue;
}

btree_path_set_should_be_locked(path);
btree_path_set_should_be_locked(trans, path);
break;
} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
Expand Down Expand Up @@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
}
}
out:
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
Expand Down
2 changes: 2 additions & 0 deletions fs/bcachefs/btree_iter.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ static inline void __btree_path_get(struct btree_trans *trans, struct btree_path

path->ref++;
path->intent_ref += intent;
trace_btree_path_get_ll(trans, path);
}

static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
Expand All @@ -39,6 +40,7 @@ static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path
EBUG_ON(!path->ref);
EBUG_ON(!path->intent_ref && intent);

trace_btree_path_put_ll(trans, path);
path->intent_ref -= intent;
return --path->ref == 0;
}
Expand Down
6 changes: 5 additions & 1 deletion fs/bcachefs/btree_locking.h
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,9 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
bch2_six_check_for_deadlock, trans, ip);
WRITE_ONCE(trans->locking, NULL);
WRITE_ONCE(trans->locking_wait.start_time, 0);

if (!ret)
trace_btree_path_lock(trans, _THIS_IP_, b);
return ret;
}

Expand Down Expand Up @@ -400,12 +403,13 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
/*
 * Mark @path as one whose locks must be kept held: once set, the path
 * can't be unlocked and re-traversed without a transaction restart.
 */
static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
	/* caller must already hold a node lock at path->level */
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
	/* fastpath tracepoint; no-op unless CONFIG_BCACHEFS_PATH_TRACEPOINTS */
	trace_btree_path_should_be_locked(trans, path);
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
Expand Down
10 changes: 7 additions & 3 deletions fs/bcachefs/btree_update.c
Original file line number Diff line number Diff line change
Expand Up @@ -374,7 +374,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
i->key_cache_already_flushed = true;
i->flags |= BTREE_TRIGGER_norun;

btree_path_set_should_be_locked(btree_path);
btree_path_set_should_be_locked(trans, btree_path);
ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
bch2_path_put(trans, path_idx, true);
Expand Down Expand Up @@ -422,7 +422,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
break;
}

if (!cmp && i < trans->updates + trans->nr_updates) {
bool overwrite = !cmp && i < trans->updates + trans->nr_updates;

if (overwrite) {
EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

bch2_path_put(trans, i->path, true);
Expand Down Expand Up @@ -451,6 +453,8 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,

__btree_path_get(trans, trans->paths + i->path, true);

trace_update_by_path(trans, path, i, overwrite);

/*
* If a key is present in the key cache, it must also exist in the
* btree - this is necessary for cache coherency. When iterating over
Expand Down Expand Up @@ -498,7 +502,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
}

btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
}

return 0;
Expand Down
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_update_interior.c
Original file line number Diff line number Diff line change
Expand Up @@ -1981,7 +1981,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
if (ret)
goto err;

btree_path_set_should_be_locked(trans->paths + sib_path);
btree_path_set_should_be_locked(trans, trans->paths + sib_path);

m = trans->paths[sib_path].l[level].b;

Expand Down
Loading

0 comments on commit 32ed4a6

Please sign in to comment.