No bad gossip after close #3245

Merged
2 changes: 1 addition & 1 deletion devtools/gossipwith.c
@@ -221,7 +221,7 @@ int main(int argc, char *argv[])
                            "Stream complete gossip history at start");
         opt_register_arg("--max-messages", opt_set_ulongval, opt_show_ulongval,
                          &max_messages,
-                         "Terminate after reading this many messages (> 0)");
+                         "Terminate after reading this many messages");
         opt_register_noarg("--stdin", opt_set_bool, &stream_stdin,
                            "Stream gossip messages from stdin.");
         opt_register_noarg("--no-init", opt_set_bool, &no_init,
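The "(> 0)" hint is dropped because the reworked test below passes --max-messages=0: gossipwith should write its canned messages to the peer and then exit on its own instead of being killed by a timeout. The following is a hypothetical sketch of that control flow, illustrating only the behaviour the test relies on; it is not gossipwith's actual implementation, and send_msg()/recv_msg() are invented stand-ins.

/* Hypothetical sketch (NOT gossipwith's code): send the messages we
 * were given, then read at most max_messages replies.  With
 * --max-messages=0 the read loop never runs, so the tool returns as
 * soon as the messages have been written. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void send_msg(const char *label)
{
        printf("sent %s\n", label);        /* stand-in for peer I/O */
}

static bool recv_msg(void)
{
        return true;        /* pretend a gossip message arrived */
}

static void run(unsigned long max_messages,
                const char *msgs[], size_t n_msgs)
{
        for (size_t i = 0; i < n_msgs; i++)
                send_msg(msgs[i]);

        /* 0 means: do not wait for any incoming messages at all. */
        for (unsigned long n = 0; n < max_messages; n++)
                if (!recv_msg())
                        break;
}

int main(void)
{
        const char *msgs[] = { "channel_announcement",
                               "channel_update",
                               "node_announcement" };

        run(0, msgs, 3);        /* returns right after sending, as in the test */
        return 0;
}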
2 changes: 2 additions & 0 deletions gossipd/gossipd.c
@@ -1517,6 +1517,8 @@ static struct io_plan *handle_outpoint_spent(struct io_conn *conn,
                     "Deleting channel %s due to the funding outpoint being "
                     "spent",
                     type_to_string(msg, struct short_channel_id, &scid));
+                /* Suppress any now-obsolete updates/announcements */
+                add_to_txout_failures(rstate, &scid);
                 remove_channel_from_store(rstate, chan);
                 /* Freeing is sufficient since everything else is allocated off
                  * of the channel and this takes care of unregistering
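This hunk is the heart of the fix: when gossipd sees a channel's funding outpoint spent, it now records the short_channel_id as a failed txout before removing the channel from the store, so any copy of the old channel_announcement or channel_update that peers relay afterwards is quietly dropped rather than treated as bad gossip. Below is a minimal sketch of that mark-then-check pattern; the types and the set are simplified stand-ins, not c-lightning's uintmap-backed routing_state.

/* Minimal sketch of the "dead scid" filter, with simplified stand-in
 * types; NOT the real c-lightning data structures. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct short_channel_id { uint64_t u64; };

#define MAX_FAILURES 10000        /* the real map is also bounded, see routing.c below */

struct routing_state {
        uint64_t txout_failures[MAX_FAILURES];
        size_t num_txout_failures;
};

/* "This scid is dead to us": remember it so later gossip can be ignored. */
static void add_to_txout_failures(struct routing_state *rstate,
                                  const struct short_channel_id *scid)
{
        if (rstate->num_txout_failures < MAX_FAILURES)
                rstate->txout_failures[rstate->num_txout_failures++] = scid->u64;
}

/* The lookup handle_channel_announcement performs before accepting. */
static bool in_txout_failures(const struct routing_state *rstate,
                              const struct short_channel_id *scid)
{
        for (size_t i = 0; i < rstate->num_txout_failures; i++)
                if (rstate->txout_failures[i] == scid->u64)
                        return true;
        return false;
}

int main(void)
{
        static struct routing_state rstate;
        struct short_channel_id scid = { .u64 = 123456 };        /* example value */

        /* Funding outpoint spent: blacklist the scid, then delete the channel. */
        add_to_txout_failures(&rstate, &scid);

        /* A peer relays the stale channel_announcement afterwards... */
        if (in_txout_failures(&rstate, &scid))
                printf("dropping stale announcement for a closed channel\n");
        return 0;
}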
9 changes: 4 additions & 5 deletions gossipd/routing.c
@@ -258,8 +258,8 @@ static void txout_failure_age(struct routing_state *rstate)
                              txout_failure_age, rstate);
 }
 
-static void add_to_txout_failures(struct routing_state *rstate,
-                                  const struct short_channel_id *scid)
+void add_to_txout_failures(struct routing_state *rstate,
+                           const struct short_channel_id *scid)
 {
         if (uintmap_add(&rstate->txout_failures, scid->u64, true)
             && ++rstate->num_txout_failures == 10000) {
@@ -1721,9 +1721,8 @@ u8 *handle_channel_announcement(struct routing_state *rstate,
         }
 
         /* If a prior txout lookup failed there is little point it trying
-         * again. Just drop the announcement and walk away whistling. Any non-0
-         * result means this failed before. */
-        if (uintmap_get(&rstate->txout_failures, pending->short_channel_id.u64)) {
+         * again. Just drop the announcement and walk away whistling. */
+        if (in_txout_failures(rstate, &pending->short_channel_id)) {
                 SUPERVERBOSE(
                         "Ignoring channel_announcement of %s due to a prior txout "
                         "query failure. The channel was likely closed on-chain.",
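Two details in this hunk are easy to miss. First, the excerpt shows the new call to in_txout_failures() but not the helper itself; it presumably wraps the uintmap lookup that handle_channel_announcement used to do inline. Second, the set is deliberately kept small: the count is only bumped when uintmap_add() reports a genuinely new entry, it is checked against a 10000-entry cap, and a txout_failure_age timer exists to expire old entries. The sketch below shows one plausible shape for such a bounded, periodically aged blacklist; it is an assumption for illustration, since the actual aging policy is not visible in this excerpt.

/* Assumption / illustration only: one plausible shape for a bounded,
 * periodically aged failure set, suggested by the 10000-entry cap and
 * the txout_failure_age() timer above.  Not c-lightning's code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_FAILURES 10000

struct failure_gen {
        uint64_t scids[MAX_FAILURES];
        size_t num;
};

/* Recent failures live in "cur"; one aging step later they move to "old";
 * the step after that forgets them, so the set cannot grow forever. */
struct failure_set {
        struct failure_gen cur, old;
};

static bool gen_has(const struct failure_gen *g, uint64_t scid)
{
        for (size_t i = 0; i < g->num; i++)
                if (g->scids[i] == scid)
                        return true;
        return false;
}

static void failure_add(struct failure_set *s, uint64_t scid)
{
        if (!gen_has(&s->cur, scid) && s->cur.num < MAX_FAILURES)
                s->cur.scids[s->cur.num++] = scid;
}

static bool failure_check(const struct failure_set *s, uint64_t scid)
{
        return gen_has(&s->cur, scid) || gen_has(&s->old, scid);
}

/* Run from a timer (or when "cur" fills up): rotate the generations. */
static void failure_age(struct failure_set *s)
{
        s->old = s->cur;
        memset(&s->cur, 0, sizeof(s->cur));
}

int main(void)
{
        static struct failure_set set;

        failure_add(&set, 42);
        failure_age(&set);        /* 42 survives one aging round in "old"... */
        failure_age(&set);        /* ...and is forgotten after the second */
        return failure_check(&set, 42) ? 1 : 0;        /* exits 0: no longer listed */
}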
4 changes: 4 additions & 0 deletions gossipd/routing.h
@@ -529,4 +529,8 @@ void remove_channel_from_store(struct routing_state *rstate,
 const char *unfinalized_entries(const tal_t *ctx, struct routing_state *rstate);
 
 void remove_all_gossip(struct routing_state *rstate);
+
+/* This scid is dead to us. */
+void add_to_txout_failures(struct routing_state *rstate,
+                           const struct short_channel_id *scid);
 #endif /* LIGHTNING_GOSSIPD_ROUTING_H */
6 changes: 5 additions & 1 deletion tests/test_connection.py
@@ -2201,7 +2201,11 @@ def test_feerate_stress(node_factory, executor):
 @unittest.skipIf(not DEVELOPER, "need dev_disconnect")
 def test_pay_disconnect_stress(node_factory, executor):
     """Expose race in htlc restoration in channeld: 50% chance of failure"""
-    for i in range(5):
+    if SLOW_MACHINE and VALGRIND:
+        NUM_RUNS = 2
+    else:
+        NUM_RUNS = 5
+    for i in range(NUM_RUNS):
         l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
                                                   {'may_reconnect': True,
                                                    'disconnect': ['=WIRE_UPDATE_ADD_HTLC',
24 changes: 11 additions & 13 deletions tests/test_gossip.py
@@ -1094,8 +1094,10 @@ def test_gossipwith(node_factory):
 
 
 def test_gossip_notices_close(node_factory, bitcoind):
-    # We want IO logging so we can replay a channel_announce to l1.
-    l1 = node_factory.get_node(options={'log-level': 'io'})
+    # We want IO logging so we can replay a channel_announce to l1;
+    # We also *really* do feed it bad gossip!
+    l1 = node_factory.get_node(options={'log-level': 'io'},
+                               allow_bad_gossip=True)
     l2, l3 = node_factory.line_graph(2)
     l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
     # FIXME: sending SIGUSR1 immediately may kill it before handler installed.
@@ -1121,17 +1123,13 @@ def test_gossip_notices_close(node_factory, bitcoind):
     wait_for(lambda: l1.rpc.listchannels()['channels'] == [])
     wait_for(lambda: l1.rpc.listnodes()['nodes'] == [])
 
-    # FIXME: This is a hack: we should have a framework for canned conversations
-    # This doesn't naturally terminate, so we give it 5 seconds.
-    try:
-        subprocess.run(['devtools/gossipwith',
-                        '{}@localhost:{}'.format(l1.info['id'], l1.port),
-                        channel_announcement,
-                        channel_update,
-                        node_announcement],
-                       timeout=5, stdout=subprocess.PIPE)
-    except subprocess.TimeoutExpired:
-        pass
+    subprocess.run(['devtools/gossipwith',
+                    '--max-messages=0',
+                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
+                    channel_announcement,
+                    channel_update,
+                    node_announcement],
+                   timeout=TIMEOUT)
 
     # l1 should reject it.
     assert(l1.rpc.listchannels()['channels'] == [])
1 change: 1 addition & 0 deletions tests/test_pay.py
@@ -1538,6 +1538,7 @@ def test_pay_variants(node_factory):
 
 
 @unittest.skipIf(not DEVELOPER, "gossip without DEVELOPER=1 is slow")
+@unittest.skipIf(VALGRIND and SLOW_MACHINE, "Travis times out under valgrind")
 def test_pay_retry(node_factory, bitcoind):
     """Make sure pay command retries properly. """
     def exhaust_channel(funder, fundee, scid, already_spent=0):