1from collections import Counter
2from ephemeral_port_reserve import reserve
3from fixtures import *  # noqa: F401,F403
4from fixtures import TEST_NETWORK
5from pyln.client import RpcError, Millisatoshi
6from utils import (
7    DEVELOPER, wait_for, TIMEOUT, only_one, sync_blockheight, expected_node_features, COMPAT
8)
9
10import json
11import logging
12import math
13import os
14import pytest
15import struct
16import subprocess
17import time
18import unittest
19import socket
20
21
# Parse config.vars (one KEY=VALUE per line) into a module-level dict.
with open('config.vars') as configfile:
    config = {key: val for key, val in
              (line.rstrip().split('=', 1) for line in configfile)}
24
25
@pytest.mark.developer("needs --dev-fast-gossip-prune")
def test_gossip_pruning(node_factory, bitcoind):
    """ Create channel and see it being updated in time before pruning
    """
    # All nodes run with accelerated gossip prune timers so this test
    # completes in tens of seconds rather than two weeks.
    l1, l2, l3 = node_factory.get_nodes(3, opts={'dev-fast-gossip-prune': None})

    # Line topology: l1 <-> l2 <-> l3.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    scid1, _ = l1.fundchannel(l2, 10**6)
    scid2, _ = l2.fundchannel(l3, 10**6)

    # Confirm deeply enough for channel announcements.
    bitcoind.generate_block(6)

    # Channels should be activated locally
    # (2 channels x 2 directions = 4 listchannels entries).
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)

    # All of them should send a keepalive message (after 30 seconds)
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ], timeout=50)
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l2, so that l1 and l3 will prune from their view after 60 seconds
    l2.stop()

    # We check every 60/4 seconds, and takes 60 seconds since last update.
    l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2),
                           timeout=80)
    l3.daemon.wait_for_log("Pruning channel {} from network view".format(scid1))

    # The far channel, and the now-unreachable far node, must be gone from
    # each surviving node's view.
    assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
    assert scid1 not in [c['short_channel_id'] for c in l3.rpc.listchannels()['channels']]
    assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
    assert l1.info['id'] not in [n['nodeid'] for n in l3.rpc.listnodes()['nodes']]
69
70
@pytest.mark.developer("needs --dev-fast-gossip, --dev-no-reconnect")
def test_gossip_disable_channels(node_factory, bitcoind):
    """Channels are disabled on disconnect and re-enabled on reconnect."""
    l1, l2 = node_factory.get_nodes(
        2, opts={'dev-no-reconnect': None, 'may_reconnect': True})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    bitcoind.generate_block(5)

    def count_active(node):
        # Number of channel directions currently marked active in this
        # node's gossip view.
        return sum(c['active'] for c in node.rpc.listchannels()['channels'])

    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)

    # One channel, both directions active.
    assert count_active(l1) == 2
    assert count_active(l2) == 2

    l2.restart()

    wait_for(lambda: count_active(l1) == 0)
    assert count_active(l2) == 0

    # Reconnecting should re-enable both directions.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    wait_for(lambda: count_active(l1) == 2)
    wait_for(lambda: count_active(l2) == 2)
105
106
@pytest.mark.developer("needs --dev-allow-localhost")
def test_announce_address(node_factory, bitcoind):
    """Make sure our announcements are well formed."""

    # We do not allow announcement of duplicates.
    opts = {'announce-addr':
            ['4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion',
             '1.2.3.4:1234',
             '::'],
            # io-level logging so raw wire messages show up in the log.
            'log-level': 'io',
            'dev-allow-localhost': None}
    l1, l2 = node_factory.get_nodes(2, opts=[opts, {}])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    bitcoind.generate_block(5)

    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)

    # We should see it send node announce with all addresses (257 = 0x0101)
    # local ephemeral port is masked out.
    # NOTE(review): the hex chunks appear to be type-prefixed address
    # descriptors (01 = IPv4, 02 = IPv6, 04 = torv3, per BOLT 7) — confirm
    # against the spec if this regex ever needs updating; the "...." wildcards
    # the localhost ephemeral port bytes.
    l1.daemon.wait_for_log(r"\[OUT\] 0101.*47"
                           "010102030404d2"
                           "017f000001...."
                           "02000000000000000000000000000000002607"
                           "04e00533f3e8f2aedaa8969b3d0fa03a96e857bbb28064dca5e147e934244b9ba50230032607")
134
135
@pytest.mark.developer("needs DEVELOPER=1")
def test_gossip_timestamp_filter(node_factory, bitcoind, chainparams):
    """Check gossip_timestamp_filter limits replies to the requested window.

    Two channels are created at distinct wall-clock timestamps; queries with
    various (first_timestamp, timestamp_range) windows should return all,
    none, or exactly one channel's gossip.
    """
    # Updates get backdated 5 seconds with --dev-fast-gossip.
    backdate = 5
    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False)
    genesis_blockhash = chainparams['chain_hash']

    before_anything = int(time.time())

    # Make a public channel.
    chan12, _ = l1.fundchannel(l2, 10**5)
    bitcoind.generate_block(5)

    l3.wait_for_channel_updates([chan12])
    after_12 = int(time.time())

    # Make another one, different timestamp.
    # (sleep ensures the second channel's updates get a later timestamp)
    time.sleep(1)
    chan23, _ = l2.fundchannel(l3, 10**5)
    bitcoind.generate_block(5)

    l1.wait_for_channel_updates([chan23])
    after_23 = int(time.time())

    # Make sure l4 has received all the gossip.
    wait_for(lambda: ['alias' in node for node in l4.rpc.listnodes()['nodes']] == [True, True, True])

    # Full-range query: everything should come back.
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', '0xFFFFFFFF',
                           filters=['0109'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    # The order of node_announcements relative to others is undefined.
    types = Counter([m[0:4] for m in msgs])
    assert types == Counter(['0100'] * 2 + ['0102'] * 4 + ['0101'] * 3)

    # Now timestamp which doesn't overlap (gives nothing).
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', before_anything - backdate,
                           filters=['0109'])
    assert msgs == []

    # Now choose range which will only give first update.
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           before_anything - backdate,
                           after_12 - before_anything + 1,
                           filters=['0109'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    types = Counter([m[0:4] for m in msgs])
    assert types['0100'] == 1
    assert types['0102'] == 2

    # Now choose range which will only give second update.
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           after_12 - backdate,
                           after_23 - after_12 + 1,
                           filters=['0109'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    types = Counter([m[0:4] for m in msgs])
    assert types['0100'] == 1
    assert types['0102'] == 2
209
210
@pytest.mark.developer("needs --dev-allow-localhost")
def test_connect_by_gossip(node_factory, bitcoind):
    """Test connecting to an unknown peer using node gossip
    """
    # l1 announces bogus addresses (so nobody can actually dial them);
    # l3 announces localhost via dev-allow-localhost.
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'announce-addr':
                                               ['127.0.0.1:2',
                                                '[::]:2',
                                                'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion'],
                                               'dev-allow-localhost': None},
                                              {},
                                              {'dev-allow-localhost': None,
                                               'log-level': 'io'}])
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Nodes are gossiped only if they have channels
    chanid, _ = l2.fundchannel(l3, 10**6)
    bitcoind.generate_block(5)

    # Let channel reach announcement depth
    l2.wait_channel_active(chanid)

    # Make sure l3 has given node announcement to l2.
    l2.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])

    # Let l1 learn of l3 by node gossip
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])

    # Have l1 connect to l3 without explicit host and port.
    # It must fall back to the address from l3's node_announcement.
    ret = l1.rpc.connect(l3.info['id'])
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l3.port}

    # Now give it *wrong* port (after we make sure l2 isn't listening), it should fall back.
    l1.rpc.disconnect(l3.info['id'])
    l2.stop()
    ret = l1.rpc.connect(l3.info['id'], 'localhost', l2.port)
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l3.port}
250
251
@pytest.mark.developer("DEVELOPER=1 needed to speed up gossip propagation, would be too long otherwise")
def test_gossip_jsonrpc(node_factory):
    """Exercise listchannels/listnodes through a channel's public lifecycle.

    Covers: local-only (pre-announcement) visibility, routing over an
    unannounced channel, listchannels filtering by source/destination/scid,
    and the transition to public once announcement depth is reached.
    """
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=False)

    # Shouldn't send announce signatures until 6 deep.
    assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')

    # Channels should be activated locally
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 2)

    # Make sure we can route through the channel, will raise on failure
    l1.rpc.getroute(l2.info['id'], 100, 1)

    # Outgoing should be active, but not public.
    channels1 = l1.rpc.listchannels()['channels']
    channels2 = l2.rpc.listchannels()['channels']

    assert [c['active'] for c in channels1] == [True, True]
    assert [c['active'] for c in channels2] == [True, True]
    # The incoming direction will be considered public, hence check for
    # outgoing only
    assert len([c for c in channels1 if not c['public']]) == 2
    assert len([c for c in channels2 if not c['public']]) == 2

    # Test listchannels-by-source
    channels1 = l1.rpc.listchannels(source=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(source=l1.info['id'])['channels']
    assert only_one(channels1)['source'] == l1.info['id']
    assert only_one(channels1)['destination'] == l2.info['id']
    assert channels1 == channels2

    # Test listchannels-by-destination
    channels1 = l1.rpc.listchannels(destination=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(destination=l1.info['id'])['channels']
    assert only_one(channels1)['destination'] == l1.info['id']
    assert only_one(channels1)['source'] == l2.info['id']
    assert channels1 == channels2

    # Test only one of short_channel_id, source or destination can be supplied
    with pytest.raises(RpcError, match=r"Can only specify one of.*"):
        l1.rpc.listchannels(source=l1.info['id'], destination=l2.info['id'])
    with pytest.raises(RpcError, match=r"Can only specify one of.*"):
        l1.rpc.listchannels(short_channel_id="1x1x1", source=l2.info['id'])

    # Now proceed to funding-depth and do a full gossip round
    l1.bitcoin.generate_block(5)
    # Could happen in either order.
    l1.daemon.wait_for_logs(['peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             'peer_in WIRE_ANNOUNCEMENT_SIGNATURES'])

    # Just wait for the update to kick off and then check the effect
    needle = "Received node_announcement for node"
    l1.daemon.wait_for_log(needle)
    l2.daemon.wait_for_log(needle)
    # Need to increase timeout, intervals cannot be shortened with DEVELOPER=0
    wait_for(lambda: len(l1.getactivechannels()) == 2, timeout=60)
    wait_for(lambda: len(l2.getactivechannels()) == 2, timeout=60)

    nodes = l1.rpc.listnodes()['nodes']
    assert set([n['nodeid'] for n in nodes]) == set([l1.info['id'], l2.info['id']])

    # Test listnodes with an arg, while we're here.
    n1 = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    n2 = l1.rpc.listnodes(l2.info['id'])['nodes'][0]
    assert n1['nodeid'] == l1.info['id']
    assert n2['nodeid'] == l2.info['id']

    # Might not have seen other node-announce yet.
    # (aliases/colors are the deterministic test-fixture defaults)
    assert n1['alias'].startswith('JUNIORBEAM')
    assert n1['color'] == '0266e4'
    if 'alias' not in n2:
        assert 'color' not in n2
        assert 'addresses' not in n2
    else:
        assert n2['alias'].startswith('SILENTARTIST')
        assert n2['color'] == '022d22'

    # After announcement depth, both directions are active AND public.
    assert [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l2.rpc.listchannels()['channels']] == [True, True]
334
335
@pytest.mark.developer("Too slow without --dev-fast-gossip")
def test_gossip_badsig(node_factory):
    """Smoke test that announcement signatures verify correctly.

    This used to fail occasionally before PR #276 was merged: we'd be
    waiting for the HSM to reply with a signature and would then regenerate
    the message, which might roll the timestamp, invalidating the signature.
    """
    l1, l2, l3 = node_factory.get_nodes(3)

    # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.fundchannel(l1, 10**6)
    l2.fundchannel(l3, 10**6)

    # Wait for route propagation.
    l1.bitcoin.generate_block(5)
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l3.info['id']))

    # No node may have seen a bad signature anywhere along the way.
    for node in (l1, l2, l3):
        assert not node.daemon.is_in_log('signature verification failed')
361
362
def test_gossip_weirdalias(node_factory, bitcoind):
    """An alias full of whitespace/escapes must survive the announcement
    round-trip: logged correctly at startup and reproduced verbatim in both
    nodes' listnodes output after gossip."""
    weird_name = '\t \n \" \n \r \n \\'
    normal_name = 'Normal name'
    opts = [
        {'alias': weird_name},
        {'alias': normal_name}
    ]
    l1, l2 = node_factory.get_nodes(2, opts=opts)
    # The daemon logs the alias JSON-escaped; json.dumps gives the same
    # escaping (strip the surrounding quotes to match the log text).
    weird_name_json = json.dumps(weird_name)[1:-1]
    aliasline = l1.daemon.is_in_log('Server started with public key .* alias')
    assert weird_name_json in str(aliasline)
    assert l2.daemon.is_in_log('Server started with public key .* alias {}'
                               .format(normal_name))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('Handed peer, entering loop')
    l2.fundchannel(l1, 10**6)
    bitcoind.generate_block(6)

    # They should gossip together.
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l2.info['id']))
    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))

    # Both l1's own view and l2's gossiped view must carry the raw alias.
    node = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    assert node['alias'] == weird_name
    node = l2.rpc.listnodes(l1.info['id'])['nodes'][0]
    assert node['alias'] == weird_name
392
393
@pytest.mark.developer("needs DEVELOPER=1 for --dev-no-reconnect")
def test_gossip_persistence(node_factory, bitcoind):
    """Gossip for a while, restart and it should remember.

    Also tests for funding outpoint spends, and they should be persisted
    too.
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    # Line topology: l1 - l2 - l3 - l4.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    scid12, _ = l1.fundchannel(l2, 10**6)
    scid23, _ = l2.fundchannel(l3, 10**6)

    # Make channels public, except for l3 -> l4, which is kept local-only for now
    bitcoind.generate_block(5)
    scid34, _ = l3.fundchannel(l4, 10**6)
    bitcoind.generate_block(1)

    def active(node):
        # Sorted scids of active channel directions in node's view
        # (each channel appears once per direction).
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if c['active']])

    def non_public(node):
        # Sorted scids of channel directions the node knows but considers
        # private.
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if not c['public']])

    # Channels should be activated
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
    wait_for(lambda: active(l2) == [scid12, scid12, scid23, scid23])
    # This one sees its private channel
    wait_for(lambda: active(l3) == [scid12, scid12, scid23, scid23, scid34, scid34])

    # l1 restarts and doesn't connect, but loads from persisted store, all
    # local channels should be disabled, leaving only the two l2 <-> l3
    # directions
    l1.restart()
    wait_for(lambda: active(l1) == [scid23, scid23])

    # Now reconnect, they should re-enable the two l1 <-> l2 directions
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])

    # Now spend the funding tx, generate a block and see others deleting the
    # channel from their network view
    l1.rpc.dev_fail(l2.info['id'])

    # We need to wait for the unilateral close to hit the mempool
    bitcoind.generate_block(1, wait_for_mempool=1)

    wait_for(lambda: active(l1) == [scid23, scid23])
    wait_for(lambda: active(l2) == [scid23, scid23])
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # The channel l3 -> l4 should be known only to them
    assert non_public(l1) == []
    assert non_public(l2) == []
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])

    # Finally, it should also remember the deletion after a restart
    l3.restart()
    l4.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
    wait_for(lambda: active(l3) == [scid23, scid23, scid34, scid34])

    # Both l3 and l4 should remember their local-only channel
    wait_for(lambda: non_public(l3) == [scid34, scid34])
    wait_for(lambda: non_public(l4) == [scid34, scid34])
467
468
@pytest.mark.developer("needs DEVELOPER=1")
def test_routing_gossip_reconnect(node_factory):
    """Connect two peers, reconnect, and check that gossip resumes."""
    # l1 drops the connection just before sending channel_announcement.
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'disconnect': ['-WIRE_CHANNEL_ANNOUNCEMENT'],
                                               'may_reconnect': True},
                                              {'may_reconnect': True},
                                              {}])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.openchannel(l2, 25000)

    # Open a second channel; everybody should still sync up.
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.openchannel(l3, 25000)

    # Settle the gossip: 2 channels x 2 directions on every node.
    for node in (l1, l2, l3):
        wait_for(lambda: len(node.rpc.listchannels()['channels']) == 4)
489
490
@pytest.mark.developer("needs DEVELOPER=1")
def test_gossip_no_empty_announcements(node_factory, bitcoind):
    """A channel_announcement without a channel_update must not propagate."""
    # Need full IO logging so we can see gossip
    # l3 sends CHANNEL_ANNOUNCEMENT to l2, but not CHANNEL_UPDATE
    # (it disconnects right after sending the announcement).
    l1, l2, l3, l4 = node_factory.line_graph(4, opts=[{'log-level': 'io'},
                                                      {'log-level': 'io'},
                                                      {'disconnect': ['+WIRE_CHANNEL_ANNOUNCEMENT'],
                                                       'may_reconnect': True},
                                                      {'may_reconnect': True}],
                                             fundchannel=False)

    # Make an announced-but-not-updated channel.
    l3.fundchannel(l4, 10**5)
    bitcoind.generate_block(5)

    # 0x0100 = channel_announcement, which goes to l2 before l3 dies.
    l2.daemon.wait_for_log(r'\[IN\] 0100')

    # But it never goes to l1, as there's no channel_update.
    time.sleep(2)
    assert not l1.daemon.is_in_log(r'\[IN\] 0100')
    assert len(l1.rpc.listchannels()['channels']) == 0

    # If we reconnect, gossip will now flow.
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
517
518
@pytest.mark.developer("Too slow without --dev-fast-gossip")
def test_routing_gossip(node_factory, bitcoind):
    """Open a chain of channels across 5 nodes; gossip must reach them all."""
    nodes = node_factory.get_nodes(5)

    sync_blockheight(bitcoind, nodes)
    for src, dst in zip(nodes[:-1], nodes[1:]):
        src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        src.openchannel(dst, 25000, confirm=False, wait_for_announce=False)
        sync_blockheight(bitcoind, nodes)

    # Avoid "bad gossip" caused by future announcements (a node below
    # confirmation height receiving and ignoring the announcement,
    # thus marking followup messages as bad).
    sync_blockheight(bitcoind, nodes)

    # Allow announce messages.
    bitcoind.generate_block(6)

    # Every adjacent pair should be visible in both directions.
    expected = set()
    for a, b in zip(nodes[:-1], nodes[1:]):
        expected.add((a.info['id'], b.info['id']))
        expected.add((b.info['id'], a.info['id']))

    def check_gossip(n):
        # True once n's view contains every expected (source, destination).
        seen = {(c['source'], c['destination'])
                for c in n.rpc.listchannels()['channels']}
        missing = expected - seen
        logging.debug("Node {id} is missing channels {chans}".format(
            id=n.info['id'],
            chans=missing)
        )
        return len(missing) == 0

    for n in nodes:
        wait_for(lambda: check_gossip(n))
558
559
@pytest.mark.developer("needs dev-set-max-scids-encode-size")
def test_gossip_query_channel_range(node_factory, bitcoind, chainparams):
    """Exercise query_channel_range / reply_channel_range handling.

    Funds two channels on consecutive blocks, then queries l2 with various
    block ranges and checks the raw reply_channel_range (0x0108) hex against
    devtools/mkencoded output.  Also covers splitting replies when the
    encode-size limit is artificially lowered, and zlib-encoded scids.
    """
    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False)
    genesis_blockhash = chainparams['chain_hash']

    # Make public channels on consecutive blocks
    l1.fundwallet(10**6)
    l2.fundwallet(10**6)

    # Fund channel 1-2, confirm in its own block.
    # (the ['tx'] subscript just asserts the RPC result has a 'tx' field)
    num_tx = len(bitcoind.rpc.getrawmempool())
    l1.rpc.fundchannel(l2.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    # Fund channel 2-3 in the next block.
    num_tx = len(bitcoind.rpc.getrawmempool())
    l2.rpc.fundchannel(l3.info['id'], 10**5)['tx']
    wait_for(lambda: len(bitcoind.rpc.getrawmempool()) == num_tx + 1)
    bitcoind.generate_block(1)

    # Get them both to gossip depth.
    bitcoind.generate_block(5)

    # Make sure l2 has received all the gossip.
    l2.daemon.wait_for_logs(['Received node_announcement for node ' + l1.info['id'],
                             'Received node_announcement for node ' + l3.info['id']])

    scid12 = only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    scid23 = only_one(l3.rpc.listpeers(l2.info['id'])['peers'])['channels'][0]['short_channel_id']
    block12 = int(scid12.split('x')[0])
    block23 = int(scid23.split('x')[0])

    # The two channels were confirmed in consecutive blocks.
    assert block23 == block12 + 1

    # Asks l2 for all channels, gets both.
    msgs = l2.query_gossip('query_channel_range',
                           chainparams['chain_hash'],
                           0, 1000000,
                           filters=['0109'])
    # '00' = uncompressed scid encoding.
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(1000000, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Does not include scid12
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block12,
                           filters=['0109'])
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block12, '08x') + '01'
                    # encoded_short_ids (length 1, just the '00' encoding byte)
                    '000100']

    # Does include scid12
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block12 + 1,
                           filters=['0109'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block12 + 1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Doesn't include scid23
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block23,
                           filters=['0109'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block23, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Does include scid23
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block12, block23 - block12 + 1,
                           filters=['0109'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block12, '08x') + format(block23 - block12 + 1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Only includes scid23
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block23, 1,
                           filters=['0109'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block23, '08x') + format(1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Past both
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block23 + 1, 1000000,
                           filters=['0109'])
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block23 + 1, '08x') + format(1000000, '08x') + '01'
                    # encoded_short_ids (empty: only the encoding byte)
                    + '000100']

    # Make l2 split reply into two (technically async)
    l2.rpc.dev_set_max_scids_encode_size(max=9)
    l2.daemon.wait_for_log('Set max_scids_encode_bytes to 9')

    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, 1000000,
                           filters=['0109'])
    # It should definitely have split
    l2.daemon.wait_for_log('reply_channel_range: splitting 0-1 of 2')

    # Reassemble the split replies: block ranges must be contiguous and the
    # concatenated scids must match the full encoding.
    start = 0
    scids = '00'
    for m in msgs:
        assert m.startswith('0108' + genesis_blockhash)
        # Hex offsets: 4 type chars + 64 blockhash chars, then two u32 fields.
        this_start = int(m[4 + 64:4 + 64 + 8], base=16)
        num = int(m[4 + 64 + 8:4 + 64 + 8 + 8], base=16)
        # Pull off end of packet, assume it's uncompressed, and no TLVs!
        scids += m[4 + 64 + 8 + 8 + 2 + 4 + 2:]
        assert this_start == start
        start += num

    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    assert scids == encoded

    # Test overflow case doesn't split forever; should still only get 2 for this
    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           1, 429496000,
                           filters=['0109'])
    assert len(msgs) == 2

    # This should actually be large enough for zlib to kick in!
    scid34, _ = l3.fundchannel(l4, 10**5)
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('Received node_announcement for node ' + l4.info['id'])

    # Restore infinite encode size.
    l2.rpc.dev_set_max_scids_encode_size(max=(2**32 - 1))
    l2.daemon.wait_for_log('Set max_scids_encode_bytes to {}'
                           .format(2**32 - 1))

    msgs = l2.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, 65535,
                           filters=['0109'])
    # '01' = zlib-compressed scid encoding.
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '01', scid12, scid23, scid34],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(65535, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]
778
779
# Long test involving 4 lightningd instances.
@pytest.mark.developer("needs DEVELOPER=1")
def test_report_routing_failure(node_factory, bitcoind):
    """Test routing failure and retrying of routing.

    The setup is as follows:
      l3-->l4
      ^   / |
      |  /  |
      | L   v
      l2<--l1

    l1 wants to pay to l4.
    The shortest route is l1-l4, but l1 cannot afford to pay l4
    because l4 has all the funds; this is a local failure.
    The next shortest route is l1-l2-l4, but l2 cannot afford to
    pay l4 for the same reason; this is a remote failure.
    Finally the only possible path is l1-l2-l3-l4.
    """
    # Setup
    # Construct lightningd
    l1, l2, l3, l4 = node_factory.get_nodes(4)

    # Wire them up
    # The ordering below matters!
    # Particularly, l1 is payer and we will
    # wait for l1 to receive gossip for the
    # channel being made.
    channels = []
    for src, dst in [(l1, l2), (l2, l3), (l3, l4), (l4, l1), (l4, l2)]:
        src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        print("src={}, dst={}".format(src.daemon.lightning_dir,
                                      dst.daemon.lightning_dir))
        c, _ = src.fundchannel(dst, 10**6)
        channels.append(c)
    bitcoind.generate_block(5)

    # Payer must see every channel active before attempting the payment.
    for c in channels:
        l1.wait_channel_active(c)

    # Test: pay must succeed by falling back to the l1-l2-l3-l4 path.
    inv = l4.rpc.invoice(1234567, 'inv', 'for testing')['bolt11']
    l1.rpc.pay(inv)
833
834
@pytest.mark.developer("needs fast gossip")
def test_query_short_channel_id(node_factory, bitcoind, chainparams):
    """Query scids from a peer and check the gossip messages sent back."""
    l1, l2, l3 = node_factory.get_nodes(3)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    chain_hash = chainparams['chain_hash']

    def mkencoded(scids):
        # Encode scids uncompressed ('00') via the devtools helper.
        return subprocess.run(['devtools/mkencoded', '--scids', '00'] + scids,
                              check=True,
                              timeout=TIMEOUT,
                              stdout=subprocess.PIPE).stdout.strip().decode()

    # Empty result tests: ask about channels nobody has announced.
    msgs = l1.query_gossip('query_short_channel_ids',
                           chain_hash,
                           mkencoded(['1x1x1', '2x2x2']),
                           filters=['0109'])

    # Should just get the WIRE_REPLY_SHORT_CHANNEL_IDS_END = 262
    # (with chainhash and completeflag = 1)
    assert len(msgs) == 1
    assert msgs[0] == '0106{}01'.format(chain_hash)

    # Make channels public.
    scid12, _ = l1.fundchannel(l2, 10**5)
    scid23, _ = l2.fundchannel(l3, 10**5)
    bitcoind.generate_block(5)

    # It will know about everything.
    l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l3.info['id']))

    # This query should get channel announcements, channel updates, and node announcements.
    msgs = l1.query_gossip('query_short_channel_ids',
                           chain_hash,
                           mkencoded([scid23]),
                           filters=['0109'])

    # 0x0100 = channel_announcement, 0x0102 = channel_update,
    # 0x0101 = node_announcement
    expected_prefixes = ['0100', '0102', '0102', '0101', '0101']
    assert len(msgs) == len(expected_prefixes) + 1
    for msg, prefix in zip(msgs, expected_prefixes):
        assert msg.startswith(prefix)
    assert msgs[-1] == '0106{}01'.format(chain_hash)

    msgs = l1.query_gossip('query_short_channel_ids',
                           chain_hash,
                           mkencoded([scid12, scid23]),
                           filters=['0109'])

    # Technically, this order could be different, but this matches code.
    # 0x0100 = channel_announcement, 0x0102 = channel_update,
    # 0x0101 = node_announcement
    expected_prefixes = ['0100', '0102', '0102',
                         '0100', '0102', '0102',
                         '0101', '0101', '0101']
    assert len(msgs) == len(expected_prefixes) + 1
    for msg, prefix in zip(msgs, expected_prefixes):
        assert msg.startswith(prefix)
    assert msgs[-1] == '0106{}01'.format(chain_hash)
913
914
def test_gossip_addresses(node_factory, bitcoind):
    """Addresses given via announce-addr must appear in the node_announcement."""
    announce = [
        '[::]:3',
        '127.0.0.1:2',
        'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion',
        '3fyb44wdhnd2ghhl.onion:1234',
    ]
    l1 = node_factory.get_node(options={'announce-addr': announce,
                                        'allow-deprecated-apis': True})
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    l1.fundchannel(l2, 100000)
    bitcoind.generate_block(6)
    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))

    nodes = l2.rpc.listnodes(l1.info['id'])['nodes']
    assert len(nodes) == 1
    # Addresses come back ordered by type, with default ports filled in.
    assert nodes[0]['addresses'] == [
        {'type': 'ipv4', 'address': '127.0.0.1', 'port': 2},
        {'type': 'ipv6', 'address': '::', 'port': 3},
        {'type': 'torv2', 'address': '3fyb44wdhnd2ghhl.onion', 'port': 1234},
        {'type': 'torv3', 'address': 'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion', 'port': 9735},
    ]
940
941
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.developer("needs dev-fast-gossip")
@pytest.mark.openchannel('v2')
def test_gossip_lease_rates(node_factory, bitcoind):
    """Liquidity-ad (lease) rates must be advertised via node_announcement."""
    def check_rates(rates, base, prop, lease_base, basis):
        # funding_weight is 666 by default on regtest.
        assert rates['channel_fee_max_base_msat'] == Millisatoshi(base)
        assert rates['channel_fee_max_proportional_thousandths'] == prop
        assert rates['funding_weight'] == 666
        assert rates['lease_fee_base_msat'] == Millisatoshi(lease_base)
        assert rates['lease_fee_basis'] == basis

    lease_opts = {'lease-fee-basis': 50,
                  'lease-fee-base-msat': '2000msat',
                  'channel-fee-max-base-msat': '500sat',
                  'channel-fee-max-proportional-thousandths': 200}
    l1, l2 = node_factory.get_nodes(2, opts=[lease_opts, {}])

    # These logs happen during startup, start looking from the beginning
    l1.daemon.logsearch_start = 0
    l2.daemon.logsearch_start = 0

    # l1 was configured with lease rates; l2 was not.
    check_rates(l1.rpc.call('funderupdate'), '500000msat', 200, '2000msat', 50)

    rates = l2.rpc.call('funderupdate')
    for field in ('channel_fee_max_base_msat',
                  'channel_fee_max_proportional_thousandths',
                  'funding_weight',
                  'lease_fee_base_msat',
                  'lease_fee_basis'):
        assert field not in rates

    # Open a channel, check that the node_announcements
    # include offer details, as expected
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    # Announce depth is ALWAYS 6 blocks
    bitcoind.generate_block(5)

    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l2.info['id']))

    l2_nodeinfo = only_one(l1.rpc.listnodes(l2.info['id'])['nodes'])
    l1_nodeinfo = only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])

    assert 'option_will_fund' not in l2_nodeinfo
    check_rates(l1_nodeinfo['option_will_fund'], '500000msat', 200, '2000msat', 50)

    # Update the node announce (set new on l2, turn off l1)
    # (Turn off by setting everything to zero)
    l1.rpc.call('funderupdate', {'channel_fee_max_base_msat': '0msat',
                                 'channel_fee_max_proportional_thousandths': 0,
                                 'funding_weight': 0,
                                 'lease_fee_base_msat': '0msat',
                                 'lease_fee_basis': 0})
    l2.rpc.call('funderupdate', {'channel_fee_max_base_msat': '30000msat',
                                 'channel_fee_max_proportional_thousandths': 100,
                                 'lease_fee_base_msat': '400000msat',
                                 'lease_fee_basis': 20})

    l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l2.info['id']))
    l2.daemon.wait_for_log('Received node_announcement for node {}'.format(l1.info['id']))

    l2_nodeinfo = only_one(l1.rpc.listnodes(l2.info['id'])['nodes'])
    l1_nodeinfo = only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])

    # Roles have now swapped: l1 no longer advertises, l2 does.
    assert 'option_will_fund' not in l1_nodeinfo
    check_rates(l2_nodeinfo['option_will_fund'], '30000msat', 100, '400000msat', 20)
1019
1020
def test_gossip_store_load(node_factory):
    """Make sure we can read canned gossip store.

    Writes a hand-crafted version-9 gossip_store containing one
    channel_announcement (with its channel-amount record), one
    channel_update and one node_announcement, then starts the node and
    checks the store loads cleanly with nothing truncated.
    """
    l1 = node_factory.get_node(start=False)
    # Each record below is: len | csum | timestamp | wire message (all hex).
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("09"        # GOSSIP_STORE_VERSION
                                  "000001b0"  # len
                                  "fea676e8"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0100"      # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"  # len
                                  "99dc98b4"  # csum
                                  "00000000"  # timestamp
                                  "1005"      # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "00000082"  # len
                                  "fd421aeb"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0102"      # WIRE_CHANNEL_UPDATE
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440000009000000000000003e8000003e800000001"
                                  "00000095"  # len
                                  "f036515e"  # csum
                                  "5aab817c"  # timestamp
                                  "0101"      # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"))

    l1.start()
    # This log line may precede the Started msg waited for in 'start',
    # hence is_in_log (searches from the beginning) rather than wait_for_log.
    wait_for(lambda: l1.daemon.is_in_log(r'gossip_store: Read 1/1/1/0 cannounce/cupdate/nannounce/cdelete from store \(0 deleted\) in 770 bytes'))
    assert not l1.daemon.is_in_log('gossip_store.*truncating')
1051
1052
def test_gossip_store_load_announce_before_update(node_factory):
    """Make sure we can read canned gossip store with node_announce before update.

    This happens when a channel_update gets replaced, leaving the
    node_announcement physically before the (new) channel_update in the
    store.  The old update record is kept but flagged deleted.
    """
    l1 = node_factory.get_node(start=False)
    # Each record below is: len | csum | timestamp | wire message (all hex).
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("09"        # GOSSIP_STORE_VERSION
                                  "000001b0"  # len
                                  "fea676e8"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0100"      # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"  # len
                                  "99dc98b4"  # csum
                                  "00000000"  # timestamp
                                  "1005"      # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "80000082"  # len (DELETED: high bit of len set)
                                  "fd421aeb"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0102"      # WIRE_CHANNEL_UPDATE
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440000009000000000000003e8000003e800000001"
                                  "00000095"  # len
                                  "f036515e"  # csum
                                  "5aab817c"  # timestamp
                                  "0101"      # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"
                                  "00000082"  # len
                                  "fd421aeb"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0102"      # WIRE_CHANNEL_UPDATE
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440000009000000000000003e8000003e800000001"))

    l1.start()
    # This log line may precede the Started msg waited for in 'start',
    # hence is_in_log rather than wait_for_log.
    wait_for(lambda: l1.daemon.is_in_log(r'gossip_store: Read 1/1/1/0 cannounce/cupdate/nannounce/cdelete from store \(0 deleted\) in 770 bytes'))
    assert not l1.daemon.is_in_log('gossip_store.*truncating')

    # Extra sanity check if we can: compacting and reloading should work too.
    if DEVELOPER:
        l1.rpc.call('dev-compact-gossip-store')
        l1.restart()
        l1.rpc.call('dev-compact-gossip-store')
1094
1095
def test_gossip_store_load_amount_truncated(node_factory):
    """Make sure we can read canned gossip store with truncated amount.

    The store ends right after the channel_announcement, missing the
    following channel-amount record; the node must detect the dangling
    announcement, move the store aside as gossip_store.corrupt and start
    with an empty store.
    """
    # The corruption is logged as broken, so allow it.
    l1 = node_factory.get_node(start=False, allow_broken_log=True)
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("09"        # GOSSIP_STORE_VERSION
                                  "000001b0"  # len
                                  "fea676e8"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0100"      # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"))

    l1.start()
    # These log lines may precede the Started msg waited for in 'start',
    # hence is_in_log rather than wait_for_log.
    wait_for(lambda: l1.daemon.is_in_log(r'gossip_store: dangling channel_announcement. Moving to gossip_store.corrupt and truncating'))
    wait_for(lambda: l1.daemon.is_in_log(r'gossip_store: Read 0/0/0/0 cannounce/cupdate/nannounce/cdelete from store \(0 deleted\) in 1 bytes'))
    assert os.path.exists(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.corrupt'))

    # Extra sanity check if we can: compacting and reloading should work too.
    if DEVELOPER:
        l1.rpc.call('dev-compact-gossip-store')
        l1.restart()
        l1.rpc.call('dev-compact-gossip-store')
1118
1119
@pytest.mark.developer("Needs fast gossip propagation")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_node_reannounce(node_factory, bitcoind, chainparams):
    """Test that we reannounce a node when parameters (alias, lease rates) change,
    and that a plain restart re-transmits the identical announcement."""
    # io log level so we can see the raw node_announcement ([IN] 0101) below.
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True,
                                              'log-level': 'io'})
    bitcoind.generate_block(5)
    genesis_blockhash = chainparams['chain_hash']

    # Wait for node_announcement for l1.
    l2.daemon.wait_for_log(r'\[IN\] 0101.*{}'.format(l1.info['id']))
    # Wait for it to process it.
    wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != [])
    wait_for(lambda: 'alias' in only_one(l2.rpc.listnodes(l1.info['id'])['nodes']))
    assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'].startswith('JUNIORBEAM')

    # Dual-funding adds features 21 and 29 to the announcement.
    lfeatures = expected_node_features()
    if l1.config('experimental-dual-fund'):
        lfeatures = expected_node_features(extra=[21, 29])

    # Make sure it gets features correct.
    assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['features'] == lfeatures

    # Change the alias across a restart; l2 should see the new announcement.
    l1.stop()
    l1.daemon.opts['alias'] = 'SENIORBEAM'
    # It won't update within 5 seconds, so sleep.
    time.sleep(5)
    l1.start()

    wait_for(lambda: only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'] == 'SENIORBEAM')

    # Get node_announcements.
    msgs = l1.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', '0xFFFFFFFF',
                           # Filter out gossip_timestamp_filter,
                           # channel_announcement and channel_updates.
                           filters=['0109', '0102', '0100'])

    # Two node_announcements remain: one per node; one of them carries
    # the new alias.
    assert len(msgs) == 2
    assert (bytes("SENIORBEAM", encoding="utf8").hex() in msgs[0]
            or bytes("SENIORBEAM", encoding="utf8").hex() in msgs[1])

    # Restart should re-xmit exact same update on reconnect!
    l1.restart()

    msgs2 = l1.query_gossip('gossip_timestamp_filter',
                            genesis_blockhash,
                            '0', '0xFFFFFFFF',
                            # Filter out gossip_timestamp_filter,
                            # channel_announcement and channel_updates.
                            filters=['0109', '0102', '0100'])
    assert msgs == msgs2
    # Won't have queued up another one, either.
    assert not l1.daemon.is_in_log('node_announcement: delaying')

    # Try updating the lease rates ad(vertisement); that should trigger
    # a fresh node_announcement.
    ad = l1.rpc.call('setleaserates',
                     {'lease_fee_base_msat': '1000sat',
                      'lease_fee_basis': 20,
                      'funding_weight': 150,
                      'channel_fee_max_base_msat': '2000msat',
                      'channel_fee_max_proportional_thousandths': 22})

    assert ad['lease_fee_base_msat'] == Millisatoshi('1000000msat')
    assert ad['lease_fee_basis'] == 20
    assert ad['funding_weight'] == 150
    assert ad['channel_fee_max_base_msat'] == Millisatoshi('2000msat')
    assert ad['channel_fee_max_proportional_thousandths'] == 22

    msgs2 = l1.query_gossip('gossip_timestamp_filter',
                            genesis_blockhash,
                            '0', '0xFFFFFFFF',
                            # Filter out gossip_timestamp_filter,
                            # channel_announcement and channel_updates.
                            filters=['0109', '0102', '0100'])
    # The announcement must have changed to include the rates.
    assert msgs != msgs2
1198
1199
def test_gossipwith(node_factory):
    """devtools/gossipwith --initial-sync must receive the full gossip set."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    output = subprocess.run(['devtools/gossipwith',
                             '--initial-sync',
                             '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                             '{}@localhost:{}'.format(l1.info['id'], l1.port)],
                            check=True,
                            timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

    # Output is a sequence of records: 2-byte big-endian length, then the
    # message itself (whose first 2 bytes are the message type).
    num_msgs = 0
    while output:
        msglen, msgtype = struct.unpack('>HH', output[0:4])
        # channel_announcement (256), node_announcement (257),
        # channel_update (258) or timestamp_filter (265).
        assert msgtype in (256, 257, 258, 265)
        output = output[2 + msglen:]
        if msgtype != 265:
            num_msgs += 1

    # one channel announcement, two channel_updates, two node announcements.
    assert num_msgs == 5
1221
1222
def test_gossip_notices_close(node_factory, bitcoind):
    """After a channel closes on-chain, gossip for it must be forgotten and
    replays of the old announcement/update/node_announcement rejected,
    even across a restart."""
    # We want IO logging so we can replay a channel_announce to l1;
    # We also *really* do feed it bad gossip!
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'log-level': 'io',
                                                  'allow_bad_gossip': True},
                                                 {},
                                                 {}])
    node_factory.join_nodes([l2, l3])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    bitcoind.generate_block(5)

    # Make sure l1 learns about channel and nodes.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: ['alias' in n for n in l1.rpc.listnodes()['nodes']] == [True, True])
    l1.rpc.disconnect(l2.info['id'])

    # Grab the raw gossip messages from io logs so we can replay them later.
    # (each log line ends in ', hence the [:-1] strip)
    channel_announcement = l1.daemon.is_in_log(r'\[IN\] 0100').split(' ')[-1][:-1]
    channel_update = l1.daemon.is_in_log(r'\[IN\] 0102').split(' ')[-1][:-1]
    node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1]

    # Close the l2-l3 channel and confirm the close tx on-chain.
    txid = l2.rpc.close(l3.info['id'])['txid']
    wait_for(lambda: only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
    bitcoind.generate_block(1, txid)

    # l1 must forget the channel and both nodes.
    wait_for(lambda: l1.rpc.listchannels()['channels'] == [])
    wait_for(lambda: l1.rpc.listnodes()['nodes'] == [])

    # Replay the captured gossip for the now-closed channel at l1.
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    channel_announcement,
                    channel_update,
                    node_announcement],
                   timeout=TIMEOUT)

    # l1 should reject it.
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])

    # The rejection must survive a restart too.
    l1.stop()
    l1.start()
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])
1268
1269
def test_getroute_exclude_duplicate(node_factory):
    """Test that accidentally duplicating the same channel or same node
    in the exclude list will not have permanent effects.
    """
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    # Baseline route to compare against afterwards.
    baseline = l1.rpc.getroute(l2.info['id'], 1, 1)['route']
    # l1 id is > l2 id, so direction 1 means l1->l2.
    chan_l1l2 = baseline[0]['channel'] + '/1'

    # Excluding the only viable channel (and worse, excluding it twice)
    # must fail to find a route...
    with pytest.raises(RpcError):
        l1.rpc.getroute(l2.info['id'], 1, 1, exclude=[chan_l1l2, chan_l1l2])

    # ...but with no exclusions routing must work again, and in
    # particular must return the exact same route as before.
    assert l1.rpc.getroute(l2.info['id'], 1, 1)['route'] == baseline

    # Same story when the destination node itself is excluded twice.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l2.info['id'], 1, 1, exclude=[l2.info['id'], l2.info['id']])

    # And again, routing afterwards is unaffected and identical.
    assert l1.rpc.getroute(l2.info['id'], 1, 1)['route'] == baseline
1303
1304
@pytest.mark.developer("gossip propagation is slow without DEVELOPER=1")
def test_getroute_exclude(node_factory, bitcoind):
    """Test getroute's exclude argument.

    An exclude entry is either a channel direction ("scid/dir") or a node id;
    getroute must avoid every excluded element and raise when no route is
    left.  We start with a single l1->l2->l3->l4 path, then add alternates
    (l2->l4, then l1->l5->l4) and check that excludes interact correctly.
    """
    l1, l2, l3, l4, l5 = node_factory.get_nodes(5)
    node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)

    # This should work: only one path exists, l1->l2->l3->l4.
    route = l1.rpc.getroute(l4.info['id'], 1, 1)['route']

    # l1 id is > l2 id, so 1 means l1->l2
    chan_l1l2 = route[0]['channel'] + '/1'
    chan_l2l1 = route[0]['channel'] + '/0'

    # This should not: the only first hop is excluded.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l1l2])

    # This should also not: l2 is a mandatory intermediate node.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l2.info['id']])

    # Blocking the wrong way should be fine.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l1])

    # Now, create an alternate (better) route: l2 connects directly to l4.
    l2.rpc.connect(l4.info['id'], 'localhost', l4.port)
    scid, _ = l2.fundchannel(l4, 1000000, wait_for_active=False)
    bitcoind.generate_block(5)

    # We don't wait above, because we care about it hitting l1.
    l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
                             .format(scid),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid)])

    # l3 id is > l2 id, so 1 means l3->l2
    # chan_l3l2 = route[1]['channel'] + '/1'
    chan_l2l3 = route[1]['channel'] + '/0'

    # l4 is > l2
    # chan_l4l2 = scid + '/1'
    chan_l2l4 = scid + '/0'

    # This works: route via l2->l4 remains.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3])

    # This works: route via l2->l3->l4 remains.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l4])

    # This works: excluding node l3 still leaves l2->l4.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id']])

    # This doesn't: both of l2's outgoing options are blocked.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3, chan_l2l4])

    # This doesn't: same situation, node-exclude plus channel-exclude.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], chan_l2l4])

    # Add a second independent path l1->l5->l4.
    l1.rpc.connect(l5.info['id'], 'localhost', l5.port)
    scid15, _ = l1.fundchannel(l5, 1000000, wait_for_active=False)
    l5.rpc.connect(l4.info['id'], 'localhost', l4.port)
    scid54, _ = l5.fundchannel(l4, 1000000, wait_for_active=False)
    bitcoind.generate_block(5)

    # We don't wait above, because we care about it hitting l1.
    l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
                             .format(scid15),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid15),
                             r'update for channel {}/0 now ACTIVE'
                             .format(scid54),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid54)])

    # This works now: l1->l5->l4 dodges both excludes.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], chan_l2l4])

    # This works now: l1->l2->l4 dodges both node excludes.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], l5.info['id']])

    # This doesn't work: every remaining path is blocked.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], l5.info['id'], chan_l2l4])

    # This doesn't work either: channel and node excludes combined.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3, l5.info['id'], chan_l2l4])
1394
1395
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_local_channels(node_factory, bitcoind):
    """An unannounced local channel must survive restarts and store compaction."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=False)

    # Even without an announcement the channel is visible, because it's ours.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)

    l2.stop()
    l1.restart()

    # Give the gossip store time to load, then both directions must remain.
    time.sleep(3)
    assert len(l1.rpc.listchannels()['channels']) == 2

    # Compacting the store must not drop local channels either.
    l1.rpc.call('dev-compact-gossip-store')
    l1.restart()

    time.sleep(3)  # Make sure store is loaded
    assert len(l1.rpc.listchannels()['channels']) == 2
1419
1420
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_private_channels(node_factory, bitcoind):
    """A private local channel must survive restarts and store compaction."""
    l1, l2 = node_factory.line_graph(2, announce_channels=False)

    # Even though it's private, we see it because it's local.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)

    l2.stop()
    l1.restart()

    # Give the gossip store time to load, then both directions must remain.
    time.sleep(3)
    assert len(l1.rpc.listchannels()['channels']) == 2

    # Compacting the store must not drop private local channels either.
    l1.rpc.call('dev-compact-gossip-store')
    l1.restart()

    time.sleep(3)  # Make sure store is loaded
    assert len(l1.rpc.listchannels()['channels']) == 2
1444
1445
def setup_gossip_store_test(node_factory, bitcoind):
    """Build an l1-l2-l3 line whose l2 gossip_store holds a known mix of
    live and superseded (deleted) records, and return l2.

    The compaction/load tests below rely on the exact record layout
    sketched in the comment at the end of this function, so the order of
    operations here is significant — don't reorder.
    """
    l1, l2, l3 = node_factory.line_graph(3, fundchannel=False)

    # Create channel.
    scid23, _ = l2.fundchannel(l3, 10**6)

    # Have that channel announced.
    bitcoind.generate_block(5)
    # Make sure we've got node_announcements
    wait_for(lambda: ['alias' in n for n in l2.rpc.listnodes()['nodes']] == [True, True])

    # Now, replace the one channel_update, so it's past the node announcements.
    l2.rpc.setchannelfee(l3.info['id'], 20, 1000)
    # Old base feerate is 1.
    wait_for(lambda: sum([c['base_fee_millisatoshi'] for c in l2.rpc.listchannels()['channels']]) == 21)

    # Create another channel, which will stay private.
    scid12, _ = l1.fundchannel(l2, 10**6)

    # Now insert channel_update for previous channel; now they're both past the
    # node announcements.
    l3.rpc.setchannelfee(l2.info['id'], 20, 1000)
    wait_for(lambda: [c['base_fee_millisatoshi'] for c in l2.rpc.listchannels(scid23)['channels']] == [20, 20])

    # Replace both (private) updates for scid12.
    l1.rpc.setchannelfee(l2.info['id'], 20, 1000)
    l2.rpc.setchannelfee(l1.info['id'], 20, 1000)
    wait_for(lambda: [c['base_fee_millisatoshi'] for c in l2.rpc.listchannels(scid12)['channels']] == [20, 20])

    # Records in store now looks (something) like:
    #    DELETED: private channel_announcement (scid23)
    #    DELETED: private channel_update (scid23/0)
    #    DELETED: private channel_update (scid23/1)
    #  delete channel (scid23)
    #  channel_announcement (scid23)
    #  channel_amount
    #    DELETED: channel_update (scid23/0)
    #    DELETED: channel_update (scid23/1)
    #  node_announcement
    #  node_announcement
    #  channel_update (scid23)
    #  private channel_announcement (scid12)
    #    DELETED: private channel_update (scid12/0)
    #    DELETED: private channel_update (scid12/1)
    #  channel_update (scid23)
    #  private_channel_update (scid12)
    #  private_channel_update (scid12)
    return l2
1494
1495
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_compact_noappend(node_factory, bitcoind):
    """Leftover junk in gossip_store.tmp must be truncated by compaction."""
    l2 = setup_gossip_store_test(node_factory, bitcoind)

    # Plant garbage in the temp file; compaction should truncate, not append.
    tmp_store = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.tmp')
    with open(tmp_store, 'wb') as f:
        f.write(bytes.fromhex("07deadbeef"))

    l2.rpc.call('dev-compact-gossip-store')
    l2.restart()
    wait_for(lambda: l2.daemon.is_in_log('gossip_store: Read '))
    assert not l2.daemon.is_in_log('gossip_store:.*truncate')
1508
1509
@pytest.mark.developer("updates are delayed without --dev-fast-gossip")
def test_gossip_store_load_complex(node_factory, bitcoind):
    """The mixed live/deleted store from the setup helper must load on restart."""
    node = setup_gossip_store_test(node_factory, bitcoind)

    node.restart()

    wait_for(lambda: node.daemon.is_in_log('gossip_store: Read '))
1517
1518
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_compact(node_factory, bitcoind):
    """Compacting the store must not drop peers, and the node must restart."""
    node = setup_gossip_store_test(node_factory, bitcoind)

    # Compact the store in place.
    node.rpc.call('dev-compact-gossip-store')

    # Both peers should survive the compaction.
    time.sleep(1)
    assert len(node.rpc.listpeers()['peers']) == 2

    # The compacted store should load cleanly after a restart.
    node.restart()
    wait_for(lambda: node.daemon.is_in_log('gossip_store: Read '))
1533
1534
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_compact_restart(node_factory, bitcoind):
    """Compacting right after a restart must work on the freshly-loaded store."""
    node = setup_gossip_store_test(node_factory, bitcoind)

    # Restart first, so the store has just been read back.
    node.restart()
    wait_for(lambda: node.daemon.is_in_log('gossip_store: Read '))

    # Then compact; this must not fail on the reloaded store.
    node.rpc.call('dev-compact-gossip-store')
1545
1546
@pytest.mark.developer("need dev-compact-gossip-store")
def test_gossip_store_load_no_channel_update(node_factory):
    """Make sure we can read truncated gossip store with a channel_announcement and no channel_update.

    We hand-craft a v9 store whose only channel has an announcement (plus
    amount record) but no channel_update: on load the daemon should move
    the file aside as gossip_store.corrupt and truncate, not crash.
    """
    l1 = node_factory.get_node(start=False, allow_broken_log=True)

    # A channel announcement with no channel_update.
    # NOTE: record framing is <len><crc><timestamp><payload>; bytes must stay exact.
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("09"        # GOSSIP_STORE_VERSION
                                  "000001b0"  # len
                                  "fea676e8"  # csum
                                  "5b8d9b44"  # timestamp
                                  "0100"      # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"  # len
                                  "99dc98b4"  # csum
                                  "00000000"  # timestamp
                                  "1005"      # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "00000095"  # len
                                  "f036515e"  # csum
                                  "5aab817c"  # timestamp
                                  "0101"      # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"))

    l1.start()

    # May preceed the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log('gossip_store: Unupdated channel_announcement at 1. Moving to gossip_store.corrupt and truncating'))
    assert os.path.exists(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.corrupt'))

    # This should actually result in an empty store.
    l1.rpc.call('dev-compact-gossip-store')

    # Only the version byte should remain after compaction.
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), "rb") as f:
        assert bytearray(f.read()) == bytearray.fromhex("09")
1582
1583
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_gossip_store_compact_on_load(node_factory, bitcoind):
    """On restart the store is compacted offline before being read back."""
    node = setup_gossip_store_test(node_factory, bitcoind)

    node.restart()

    # Offline compaction drops the superseded records...
    wait_for(lambda: node.daemon.is_in_log(r'gossip_store_compact_offline: [5-8] deleted, 9 copied'))

    # ...and the survivors then load with nothing left marked deleted.
    wait_for(lambda: node.daemon.is_in_log(r'gossip_store: Read 2/4/2/0 cannounce/cupdate/nannounce/cdelete from store \(0 deleted\) in [0-9]* bytes'))
1593
1594
def test_gossip_announce_invalid_block(node_factory, bitcoind):
    """bitcoind lags and we might get an announcement for a block we don't have.

    """
    # Slow the poll interval so the announcement precedes the blockchain
    # catchup; otherwise `getfilteredblock` is never reached.
    opts = {'dev-bitcoind-poll': TIMEOUT // 2} if DEVELOPER else {}

    l1 = node_factory.get_node(options=opts)
    bitcoind.generate_block(1)
    assert bitcoind.rpc.getblockchaininfo()['blocks'] == 102

    # Feed an announcement for short_channel_id=103x1x1, a block we don't have yet.
    announcement = '01008d9f3d16dbdd985c099b74a3c9a74ccefd52a6d2bd597a553ce9a4c7fac3bfaa7f93031932617d38384cc79533730c9ce875b02643893cacaf51f503b5745fc3aef7261784ce6b50bff6fc947466508b7357d20a7c2929cc5ec3ae649994308527b2cbe1da66038e3bfa4825b074237708b455a4137bdb541cf2a7e6395a288aba15c23511baaae722fdb515910e2b42581f9c98a1f840a9f71897b4ad6f9e2d59e1ebeaf334cf29617633d35bcf6e0056ca0be60d7c002337bbb089b1ab52397f734bcdb2e418db43d1f192195b56e60eefbf82acf043d6068a682e064db23848b4badb20d05594726ec5b59267f4397b093747c23059b397b0c5620c4ab37a000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d029053521d6ea7a52cdd55f733d0fb2d077c0373b0053b5b810d927244061b757302d6063d022691b2490ab454dee73a57c6ff5d308352b461ece69f3c284f2c2412'
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    announcement],
                   check=True, timeout=TIMEOUT)

    # Make sure it's OK once it's caught up.
    sync_blockheight(bitcoind, [l1])
1619
1620
def test_gossip_announce_unknown_block(node_factory, bitcoind):
    """Don't backfill the future!

    If we get a channel_announcement that is for a block height that is above
    our sync height we should not store the filteredblock in the blocks table,
    otherwise we end up with a duplicate when we finally catch up with the
    blockchain.

    """
    # Slow the poll interval so the announcement precedes the blockchain
    # catchup; otherwise `getfilteredblock` is never reached.
    opts = {'dev-bitcoind-poll': TIMEOUT // 2} if DEVELOPER else {}

    l1 = node_factory.get_node(options=opts)

    bitcoind.generate_block(2)
    assert bitcoind.rpc.getblockchaininfo()['blocks'] == 103

    # Feed an announcement for short_channel_id=103x1x1, above l1's sync height.
    announcement = '01008d9f3d16dbdd985c099b74a3c9a74ccefd52a6d2bd597a553ce9a4c7fac3bfaa7f93031932617d38384cc79533730c9ce875b02643893cacaf51f503b5745fc3aef7261784ce6b50bff6fc947466508b7357d20a7c2929cc5ec3ae649994308527b2cbe1da66038e3bfa4825b074237708b455a4137bdb541cf2a7e6395a288aba15c23511baaae722fdb515910e2b42581f9c98a1f840a9f71897b4ad6f9e2d59e1ebeaf334cf29617633d35bcf6e0056ca0be60d7c002337bbb089b1ab52397f734bcdb2e418db43d1f192195b56e60eefbf82acf043d6068a682e064db23848b4badb20d05594726ec5b59267f4397b093747c23059b397b0c5620c4ab37a000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d029053521d6ea7a52cdd55f733d0fb2d077c0373b0053b5b810d927244061b757302d6063d022691b2490ab454dee73a57c6ff5d308352b461ece69f3c284f2c2412'
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    announcement],
                   check=True, timeout=TIMEOUT)

    # Make sure it's OK once it's caught up.
    sync_blockheight(bitcoind, [l1])
1651
1652
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_gossip_no_backtalk(node_factory):
    """A node that receives gossip must not play it back to the sender."""
    # l3 logs raw IO so we can watch the wire messages.
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{}, {}, {'log-level': 'io'}])
    node_factory.join_nodes([l1, l2], wait_for_announce=True)

    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Expect one channel_announce (0100), two channel_updates (0102) and
    # two node_announcements (0101) to arrive.
    expected = [r'\[IN\] 0100',
                r'\[IN\] 0102', r'\[IN\] 0102',
                r'\[IN\] 0101', r'\[IN\] 0101']
    l3.daemon.wait_for_logs(expected)

    # With DEVELOPER, this is long enough for gossip flush.
    time.sleep(2)
    assert not l3.daemon.is_in_log(r'\[OUT\] 0100')
1669
1670
@pytest.mark.developer("Needs --dev-gossip")
@unittest.skipIf(
    TEST_NETWORK != 'regtest',
    "Channel announcement contains genesis hash, receiving node discards on mismatch"
)
def test_gossip_ratelimit(node_factory, bitcoind):
    """Check that we ratelimit incoming gossip.

    We create a partitioned network, in which the first partition consisting
    of l1 and l2 is used to create an on-chain footprint and twe then feed
    canned gossip to the other partition consisting of l3. l3 should ratelimit
    the incoming gossip.

    The canned messages below were pre-signed against this exact funding
    output and dev-gossip-time, so neither the timestamps nor the payloads
    may be altered.
    """
    # Pin l3's gossip clock so the canned timestamps line up.
    l3, = node_factory.get_nodes(
        1,
        opts=[{'dev-gossip-time': 1568096251}]
    )

    # Bump to block 102, so the following tx ends up in 103x1:
    bitcoind.generate_block(1)

    # We don't actually need to start l1 and l2, they're just there to create
    # an unspent outpoint matching the expected script. This is also more
    # stable against output ordering issues.
    tx = bitcoind.rpc.createrawtransaction(
        [],
        [
            # Fundrawtransaction will fill in the first output with the change
            {"bcrt1qtwxd8wg5eanumk86vfeujvp48hfkgannf77evggzct048wggsrxsum2pmm": 0.01000000}
        ]
    )
    tx = bitcoind.rpc.fundrawtransaction(tx, {'changePosition': 0})['hex']
    tx = bitcoind.rpc.signrawtransactionwithwallet(tx)['hex']
    txid = bitcoind.rpc.sendrawtransaction(tx)
    wait_for(lambda: txid in bitcoind.rpc.getrawmempool())

    # Make the tx gossipable:
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l3, ])

    def channel_fees(node):
        # Helper: the proportional fee of every channel the node knows about.
        channels = node.rpc.listchannels()['channels']
        return [c['fee_per_millionth'] for c in channels]

    # Here are some ones I generated earlier (by removing gossip
    # ratelimiting)
    subprocess.check_call(
        [
            'devtools/gossipwith',
            '--max-messages=0',
            '{}@localhost:{}'.format(l3.info['id'], l3.port),
            # announcement
            '0100987b271fc95a37dbed78e6159e0ab792cda64603780454ce80832b4e31f63a6760abc8fdc53be35bb7cfccd125ee3d15b4fbdfb42165098970c19c7822bb413f46390e0c043c777226927eacd2186a03f064e4bdc30f891cb6e4990af49967d34b338755e99d728987e3d49227815e17f3ab40092434a59e33548e870071176db7d44d8c8f4c4cac27ae6554eb9350e97d47617e3a1355296c78e8234446fa2f138ad1b03439f18520227fb9e9eb92689b3a0ed36e6764f5a41777e9a2a4ce1026d19a4e4d8f7715c13ac2d6bf3238608a1ccf9afd91f774d84d170d9edddebf7460c54d49bd6cd81410bc3eeeba2b7278b1b5f7e748d77d793f31086847d582000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d590266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c0351802e3bd38009866c9da8ec4aa99cc4ea9c6c0dd46df15c61ef0ce1f271291714e5702324266de8403b3ab157a09f1f784d587af61831c998c151bcc21bb74c2b2314b',
            # first update is free
            '010225bfd9c5e2c5660188a14deb4002cd645ee67f00ad3b82146e46711ec460cb0c6819fdd1c680cb6d24e3906679ef071f13243a04a123e4b83310ebf0518ffd4206226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d773ffb010100060000000000000000000000010000000a000000003b023380'
        ],
        timeout=TIMEOUT
    )

    # Wait for it to process channel.
    wait_for(lambda: channel_fees(l3) == [10])

    subprocess.check_call(
        [
            'devtools/gossipwith',
            '--max-messages=0',
            '{}@localhost:{}'.format(l3.info['id'], l3.port),
            # next 4 are let through...
            '01023a892ad9c9953a54ad3b8e2e03a93d1c973241b62f9a5cd1f17d5cdf08de0e8b4fcd24aa8bd45a48b788fe9dab3d416f28dfa390bc900ec0176ec5bd1afd435706226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400001010006000000000000000000000014000003e9000000003b023380',
            '010245966763623ebc16796165263d4b21711ef04ebf3929491e695ff89ed2b8ccc0668ceb9e35e0ff5b8901d95732a119c1ed84ac99861daa2de462118f7b70049f06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400101010006000000000000000000000014000003ea000000003b023380',
            '0102c479b7684b9db496b844f6925f4ffd8a27c5840a020d1b537623c1545dcd8e195776381bbf51213e541a853a4a49a0faf84316e7ccca5e7074901a96bbabe04e06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400201010006000000000000000000000014000003eb000000003b023380',
            # timestamp=1568096259, fee_proportional_millionths=1004
            '01024b866012d995d3d7aec7b7218a283de2d03492dbfa21e71dd546ec2e36c3d4200453420aa02f476f99c73fe1e223ea192f5fa544b70a8319f2a216f1513d503d06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400301010006000000000000000000000014000003ec000000003b023380',
            # update 5 marks you as a nasty spammer!
            '01025b5b5a0daed874ab02bd3356d38190ff46bbaf5f10db5067da70f3ca203480ca78059e6621c6143f3da4e454d0adda6d01a9980ed48e71ccd0c613af73570a7106226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77400401010006000000000000000000000014000003ed000000003b023380'
        ],
        timeout=TIMEOUT
    )

    # The fifth update was dropped, so the last accepted fee (1004) sticks.
    wait_for(lambda: channel_fees(l3) == [1004])

    # 24 seconds later, it will accept another.
    l3.rpc.call('dev-gossip-set-time', [1568096251 + 24])

    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l3.info['id'], l3.port),
                    # update 6: timestamp=1568096284 fee_proportional_millionths=1006
                    '010282d24bcd984956bd9b891848404ee59d89643923b21641d2c2c0770a51b8f5da00cef82458add970f0b654aa4c8d54f68a9a1cc6470a35810303b09437f1f73d06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f00006700000100015d77401c01010006000000000000000000000014000003ee000000003b023380'],
                   check=True, timeout=TIMEOUT)

    wait_for(lambda: channel_fees(l3) == [1006])
1764
1765
def check_socket(ip_addr, port):
    """Return True if a TCP connection to (ip_addr, port) succeeds, else False.

    Any socket error (e.g. a name-resolution failure raised by connect_ex)
    counts as "not reachable" rather than propagating to the caller.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success, an errno value otherwise.
        return sock.connect_ex((ip_addr, port)) == 0
    except Exception:
        return False
    finally:
        # Always release the fd: previously it leaked when connect_ex raised.
        sock.close()
1777
1778
@pytest.mark.developer("needs a running Tor service instance at port 9151 or 9051")
def test_statictor_onions(node_factory):
    """First basic tests ;-)

    Assume that tor is configured and just test
    if we see the right onion address for our blob
    """
    # please define your values
    torip = '127.0.0.1'
    torips = '127.0.0.1:9051'
    torport = 9050
    torserviceport = 9051
    portA, portB = reserve(), reserve()

    # Skip quietly unless both the Tor control and SOCKS ports are reachable.
    # (The redundant `format(torip)` wrappers were dropped: format(x) on a
    # str is a no-op, and test_torport_onions passes the ip directly.)
    if not check_socket(torip, torserviceport):
        return

    if not check_socket(torip, torport):
        return

    # l1 gets a random onion; l2 derives its onion from a fixed blob.
    l1 = node_factory.get_node(may_fail=True, options={
        'bind-addr': '127.0.0.1:{}'.format(portA),
        'addr': ['statictor:{}'.format(torips)]
    })
    l2 = node_factory.get_node(may_fail=True, options={
        'bind-addr': '127.0.0.1:{}'.format(portB),
        'addr': ['statictor:{}/torblob=11234567890123456789012345678901'.format(torips)]
    })

    assert l1.daemon.is_in_log('127.0.0.1:{}'.format(l1.port))
    # The fixed blob must always yield this exact onion address.
    assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:9735,127.0.0.1:{}'.format(l2.port))
1810
1811
@pytest.mark.developer("needs a running Tor service instance at port 9151 or 9051")
def test_torport_onions(node_factory):
    """First basic tests for torport ;-)

    Assume that tor is configured and just test
    if we see the right onion address for our blob
    """
    # please define your values
    torip = '127.0.0.1'
    torips = '127.0.0.1:9051'
    torport = 9050
    torserviceport = 9051

    # Bail out silently unless both Tor ports answer.
    if not check_socket(torip, torserviceport):
        return
    if not check_socket(torip, torport):
        return

    portA, portB = reserve(), reserve()

    opts1 = {'bind-addr': '127.0.0.1:{}'.format(portA),
             'addr': ['statictor:{}/torport=45321'.format(torips)]}
    opts2 = {'bind-addr': '127.0.0.1:{}'.format(portB),
             'addr': ['statictor:{}/torport=45321/torblob=11234567890123456789012345678901'.format(torips)]}
    l1 = node_factory.get_node(may_fail=True, options=opts1)
    l2 = node_factory.get_node(may_fail=True, options=opts2)

    # Both nodes should advertise the explicit torport 45321.
    assert l1.daemon.is_in_log('45321,127.0.0.1:{}'.format(l1.port))
    assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:45321,127.0.0.1:{}'.format(l2.port))
1838
1839
@unittest.skipIf(not COMPAT, "needs COMPAT to convert obsolete gossip_store")
def test_gossip_store_upgrade_v7_v8(node_factory):
    """Version 8 added feature bits to local channel announcements.

    We plant a canned v7 store (version byte 0x07) and check the daemon
    upgrades it on load: listchannels must show both directions of the
    local channel, now carrying a 'features' field.
    """
    l1 = node_factory.get_node(start=False)

    # A channel announcement with no channel_update.
    # Byte-exact v7 store: local channel_announcement + both channel_updates.
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("07000000428ce4d2d8000000000daf00"
                                  "00670000010001022d223620a359a47f"
                                  "f7f7ac447c85c46c923da53389221a00"
                                  "54c11c1e3ca31d5900000000000f4240"
                                  "000d8000000000000000000000000000"
                                  "00008e3af3badf000000001006008a01"
                                  "02005a9911d425effd461f803a380f05"
                                  "e72d3332eb6e9a7c6c58405ae61eacde"
                                  "4e2da18240ffb3d5c595f85e4f78b594"
                                  "c59e4d01c0470edd4f5afe645026515e"
                                  "fe06226e46111a0b59caaf126043eb5b"
                                  "bf28c34f3a5e332a1fc7b2b73cf18891"
                                  "0f00006700000100015eaa5eb0010100"
                                  "06000000000000000000000001000000"
                                  "0a000000003b0233800000008e074a6e"
                                  "0f000000001006008a0102463de636b2"
                                  "f46ccd6c23259787fc39dc4fdb983510"
                                  "1651879325b18cf1bb26330127e51ce8"
                                  "7a111b05ef92fe00a9a089979dc49178"
                                  "200f49139a541e7078cdc506226e4611"
                                  "1a0b59caaf126043eb5bbf28c34f3a5e"
                                  "332a1fc7b2b73cf188910f0000670000"
                                  "0100015eaa5eb0010000060000000000"
                                  "000000000000010000000a000000003b"
                                  "023380"))

    l1.start()

    # Exact expected output: one channel, both directions, with feature bits.
    assert l1.rpc.listchannels()['channels'] == [
        {'source': '022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59',
         'destination': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518',
         'short_channel_id': '103x1x1',
         'public': False,
         'satoshis': 1000000,
         'amount_msat': Millisatoshi(1000000000),
         'message_flags': 1,
         'channel_flags': 0,
         'active': False,
         'last_update': 1588223664,
         'base_fee_millisatoshi': 1,
         'fee_per_millionth': 10,
         'delay': 6,
         'htlc_minimum_msat': Millisatoshi(0),
         'htlc_maximum_msat': Millisatoshi(990000000),
         # This store was created on an experimental branch (OPT_ONION_MESSAGES)
         'features': '80000000000000000000000000'},
        {'source': '0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518',
         'destination': '022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59',
         'short_channel_id': '103x1x1',
         'public': False,
         'satoshis': 1000000,
         'amount_msat': Millisatoshi(1000000000),
         'message_flags': 1,
         'channel_flags': 1,
         'active': False,
         'last_update': 1588223664,
         'base_fee_millisatoshi': 1,
         'fee_per_millionth': 10,
         'delay': 6,
         'htlc_minimum_msat': Millisatoshi(0),
         'htlc_maximum_msat': Millisatoshi(990000000),
         'features': '80000000000000000000000000'}]
1909
1910
@pytest.mark.developer("devtools are for devs anyway")
def test_routetool(node_factory):
    """Test that route tool can see unpublished channels"""
    l1, l2 = node_factory.line_graph(2)

    # Point the standalone route tool at l1's gossip store; it must
    # find a path even though the channel was never announced.
    store_path = os.path.join(l1.daemon.lightning_dir,
                              TEST_NETWORK,
                              'gossip_store')
    subprocess.run(['devtools/route', store_path,
                    l1.info['id'], l2.info['id']],
                   check=True, timeout=TIMEOUT)
1923
1924
def test_addgossip(node_factory):
    """Gossip captured from one network can be injected elsewhere via addgossip."""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=True,
                                     opts={'log-level': 'io'})

    # A fresh node which has no gossip of its own yet.
    l3 = node_factory.get_node()

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    l1.daemon.logsearch_start = 0
    ann = l1.daemon.wait_for_log(r"\[(OUT|IN)\] 0100.*")  # Either direction will suppress the other.

    upd1 = l1.daemon.is_in_log(r"\[OUT\] 0102.*")
    upd2 = l2.daemon.is_in_log(r"\[OUT\] 0102.*")

    nann1 = l1.daemon.is_in_log(r"\[OUT\] 0101.*")
    nann2 = l2.daemon.is_in_log(r"\[OUT\] 0101.*")

    # Each line looks like 'TIMESTAMP chanid-xxx: [OUT] <hex>': field 3 is the hex.
    for logline in (ann, upd1, upd2, nann1, nann2):
        l3.rpc.addgossip(logline.split()[3])

    # Lookup is asynchronous (it resolves the scid), so wait rather than assert.
    wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l3.rpc.listnodes()['nodes']) == 2)

    # Corrupt an update by flipping the final hex nibble.
    badupdate = upd1.split()[3]
    badupdate = badupdate[:-1] + ('e' if badupdate.endswith('f') else 'f')

    with pytest.raises(RpcError, match='Bad signature'):
        l3.rpc.addgossip(badupdate)
1966
1967
def test_topology_leak(node_factory, bitcoind):
    """Closed channels must vanish from listchannels once gossip catches up."""
    l1, l2, l3 = node_factory.line_graph(3)

    l1.rpc.listchannels()
    bitcoind.generate_block(5)

    # l1 should learn both channels (two directions each).
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 4)

    # Close l2-l3 and confirm the closing tx on-chain.
    close_txid = l2.rpc.close(l3.info['id'])['txid']
    bitcoind.generate_block(1, close_txid)

    # Only the l1-l2 channel (both directions) should remain visible.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
1982
1983
def test_parms_listforwards(node_factory):
    """
    Simple test to ensure that the parameter order of listforwards
    is correct, as described in the documentation.

    This test was prompted by an issue report in the IRC channel;
    it is simple and not very useful, but it is good to have to avoid
    similar errors in the future.
    """
    l1, l2 = node_factory.line_graph(2)

    # The deprecated in_channel/out_channel form needs the compat switch.
    l2.stop()
    l2.daemon.opts['allow-deprecated-apis'] = True
    l2.start()

    # Both the new positional form and the deprecated keyword form must parse.
    forwards_new = l1.rpc.listforwards("settled")["forwards"]
    forwards_dep = l2.rpc.call("listforwards", {"in_channel": "0x1x2", "out_channel": "0x2x3", "status": "settled"})["forwards"]

    # No forwards happened, so both must be empty.
    assert len(forwards_new) == 0
    assert len(forwards_dep) == 0
2004