/* Routines to generate and handle gossip query messages */
#include <bitcoin/chainparams.h>
#include <ccan/array_size/array_size.h>
#include <ccan/asort/asort.h>
#include <ccan/crc32c/crc32c.h>
#include <common/daemon_conn.h>
#include <common/decode_array.h>
#include <common/status.h>
#include <common/type_to_string.h>
#include <common/wire_error.h>
#include <gossipd/gossip_generation.h>
#include <gossipd/gossipd.h>
#include <gossipd/gossipd_wiregen.h>
#include <gossipd/queries.h>
#include <gossipd/routing.h>
#include <zlib.h>

#if DEVELOPER
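/* -1U means "effectively unlimited"; it is only lowered via
 * dev_set_max_scids_encode_size() at the bottom of this file. */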
static u32 dev_max_encoding_bytes = -1U;
#endif

/* BOLT #7:
 *
 * There are several messages which contain a long array of
 * `short_channel_id`s (called `encoded_short_ids`) so we utilize a
 * simple compression scheme: the first byte indicates the encoding, the
 * rest contains the data.
 */
static u8 *encoding_start(const tal_t *ctx)
{
	return tal_arr(ctx, u8, 0);
}

/* Marshal a single short_channel_id */
static void encoding_add_short_channel_id(u8 **encoded,
					  const struct short_channel_id *scid)
{
	towire_short_channel_id(encoded, scid);
}

/* Marshal a single channel_update_timestamps */
static void encoding_add_timestamps(u8 **encoded,
				    const struct channel_update_timestamps *ts)
{
	towire_channel_update_timestamps(encoded, ts);
}

/* Marshal a single query flag (we don't query, so not currently used) */
static void encoding_add_query_flag(u8 **encoded, bigsize_t flag)
{
	towire_bigsize(encoded, flag);
}

/* Greg Maxwell asked me privately about using zlib for communicating a set,
 * and suggested that we'd be better off using Golomb-Rice coding a-la BIP
 * 158.  However, naively using Rice encoding isn't a win: we have to get
 * more complex and use separate streams.  The upside is that it's between
 * 2 and 5 times smaller (assuming optimal Rice encoding + gzip).  We can add
 * that later. */
static u8 *zencode(const tal_t *ctx, const u8 *scids, size_t len)
{
	u8 *z;
	int err;
	unsigned long compressed_len = len;
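	/* Note: compress2() treats compressed_len as in/out: the available
	 * buffer size going in, the actual compressed size coming out.
	 * Capping the buffer at the input length means any compression that
	 * would expand the data fails with Z_BUF_ERROR instead. */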

#ifdef ZLIB_EVEN_IF_EXPANDS
	/* Needed for test vectors */
	compressed_len = 128 * 1024;
#endif
	/* Prefer to fail if zlib makes it larger */
	z = tal_arr(ctx, u8, compressed_len);
	err = compress2(z, &compressed_len, scids, len, Z_DEFAULT_COMPRESSION);
	if (err == Z_OK) {
		tal_resize(&z, compressed_len);
		return z;
	}
	return NULL;
}

/* Try compressing *encoded: fails if result would be longer.
 * @off is offset to place result in *encoded.
 */
static bool encoding_end_zlib(u8 **encoded, size_t off)
{
	u8 *z;
	size_t len = tal_count(*encoded);

	z = zencode(tmpctx, *encoded, len);
	if (!z)
		return false;

	/* Successful: copy over and trim */
	tal_resize(encoded, off + tal_count(z));
	memcpy(*encoded + off, z, tal_count(z));

	tal_free(z);
	return true;
}

static void encoding_end_no_compress(u8 **encoded, size_t off)
{
	size_t len = tal_count(*encoded);

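	/* Grow first, then shift the payload up by @off, leaving room at
	 * the front for the encoding-type prefix. */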
	tal_resize(encoded, off + len);
	memmove(*encoded + off, *encoded, len);
}

/* Once we've assembled it, try compressing.
 * Prepends encoding type to @encoding. */
static bool encoding_end_prepend_type(u8 **encoded, size_t max_bytes)
{
	if (encoding_end_zlib(encoded, 1))
		**encoded = ARR_ZLIB;
	else {
		encoding_end_no_compress(encoded, 1);
		**encoded = ARR_UNCOMPRESSED;
	}

#if DEVELOPER
	if (tal_count(*encoded) > dev_max_encoding_bytes)
		return false;
#endif
	return tal_count(*encoded) <= max_bytes;
}

/* Try compressing, leaving type external */
static bool encoding_end_external_type(u8 **encoded, u8 *type, size_t max_bytes)
{
	if (encoding_end_zlib(encoded, 0))
		*type = ARR_ZLIB;
	else {
		encoding_end_no_compress(encoded, 0);
		*type = ARR_UNCOMPRESSED;
	}

	return tal_count(*encoded) <= max_bytes;
}

/* Query this peer for these short-channel-ids. */
bool query_short_channel_ids(struct daemon *daemon,
			     struct peer *peer,
			     const struct short_channel_id *scids,
			     const u8 *query_flags,
			     void (*cb)(struct peer *peer, bool complete))
{
	u8 *encoded, *msg;
	struct tlv_query_short_channel_ids_tlvs *tlvs;
	/* BOLT #7:
	 *
	 * 1. type: 261 (`query_short_channel_ids`) (`gossip_queries`)
	 * 2. data:
	 *     * [`chain_hash`:`chain_hash`]
	 *     * [`u16`:`len`]
	 *     * [`len*byte`:`encoded_short_ids`]
	 */
	const size_t reply_overhead = 32 + 2;
	size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
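	/* (A lightning message is at most 65535 bytes including its 2-byte
	 * type; subtracting that and the fixed fields above leaves the
	 * space available for encoded_short_ids.) */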

	/* Can't query if they don't have gossip_queries_feature */
	if (!peer->gossip_queries_feature)
		return false;

	/* BOLT #7:
	 *  - MAY include an optional `query_flags`. If so:
	 *    - MUST set `encoding_type`, as for `encoded_short_ids`.
	 *    - Each query flag is a minimally-encoded bigsize.
	 *    - MUST encode one query flag per `short_channel_id`.
	 */
	if (query_flags)
		assert(tal_count(query_flags) == tal_count(scids));

	/* BOLT #7:
	 *
	 * The sender:
	 *  - MUST NOT send `query_short_channel_ids` if it has sent a previous
	 *    `query_short_channel_ids` to this peer and not received
	 *    `reply_short_channel_ids_end`.
	 */
	if (peer->scid_query_outstanding)
		return false;

	encoded = encoding_start(tmpctx);
	for (size_t i = 0; i < tal_count(scids); i++) {
		/* BOLT #7:
		 *
		 * Encoding types:
		 * * `0`: uncompressed array of `short_channel_id` types, in
		 *   ascending order.
		 * * `1`: array of `short_channel_id` types, in ascending
		 *   order, compressed with zlib deflate
		 */
		assert(i == 0 || scids[i].u64 > scids[i-1].u64);
		encoding_add_short_channel_id(&encoded, &scids[i]);
	}

	if (!encoding_end_prepend_type(&encoded, max_encoded_bytes)) {
		status_broken("query_short_channel_ids: %zu is too many",
			      tal_count(scids));
		return false;
	}

	if (query_flags) {
		struct tlv_query_short_channel_ids_tlvs_query_flags *tlvq;
		tlvs = tlv_query_short_channel_ids_tlvs_new(tmpctx);
		tlvq = tlvs->query_flags = tal(tlvs,
			struct tlv_query_short_channel_ids_tlvs_query_flags);
		tlvq->encoded_query_flags = encoding_start(tlvq);
		for (size_t i = 0; i < tal_count(query_flags); i++)
			encoding_add_query_flag(&tlvq->encoded_query_flags,
						query_flags[i]);

		max_encoded_bytes -= tal_bytelen(encoded);
		if (!encoding_end_external_type(&tlvq->encoded_query_flags,
						&tlvq->encoding_type,
						max_encoded_bytes)) {
			status_broken("query_short_channel_ids:"
				      " %zu query_flags is too many",
				      tal_count(query_flags));
			return false;
		}
	} else
		tlvs = NULL;

	msg = towire_query_short_channel_ids(NULL,
					     &chainparams->genesis_blockhash,
					     encoded, tlvs);
	queue_peer_msg(peer, take(msg));
	peer->scid_query_outstanding = true;
	peer->scid_query_cb = cb;

	status_peer_debug(&peer->id, "sending query for %zu scids",
			  tal_count(scids));
	return true;
}
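
/* A minimal usage sketch (hypothetical caller: the real callers live in
 * the seeker):
 *
 *	static void scids_done(struct peer *peer, bool complete)
 *	{
 *		status_peer_debug(&peer->id, "scid query done: %s",
 *				  complete ? "complete" : "incomplete");
 *	}
 *	...
 *	if (!query_short_channel_ids(daemon, peer, scids, NULL, scids_done))
 *		status_debug("peer busy, or lacks gossip_queries");
 */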

/* The peer can ask about an array of short channel ids: we don't assemble the
 * reply immediately but process them one at a time in
 * maybe_send_query_responses, which is called when there's nothing more
 * important to send. */
const u8 *handle_query_short_channel_ids(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain;
	u8 *encoded;
	struct short_channel_id *scids;
	bigsize_t *flags;
	struct tlv_query_short_channel_ids_tlvs *tlvs
		= tlv_query_short_channel_ids_tlvs_new(tmpctx);

	if (!fromwire_query_short_channel_ids(tmpctx, msg, &chain, &encoded,
					      tlvs)) {
		return towire_warningfmt(peer, NULL,
					 "Bad query_short_channel_ids w/tlvs %s",
					 tal_hex(tmpctx, msg));
	}
	if (tlvs->query_flags) {
		/* BOLT #7:
		 *
		 * The receiver:
		 *...
		 *  - if the incoming message includes
		 *    `query_short_channel_ids_tlvs`:
		 *    - if `encoding_type` is not a known encoding type:
		 *      - MAY fail the connection
		 */
		flags = decode_scid_query_flags(tmpctx, tlvs->query_flags);
		if (!flags) {
			return towire_warningfmt(peer, NULL,
						 "Bad query_short_channel_ids query_flags %s",
						 tal_hex(tmpctx, msg));
		}
	} else
		flags = NULL;

	/* BOLT #7
	 *
	 * The receiver:
	 * ...
	 *  - if does not maintain up-to-date channel information for `chain_hash`:
	 *    - MUST set `complete` to 0.
	 */
	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain)) {
		status_peer_debug(&peer->id,
				  "sent query_short_channel_ids chainhash %s",
				  type_to_string(tmpctx, struct bitcoin_blkid, &chain));
		return towire_reply_short_channel_ids_end(peer, &chain, 0);
	}

	/* BOLT #7:
	 *
	 * - if it has not sent `reply_short_channel_ids_end` to a
	 *   previously received `query_short_channel_ids` from this
	 *   sender:
	 *    - MAY fail the connection.
	 */
	if (peer->scid_queries || peer->scid_query_nodes) {
		return towire_warningfmt(peer, NULL,
					 "Bad concurrent query_short_channel_ids");
	}

	scids = decode_short_ids(tmpctx, encoded);
	if (!scids) {
		return towire_warningfmt(peer, NULL,
					 "Bad query_short_channel_ids encoding %s",
					 tal_hex(tmpctx, encoded));
	}

	/* BOLT #7:
	 *
	 * The receiver:
	 *...
	 *  - if `encoded_query_flags` does not decode to exactly one flag per
	 *    `short_channel_id`:
	 *    - MAY fail the connection.
	 */
	if (!flags) {
		/* Pretend they asked for everything. */
		flags = tal_arr(tmpctx, bigsize_t, tal_count(scids));
		memset(flags, 0xFF, tal_bytelen(flags));
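		/* (0xFF sets every flag bit in each bigsize_t: send the
		 * announcement, both updates and both node_announcements.) */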
	} else {
		if (tal_count(flags) != tal_count(scids)) {
			return towire_warningfmt(peer, NULL,
						 "Bad query_short_channel_ids flags count %zu scids %zu",
						 tal_count(flags), tal_count(scids));
		}
	}

	/* BOLT #7:
	 *
	 * - MUST respond to each known `short_channel_id`:
	 *...
	 *   - SHOULD NOT wait for the next outgoing gossip flush to send
	 *     these.
	 */
	peer->scid_queries = tal_steal(peer, scids);
	peer->scid_query_flags = tal_steal(peer, flags);
	peer->scid_query_idx = 0;
	peer->scid_query_nodes = tal_arr(peer, struct node_id, 0);

	/* Notify the daemon_conn-write loop to invoke maybe_send_query_responses */
	daemon_conn_wake(peer->dc);
	return NULL;
}

/*~ We can send multiple replies when the peer queries for all channels in
 * a given range of blocks; each one indicates the range of blocks it covers. */
static void send_reply_channel_range(struct peer *peer,
				     u32 first_blocknum, u32 number_of_blocks,
				     const struct short_channel_id *scids,
				     const struct channel_update_timestamps *tstamps,
				     const struct channel_update_checksums *csums,
				     size_t num_scids,
				     bool final)
{
	/* BOLT #7:
	 *
	 * - MUST respond with one or more `reply_channel_range`:
	 *   - MUST set with `chain_hash` equal to that of `query_channel_range`,
	 *   - MUST limit `number_of_blocks` to the maximum number of blocks
	 *     whose results could fit in `encoded_short_ids`
	 */
	u8 *encoded_scids = encoding_start(tmpctx);
	u8 *encoded_timestamps = encoding_start(tmpctx);
	struct tlv_reply_channel_range_tlvs *tlvs
		= tlv_reply_channel_range_tlvs_new(tmpctx);

	/* Encode them all */
	for (size_t i = 0; i < num_scids; i++)
		encoding_add_short_channel_id(&encoded_scids, &scids[i]);
	encoding_end_prepend_type(&encoded_scids, tal_bytelen(encoded_scids));
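	/* (The result is deliberately ignored: the caller has already
	 * limited num_scids via max_entries(), so the reply cannot
	 * overflow.) */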

	if (tstamps) {
		for (size_t i = 0; i < num_scids; i++)
			encoding_add_timestamps(&encoded_timestamps, &tstamps[i]);

		tlvs->timestamps_tlv = tal(tlvs, struct tlv_reply_channel_range_tlvs_timestamps_tlv);
		encoding_end_external_type(&encoded_timestamps,
					   &tlvs->timestamps_tlv->encoding_type,
					   tal_bytelen(encoded_timestamps));
		tlvs->timestamps_tlv->encoded_timestamps
			= tal_steal(tlvs, encoded_timestamps);
	}

	/* Must be a tal object! */
	if (csums)
		tlvs->checksums_tlv = tal_dup_arr(tlvs,
						  struct channel_update_checksums,
						  csums, num_scids, 0);

	/* BOLT #7:
	 *
	 * - MUST set `sync_complete` to `false` if this is not the final
	 *   `reply_channel_range`.
	 */
	u8 *msg = towire_reply_channel_range(NULL,
					     &chainparams->genesis_blockhash,
					     first_blocknum,
					     number_of_blocks,
					     final, encoded_scids, tlvs);
	queue_peer_msg(peer, take(msg));
}

/* BOLT #7:
 *
 * The checksum of a `channel_update` is the CRC32C checksum as specified in
 * [RFC3720](https://tools.ietf.org/html/rfc3720#appendix-B.4) of this
 * `channel_update` without its `signature` and `timestamp` fields.
 */
static u32 crc32_of_update(const u8 *channel_update)
{
	u32 sum;
	const u8 *parts[2];
	size_t sizes[ARRAY_SIZE(parts)];

	get_cupdate_parts(channel_update, parts, sizes);

	sum = 0;
	for (size_t i = 0; i < ARRAY_SIZE(parts); i++)
		sum = crc32c(sum, parts[i], sizes[i]);
	return sum;
}

static void get_checksum_and_timestamp(struct routing_state *rstate,
				       const struct chan *chan,
				       int direction,
				       u32 *tstamp, u32 *csum)
{
	const struct half_chan *hc = &chan->half[direction];

	if (!is_chan_public(chan) || !is_halfchan_defined(hc)) {
		*tstamp = *csum = 0;
	} else {
		const u8 *update = gossip_store_get(tmpctx, rstate->gs,
						    hc->bcast.index);
		*tstamp = hc->bcast.timestamp;
		*csum = crc32_of_update(update);
	}
}

/* FIXME: This assumes that the tlv type encodes into 1 byte! */
static size_t tlv_overhead(size_t num_entries, size_t size)
{
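	/* e.g. 8000 timestamp entries of 8 bytes each is 64000 payload
	 * bytes: bigsize_len(64000) == 3, so the tlv costs 1 (type)
	 * + 3 (length) = 4 bytes. */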
	return 1 + bigsize_len(num_entries * size);
}

/* How many entries can I fit in a reply? */
static size_t max_entries(enum query_option_flags query_option_flags)
{
	/* BOLT #7:
	 *
	 * 1. type: 264 (`reply_channel_range`) (`gossip_queries`)
	 * 2. data:
	 *   * [`chain_hash`:`chain_hash`]
	 *   * [`u32`:`first_blocknum`]
	 *   * [`u32`:`number_of_blocks`]
	 *   * [`byte`:`sync_complete`]
	 *   * [`u16`:`len`]
	 *   * [`len*byte`:`encoded_short_ids`]
	 */
	const size_t reply_overhead = 32 + 4 + 4 + 1 + 2;
	size_t max_encoded_bytes = 65535 - 2 - reply_overhead;
	size_t per_entry_size, max_num;

	per_entry_size = sizeof(struct short_channel_id);

	/* Upper bound to start. */
	max_num = max_encoded_bytes / per_entry_size;

	/* If we add timestamps, we need to encode tlv */
	if (query_option_flags & QUERY_ADD_TIMESTAMPS) {
		max_encoded_bytes -= tlv_overhead(max_num,
						  sizeof(struct channel_update_timestamps));
		per_entry_size += sizeof(struct channel_update_timestamps);
	}

	if (query_option_flags & QUERY_ADD_CHECKSUMS) {
		max_encoded_bytes -= tlv_overhead(max_num,
						  sizeof(struct channel_update_checksums));
		per_entry_size += sizeof(struct channel_update_checksums);
	}

#if DEVELOPER
	if (max_encoded_bytes > dev_max_encoding_bytes)
		max_encoded_bytes = dev_max_encoding_bytes;
	/* Always let one through! */
	if (max_encoded_bytes < per_entry_size)
		max_encoded_bytes = per_entry_size;
#endif

	return max_encoded_bytes / per_entry_size;
}
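
/* With no options that's (65535 - 2 - 43) / 8 = 8186 scids per reply;
 * adding timestamps and/or checksums shrinks it proportionally. */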

/* This gets all the scids they asked for, and optionally the timestamps and checksums */
static struct short_channel_id *gather_range(const tal_t *ctx,
					     struct routing_state *rstate,
					     u32 first_blocknum, u32 number_of_blocks,
					     enum query_option_flags query_option_flags,
					     struct channel_update_timestamps **tstamps,
					     struct channel_update_checksums **csums)
{
	struct short_channel_id scid, *scids;
	u32 end_block;
	bool scid_ok;

	scids = tal_arr(ctx, struct short_channel_id, 0);
	if (query_option_flags & QUERY_ADD_TIMESTAMPS)
		*tstamps = tal_arr(ctx, struct channel_update_timestamps, 0);
	else
		*tstamps = NULL;
	if (query_option_flags & QUERY_ADD_CHECKSUMS)
		*csums = tal_arr(ctx, struct channel_update_checksums, 0);
	else
		*csums = NULL;

	/* Avoid underflow: we don't use block 0 anyway */
	if (first_blocknum == 0)
		scid_ok = mk_short_channel_id(&scid, 1, 0, 0);
	else
		scid_ok = mk_short_channel_id(&scid, first_blocknum, 0, 0);
	scid.u64--;
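	/* (uintmap_after() returns entries strictly greater than its key,
	 * so we step back one to include the block's first possible scid.) */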
	/* Out of range?  No blocks then. */
	if (!scid_ok)
		return NULL;

	if (number_of_blocks == 0)
		return NULL;

	/* Fix up number_of_blocks to avoid overflow. */
	end_block = first_blocknum + number_of_blocks - 1;
	if (end_block < first_blocknum)
		end_block = UINT_MAX;

	/* We keep a `uintmap` of `short_channel_id` to `struct chan *`.
	 * Unlike a htable, it's efficient to iterate through, but it only
	 * works because each short_channel_id is basically a 64-bit unsigned
	 * integer.
	 *
	 * First we iterate and gather all the short channel ids. */
	while (uintmap_after(&rstate->chanmap, &scid.u64)) {
		struct chan *chan;
		struct channel_update_timestamps ts;
		struct channel_update_checksums cs;

		if (short_channel_id_blocknum(&scid) > end_block)
			break;

		/* FIXME: Store csum in header. */
		chan = get_channel(rstate, &scid);
		if (!is_chan_public(chan))
			continue;

		tal_arr_expand(&scids, scid);

		/* Don't calc csums if we don't even care */
		if (!(query_option_flags
		      & (QUERY_ADD_TIMESTAMPS|QUERY_ADD_CHECKSUMS)))
			continue;

		get_checksum_and_timestamp(rstate, chan, 0,
					   &ts.timestamp_node_id_1,
					   &cs.checksum_node_id_1);
		get_checksum_and_timestamp(rstate, chan, 1,
					   &ts.timestamp_node_id_2,
					   &cs.checksum_node_id_2);
		if (query_option_flags & QUERY_ADD_TIMESTAMPS)
			tal_arr_expand(tstamps, ts);
		if (query_option_flags & QUERY_ADD_CHECKSUMS)
			tal_arr_expand(csums, cs);
	}

	return scids;
}

/*~ When we need to send an array of channels, it might go over our 64k packet
 * size.  But because we use compression, we can't actually tell how much
 * we'll use.  We pack them into the maximum amount for uncompressed, then
 * compress afterwards.
 */
static void queue_channel_ranges(struct peer *peer,
				 u32 first_blocknum, u32 number_of_blocks,
				 enum query_option_flags query_option_flags)
{
	struct routing_state *rstate = peer->daemon->rstate;
	struct channel_update_timestamps *tstamps;
	struct channel_update_checksums *csums;
	struct short_channel_id *scids;
	size_t off, limit;

	scids = gather_range(tmpctx, rstate, first_blocknum, number_of_blocks,
			     query_option_flags, &tstamps, &csums);

	limit = max_entries(query_option_flags);
	off = 0;

	/* We need to send an empty msg if we have nothing! */
	do {
		size_t n = tal_count(scids) - off;
		u32 this_num_blocks;

		if (n > limit) {
			status_debug("reply_channel_range: splitting %zu-%zu of %zu",
				     off, off + limit, tal_count(scids));
			n = limit;

			/* ... and reduce to a block boundary. */
			while (short_channel_id_blocknum(&scids[off + n - 1])
			       == short_channel_id_blocknum(&scids[off + limit])) {
				/* We assume one block doesn't have limit #
				 * channels.  If it does, we have to violate
				 * spec and send over multiple blocks. */
				if (n == 0) {
					status_broken("reply_channel_range: "
						      "could not fit %zu scids for %u!",
						      limit,
						      short_channel_id_blocknum(&scids[off + n - 1]));
					n = limit;
					break;
				}
				n--;
			}
			/* Get *next* channel, add num blocks */
			this_num_blocks
				= short_channel_id_blocknum(&scids[off + n])
				- first_blocknum;
		} else
			/* Last one must end with correct total */
			this_num_blocks = number_of_blocks;

		send_reply_channel_range(peer, first_blocknum, this_num_blocks,
					 scids + off,
					 query_option_flags & QUERY_ADD_TIMESTAMPS
					 ? tstamps + off : NULL,
					 query_option_flags & QUERY_ADD_CHECKSUMS
					 ? csums + off : NULL,
					 n,
					 this_num_blocks == number_of_blocks);
		first_blocknum += this_num_blocks;
		number_of_blocks -= this_num_blocks;
		off += n;
	} while (number_of_blocks);
}

/*~ The peer can ask for all channels in a series of blocks.  We reply with one
 * or more messages containing the short_channel_ids. */
const u8 *handle_query_channel_range(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain_hash;
	u32 first_blocknum, number_of_blocks;
	enum query_option_flags query_option_flags;
	struct tlv_query_channel_range_tlvs *tlvs
		= tlv_query_channel_range_tlvs_new(msg);

	if (!fromwire_query_channel_range(msg, &chain_hash,
					  &first_blocknum, &number_of_blocks,
					  tlvs)) {
		return towire_warningfmt(peer, NULL,
					 "Bad query_channel_range w/tlvs %s",
					 tal_hex(tmpctx, msg));
	}
	if (tlvs->query_option)
		query_option_flags = *tlvs->query_option;
	else
		query_option_flags = 0;

	/* BOLT #7
	 *
	 * The receiver of `query_channel_range`:
	 * ...
	 *  - if does not maintain up-to-date channel information for `chain_hash`:
	 *    - MUST set `complete` to 0.
	 */
	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain_hash)) {
		status_peer_debug(&peer->id,
				  "query_channel_range with chainhash %s",
				  type_to_string(tmpctx, struct bitcoin_blkid,
						 &chain_hash));
		u8 *end = towire_reply_channel_range(NULL, &chain_hash, first_blocknum,
						     number_of_blocks, false, NULL, NULL);
		queue_peer_msg(peer, take(end));
		return NULL;
	}

	/* Fix up number_of_blocks to avoid overflow. */
	if (first_blocknum + number_of_blocks < first_blocknum)
		number_of_blocks = UINT_MAX - first_blocknum;

	queue_channel_ranges(peer, first_blocknum, number_of_blocks,
			     query_option_flags);
	return NULL;
}

/* Append these scids (and optional timestamps) to our pending replies */
static u8 *append_range_reply(struct peer *peer,
			      const struct short_channel_id *scids,
			      const struct tlv_reply_channel_range_tlvs_timestamps_tlv
			      *timestamps_tlv)
{
	u16 i, old_num, added;
	const struct channel_update_timestamps *ts;
	/* Zero means "no timestamp" */
	static const struct channel_update_timestamps zero_ts = { 0, 0 };

	if (timestamps_tlv) {
		ts = decode_channel_update_timestamps(tmpctx,
						      timestamps_tlv);
		if (!ts)
			return towire_warningfmt(peer, NULL,
						 "reply_channel_range can't decode timestamps.");
		if (tal_count(ts) != tal_count(scids)) {
			return towire_warningfmt(peer, NULL,
						 "reply_channel_range %zu timestamps when %zu scids?",
						 tal_count(ts),
						 tal_count(scids));
		}
	} else
		ts = NULL;

	old_num = tal_count(peer->range_replies);
	added = tal_count(scids);
	for (i = 0; i < added; i++) {
		tal_resize(&peer->range_replies, old_num + i + 1);
		peer->range_replies[old_num + i].scid = scids[i];
		if (ts)
			peer->range_replies[old_num + i].ts = ts[i];
		else
			peer->range_replies[old_num + i].ts = zero_ts;
	}

	return NULL;
}

/*~ This is the reply we get when we send query_channel_range; we keep
 * expecting them until the entire range we asked for is covered. */
const u8 *handle_reply_channel_range(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain;
	u8 sync_complete;
	u32 first_blocknum, number_of_blocks, start, end;
	u8 *encoded;
	struct short_channel_id *scids;
	const struct range_query_reply *replies;
	const u8 *err;
	void (*cb)(struct peer *peer,
		   u32 first_blocknum, u32 number_of_blocks,
		   const struct range_query_reply *replies);
	struct tlv_reply_channel_range_tlvs *tlvs
		= tlv_reply_channel_range_tlvs_new(tmpctx);

	if (!fromwire_reply_channel_range(tmpctx, msg, &chain, &first_blocknum,
					  &number_of_blocks, &sync_complete,
					  &encoded, tlvs)) {
		return towire_warningfmt(peer, NULL,
					 "Bad reply_channel_range w/tlvs %s",
					 tal_hex(tmpctx, msg));
	}

	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain)) {
		return towire_warningfmt(peer, NULL,
					 "reply_channel_range for bad chain: %s",
					 tal_hex(tmpctx, msg));
	}

	if (!peer->range_replies) {
		return towire_warningfmt(peer, NULL,
					 "reply_channel_range without query: %s",
					 tal_hex(tmpctx, msg));
	}

	/* Beware overflow! */
	if (first_blocknum + number_of_blocks < first_blocknum) {
		return towire_warningfmt(peer, NULL,
					 "reply_channel_range invalid %u+%u",
					 first_blocknum, number_of_blocks);
	}

	scids = decode_short_ids(tmpctx, encoded);
	if (!scids) {
		return towire_warningfmt(peer, NULL,
					 "Bad reply_channel_range encoding %s",
					 tal_hex(tmpctx, encoded));
	}

	status_peer_debug(&peer->id,
			  "reply_channel_range %u+%u (of %u+%u) %zu scids",
			  first_blocknum, number_of_blocks,
			  peer->range_first_blocknum,
			  peer->range_end_blocknum - peer->range_first_blocknum,
			  tal_count(scids));

	/* BOLT #7:
	 * The receiver of `query_channel_range`:
	 *...
	 * - the first `reply_channel_range` message:
	 *   - MUST set `first_blocknum` less than or equal to the
	 *     `first_blocknum` in `query_channel_range`
	 *   - MUST set `first_blocknum` plus `number_of_blocks` greater than
	 *     `first_blocknum` in `query_channel_range`.
	 * - successive `reply_channel_range` message:
	 *    - MUST have `first_blocknum` equal or greater than the previous
	 *     `first_blocknum`.
	 *    - MUST set `sync_complete` to `false` if this is not the final `reply_channel_range`.
	 * - the final `reply_channel_range` message:
	 *   - MUST have `first_blocknum` plus `number_of_blocks` equal or
	 *     greater than the `query_channel_range` `first_blocknum` plus
	 *     `number_of_blocks`.
	 *   - MUST set `sync_complete` to `true`.
	 */
	/* ie. They can be outside range we asked, but they must overlap! */
	if (first_blocknum + number_of_blocks <= peer->range_first_blocknum
	    || first_blocknum >= peer->range_end_blocknum) {
		return towire_warningfmt(peer, NULL,
					 "reply_channel_range invalid %u+%u for query %u+%u",
					 first_blocknum, number_of_blocks,
					 peer->range_first_blocknum,
					 peer->range_end_blocknum
					 - peer->range_first_blocknum);
	}

	start = first_blocknum;
	end = first_blocknum + number_of_blocks;
	/* Trim to make it a subset of what we want. */
	if (start < peer->range_first_blocknum)
		start = peer->range_first_blocknum;
	if (end > peer->range_end_blocknum)
		end = peer->range_end_blocknum;

	/* Have a seat.  It's time for a history lesson in Rusty Screws Up.
	 *
	 * Part 1
	 * ------
	 * The original spec had a field called "complete" which meant
	 * "I believe I have complete knowledge of gossip", with the idea
	 * that lite nodes in future would not set this.
	 *
	 * But I chose a terrible name, and LND mis-implemented the spec,
	 * thinking this was an "end of replies".  If they have multiple
	 * replies, set each one to the *whole* range, with complete=0 except
	 * the last.
	 *
	 * Here we try to accommodate that (pretend we make no progress
	 * until the end)! */
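	/* (64046 bytes is, we believe, the exact length of a maximally-full
	 * reply from those old LND versions, so it serves as a fingerprint.) */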
	if (first_blocknum == peer->range_first_blocknum
	    && first_blocknum + number_of_blocks == peer->range_end_blocknum
	    && !sync_complete
	    && tal_bytelen(msg) == 64046) {
		status_unusual("Old LND reply_channel_range detected: result will be truncated!");
	}

	/*
	 * Part 2
	 * ------
	 * You were supposed to use the first_blocknum + number_of_blocks
	 * to tell when gossip was finished, with the rule being no replies
	 * could overlap, so you could say "I asked for blocks 100-199" and if
	 * you got a reply saying it covered blocks 50-150, you knew that you
	 * still had 49 blocks to receive.
	 *
	 * The field was renamed to `full_information`, and since everyone
	 * did it this way anyway, we insisted the replies be in
	 * non-overlapping ascending order.
	 *
	 * But LND didn't do this, and can actually overlap, since they just
	 * chop them up when they reach length, not by block boundary, so
	 * we had to allow that.
	 *
	 * Reading this implementation gave me envy: it was much simpler than
	 * backing out to a block boundary!
	 *
	 * And what if a single block had so many channel openings that you
	 * couldn't fit it in a single reply?  (This was originally
	 * inconceivable, but with the addition of timestamps and checksums,
	 * is now possible).
	 *
	 * So we decided to make the lie into a truth.  `full_information`
	 * was re-renamed to `sync_complete`, and once everyone has upgraded
	 * we can use that, rather than tallying the block numbers, to
	 * tell if replies are finished.
	 */
	err = append_range_reply(peer, scids, tlvs->timestamps_tlv);
	if (err)
		return err;

	/* Credit peer for answering gossip, so seeker doesn't get upset:
	 * since scids are only 8 bytes, use a discount over normal gossip. */
	peer_supplied_good_gossip(peer, tal_count(scids) / 20);

	/* Old code used to set this to 1 all the time; not setting it implies
	 * we're talking to an upgraded node. */
	if (!sync_complete) {
		/* We no longer need old heuristic counter. */
		peer->range_blocks_outstanding = 0;
		return NULL;
	}

	/* FIXME: This "how many blocks do we have answers for?" heuristic
	 * can go away once everyone uses sync_complete properly. */
	if (end - start < peer->range_blocks_outstanding) {
		peer->range_blocks_outstanding -= end - start;
		return NULL;
	}

	/* Clear these immediately in case cb wants to queue more */
	replies = tal_steal(tmpctx, peer->range_replies);
	cb = peer->query_channel_range_cb;

	peer->range_replies = NULL;
	peer->query_channel_range_cb = NULL;

	cb(peer, first_blocknum, number_of_blocks, replies);
	return NULL;
}

/*~ When we ask about an array of short_channel_ids, we get all channel &
 * node announcements and channel updates which the peer knows.  There's an
 * explicit end packet; this is needed to differentiate between 'I'm slow'
 * and 'I don't know those channels'. */
const u8 *handle_reply_short_channel_ids_end(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain;
	u8 complete;

	if (!fromwire_reply_short_channel_ids_end(msg, &chain, &complete)) {
		return towire_warningfmt(peer, NULL,
					 "Bad reply_short_channel_ids_end %s",
					 tal_hex(tmpctx, msg));
	}

	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain)) {
		return towire_warningfmt(peer, NULL,
					 "reply_short_channel_ids_end for bad chain: %s",
					 tal_hex(tmpctx, msg));
	}

	if (!peer->scid_query_outstanding) {
		return towire_warningfmt(peer, NULL,
					 "unexpected reply_short_channel_ids_end: %s",
					 tal_hex(tmpctx, msg));
	}

	peer->scid_query_outstanding = false;
	if (peer->scid_query_cb)
		peer->scid_query_cb(peer, complete);

	/* All good, no error. */
	return NULL;
}

/*~ Arbitrary ordering function of pubkeys.
 *
 * Note that we could use memcmp() here: even if they had somehow different
 * bitwise representations for the same key, we copied them all from struct
 * node which should make them unique.  Even if not (say, a node vanished
 * and reappeared) we'd just end up sending two node_announcement for the
 * same node.
 */
static int pubkey_order(const struct node_id *k1,
			const struct node_id *k2,
			void *unused UNUSED)
{
	return node_id_cmp(k1, k2);
}

static void uniquify_node_ids(struct node_id **ids)
{
	size_t dst, src;

	/* BOLT #7:
	 *   - SHOULD avoid sending duplicate `node_announcements` in
	 *     response to a single `query_short_channel_ids`.
	 */
	/* ccan/asort is a typesafe qsort wrapper: like most ccan modules
	 * it eschews exposing 'void *' pointers and ensures that the
	 * callback function and its arguments match types correctly. */
	asort(*ids, tal_count(*ids), pubkey_order, NULL);

	/* Compact the array */
	for (dst = 0, src = 0; src < tal_count(*ids); src++) {
		if (dst && node_id_eq(&(*ids)[dst-1], &(*ids)[src]))
			continue;
		(*ids)[dst++] = (*ids)[src];
	}

	/* And trim to length, so tal_count() gives correct answer. */
	tal_resize(ids, dst);
}

/* We are fairly careful to avoid the peer DoSing us with channel queries:
 * this routine sends information about a single short_channel_id, unless
 * it's finished all of them. */
void maybe_send_query_responses(struct peer *peer)
{
	struct routing_state *rstate = peer->daemon->rstate;
	size_t i, num;
	bool sent = false;
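	/* (We answer at most one short_channel_id per call; the daemon_conn
	 * write loop calls us again once the reply has drained, which paces
	 * our responses.) */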

	/* BOLT #7:
	 *
	 *   - MUST respond to each known `short_channel_id`:
	 */
	/* Search for next short_channel_id we know about. */
	num = tal_count(peer->scid_queries);
	for (i = peer->scid_query_idx; !sent && i < num; i++) {
		struct chan *chan;

		chan = get_channel(rstate, &peer->scid_queries[i]);
		if (!chan || !is_chan_public(chan))
			continue;

		/* BOLT #7:
		 * - if bit 0 of `query_flag` is set:
		 *   - MUST reply with a `channel_announcement`
		 */
		if (peer->scid_query_flags[i] & SCID_QF_ANNOUNCE) {
			queue_peer_from_store(peer, &chan->bcast);
			sent = true;
		}

		/* BOLT #7:
		 * - if bit 1 of `query_flag` is set and it has received a
		 *   `channel_update` from `node_id_1`:
		 *   - MUST reply with the latest `channel_update` for
		 *     `node_id_1`
		 * - if bit 2 of `query_flag` is set and it has received a
		 *   `channel_update` from `node_id_2`:
		 *   - MUST reply with the latest `channel_update` for
		 *     `node_id_2` */
		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE1)
		    && is_halfchan_defined(&chan->half[0])) {
			queue_peer_from_store(peer, &chan->half[0].bcast);
			sent = true;
		}
		if ((peer->scid_query_flags[i] & SCID_QF_UPDATE2)
		    && is_halfchan_defined(&chan->half[1])) {
			queue_peer_from_store(peer, &chan->half[1].bcast);
			sent = true;
		}

		/* BOLT #7:
		 * - if bit 3 of `query_flag` is set and it has received
		 *   a `node_announcement` from `node_id_1`:
		 *   - MUST reply with the latest `node_announcement` for
		 *     `node_id_1`
		 * - if bit 4 of `query_flag` is set and it has received a
		 *   `node_announcement` from `node_id_2`:
		 *   - MUST reply with the latest `node_announcement` for
		 *     `node_id_2` */
		/* Save node ids for later transmission of node_announcement */
		if (peer->scid_query_flags[i] & SCID_QF_NODE1)
			tal_arr_expand(&peer->scid_query_nodes,
				       chan->nodes[0]->id);
		if (peer->scid_query_flags[i] & SCID_QF_NODE2)
			tal_arr_expand(&peer->scid_query_nodes,
				       chan->nodes[1]->id);
	}

	/* Just finished channels?  Remove duplicate nodes. */
	if (peer->scid_query_idx != num && i == num)
		uniquify_node_ids(&peer->scid_query_nodes);

	/* Update index for next time we're called. */
	peer->scid_query_idx = i;

	/* BOLT #7:
	 *
	 *    - if the incoming message does not include `encoded_query_flags`:
	 *      ...
	 *      - MUST follow with any `node_announcement`s for each
	 *        `channel_announcement`
	 *    - otherwise:
	 *      ...
	 *      - if bit 3 of `query_flag` is set and it has received a
	 *        `node_announcement` from `node_id_1`:
	 *        - MUST reply with the latest `node_announcement` for
	 *          `node_id_1`
	 *      - if bit 4 of `query_flag` is set and it has received a
	 *        `node_announcement` from `node_id_2`:
	 *        - MUST reply with the latest `node_announcement` for
	 *          `node_id_2`
	 */
	/* If we haven't sent anything above, we look for the next
	 * node_announcement to send. */
	num = tal_count(peer->scid_query_nodes);
	for (i = peer->scid_query_nodes_idx; !sent && i < num; i++) {
		const struct node *n;

		/* Not every node announces itself (we know it exists because
		 * of a channel_announcement, however) */
		n = get_node(rstate, &peer->scid_query_nodes[i]);
		if (!n || !n->bcast.index)
			continue;

		queue_peer_from_store(peer, &n->bcast);
		sent = true;
	}
	peer->scid_query_nodes_idx = i;

	/* All finished? */
	if (peer->scid_queries
	    && peer->scid_query_idx == tal_count(peer->scid_queries)
	    && peer->scid_query_nodes_idx == num) {
		/* BOLT #7:
		 *
		 *    - MUST follow these responses with
		 *      `reply_short_channel_ids_end`.
		 *      - if does not maintain up-to-date channel information for
		 *        `chain_hash`:
		 *        - MUST set `full_information` to 0.
		 *      - otherwise:
		 *        - SHOULD set `full_information` to 1.
		 */
		/* FIXME: We consider ourselves to have complete knowledge. */
		u8 *end = towire_reply_short_channel_ids_end(peer,
							     &chainparams->genesis_blockhash,
							     true);
		queue_peer_msg(peer, take(end));

		/* We're done!  Clean up so we simply pass-through next time. */
		peer->scid_queries = tal_free(peer->scid_queries);
		peer->scid_query_flags = tal_free(peer->scid_query_flags);
		peer->scid_query_idx = 0;
		peer->scid_query_nodes = tal_free(peer->scid_query_nodes);
		peer->scid_query_nodes_idx = 0;
	}
}

bool query_channel_range(struct daemon *daemon,
			 struct peer *peer,
			 u32 first_blocknum, u32 number_of_blocks,
			 enum query_option_flags qflags,
			 void (*cb)(struct peer *peer,
				    u32 first_blocknum, u32 number_of_blocks,
				    const struct range_query_reply *replies))
{
	u8 *msg;
	struct tlv_query_channel_range_tlvs *tlvs;

	assert((qflags & ~(QUERY_ADD_TIMESTAMPS|QUERY_ADD_CHECKSUMS)) == 0);
	assert(peer->gossip_queries_feature);
	assert(!peer->range_replies);
	assert(!peer->query_channel_range_cb);

	if (qflags) {
		tlvs = tlv_query_channel_range_tlvs_new(tmpctx);
		tlvs->query_option = tal(tlvs, bigsize_t);
		*tlvs->query_option = qflags;
	} else
		tlvs = NULL;
	status_peer_debug(&peer->id,
			  "sending query_channel_range for blocks %u+%u",
			  first_blocknum, number_of_blocks);

	msg = towire_query_channel_range(NULL, &chainparams->genesis_blockhash,
					 first_blocknum, number_of_blocks,
					 tlvs);
	queue_peer_msg(peer, take(msg));
	peer->range_first_blocknum = first_blocknum;
	peer->range_end_blocknum = first_blocknum + number_of_blocks;
	peer->range_blocks_outstanding = number_of_blocks;
	peer->range_replies = tal_arr(peer, struct range_query_reply, 0);
	peer->query_channel_range_cb = cb;

	return true;
}

#if DEVELOPER
/* This is a testing hack to allow us to artificially lower the maximum bytes
 * of short_channel_ids we'll encode, using dev_set_max_scids_encode_size. */
struct io_plan *dev_set_max_scids_encode_size(struct io_conn *conn,
					      struct daemon *daemon,
					      const u8 *msg)
{
	if (!fromwire_gossipd_dev_set_max_scids_encode_size(msg,
							    &dev_max_encoding_bytes))
		master_badmsg(WIRE_GOSSIPD_DEV_SET_MAX_SCIDS_ENCODE_SIZE, msg);

	status_debug("Set max_scids_encode_bytes to %u", dev_max_encoding_bytes);
	return daemon_conn_read_next(conn, daemon->master);
}
#endif /* DEVELOPER */