1 /*~ Welcome to the hsm daemon: keeper of our secrets!
2 *
3 * This is a separate daemon which keeps a root secret from which all others
4 * are generated. It starts with one client: lightningd, which can ask for
5 * new sockets for other clients. Each client has a simple capability map
6 * which indicates what it's allowed to ask for. We're entirely driven
7 * by request, response.
8 */
9 #include <ccan/array_size/array_size.h>
10 #include <ccan/intmap/intmap.h>
11 #include <ccan/io/fdpass/fdpass.h>
12 #include <ccan/noerr/noerr.h>
13 #include <ccan/read_write_all/read_write_all.h>
14 #include <ccan/tal/str/str.h>
15 #include <common/daemon_conn.h>
16 #include <common/hsm_encryption.h>
17 #include <common/memleak.h>
18 #include <common/status.h>
19 #include <common/status_wiregen.h>
20 #include <common/subdaemon.h>
21 #include <common/type_to_string.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <hsmd/capabilities.h>
25 /*~ _wiregen files are autogenerated by tools/generate-wire.py */
26 #include <hsmd/libhsmd.h>
27 #include <sys/socket.h>
28 #include <sys/stat.h>
29 #include <wire/wire_io.h>
30
/*~ Each subdaemon is started with stdin connected to lightningd (for status
 * messages), and stderr untouched (for emergency printing). File descriptors
 * 3 and beyond are set up on other sockets: for hsmd, fd 3 is the request
 * stream from lightningd. */
#define REQ_FD 3

#if DEVELOPER
/* If they specify --dev-force-privkey it ends up in here. */
extern struct privkey *dev_force_privkey;
/* If they specify --dev-force-bip32-seed it ends up in here. */
extern struct secret *dev_force_bip32_seed;
#endif

/* Temporary storage for the secret until we pass it to `hsmd_init`.
 * NOTE(review): deliberately non-static — presumably referenced by another
 * translation unit (libhsmd); confirm before narrowing the linkage. */
struct secret hsm_secret;
46
/*~ We keep track of clients, but there's not much to keep. */
struct client {
	/* The ccan/io async io connection for this client: it closes, we die. */
	struct io_conn *conn;

	/*~ io_read_wire needs a pointer to store incoming messages until
	 * it has the complete thing; this is it. */
	u8 *msg_in;

	/*~ Useful for logging, but also used to derive the per-channel seed. */
	struct node_id id;

	/*~ This is a unique value handed to us from lightningd, used for
	 * per-channel seed generation (a single id may have multiple channels
	 * over time).
	 *
	 * It's actually zero for the initial lightningd client connection and
	 * the ones for gossipd and connectd, which don't have channels
	 * associated. */
	u64 dbid;

	/* What is this client allowed to ask for?  A bitmap of HSM_CAP_*
	 * values, enforced via hsmd_check_client_capabilities() before we
	 * dispatch any request. */
	u64 capabilities;

	/* Params to apply to all transactions for this client (NULL for the
	 * master until the init message tells us which chain we're on). */
	const struct chainparams *chainparams;

	/* Client context to pass over to libhsmd for its calls. */
	struct hsmd_client *hsmd_client;
};
77
/*~ We keep a map of nonzero dbid -> clients, mainly for leak detection.
 * This is ccan/uintmap, which maps u64 to some (non-NULL) pointer.
 * I really dislike these kinds of declaration-via-magic macro things, as
 * tags can't find them without special hacks, but the payoff here is that
 * the map is typesafe: the compiler won't let you put anything in but a
 * struct client pointer. */
static UINTMAP(struct client *) clients;
/*~ Plus the three zero-dbid clients: master, gossipd and connectd. */
static struct client *dbid_zero_clients[3];
static size_t num_dbid_zero_clients;

/*~ We need this deep inside bad_req_fmt, and for memleak, so we make it a
 * global. */
static struct daemon_conn *status_conn;
92
93 /* This is used for various assertions and error cases. */
is_lightningd(const struct client * client)94 static bool is_lightningd(const struct client *client)
95 {
96 return client == dbid_zero_clients[0];
97 }
98
/* FIXME: This is used by debug.c. Doesn't apply to us, but lets us link. */
extern void dev_disconnect_init(int fd);
/* Deliberate no-op stub: hsmd has no peer connections to force-disconnect. */
void dev_disconnect_init(int fd UNUSED) { }
102
/* Pre-declare this, due to mutual recursion */
static struct io_plan *handle_client(struct io_conn *conn, struct client *c);

/*~ ccan/compiler.h defines PRINTF_FMT as the gcc compiler hint so it will
 * check that fmt and other trailing arguments really are the correct type.
 *
 * This is a convenient helper to tell lightningd we've received a bad request
 * and closes the client connection. This should never happen, of course, but
 * we definitely want to log if it does.
 */
static struct io_plan *bad_req_fmt(struct io_conn *conn,
				   struct client *c,
				   const u8 *msg_in,
				   const char *fmt, ...)
	PRINTF_FMT(4,5);
118
bad_req_fmt(struct io_conn * conn,struct client * c,const u8 * msg_in,const char * fmt,...)119 static struct io_plan *bad_req_fmt(struct io_conn *conn,
120 struct client *c,
121 const u8 *msg_in,
122 const char *fmt, ...)
123 {
124 va_list ap;
125 char *str;
126
127 va_start(ap, fmt);
128 str = tal_fmt(tmpctx, fmt, ap);
129 va_end(ap);
130
131 /*~ If the client was actually lightningd, it's Game Over; we actually
132 * fail in this case, and it will too. */
133 if (is_lightningd(c)) {
134 status_broken("%s", str);
135 master_badmsg(fromwire_peektype(msg_in), msg_in);
136 }
137
138 /*~ Nobody should give us bad requests; it's a sign something is broken */
139 status_broken("%s: %s", type_to_string(tmpctx, struct node_id, &c->id), str);
140
141 /*~ Note the use of NULL as the ctx arg to towire_hsmstatus_: only
142 * use NULL as the allocation when we're about to immediately free it
143 * or hand it off with take(), as here. That makes it clear we don't
144 * expect it to linger, and in fact our memleak detection will
145 * complain if it does (unlike using the deliberately-transient
146 * tmpctx). */
147 daemon_conn_send(status_conn,
148 take(towire_hsmstatus_client_bad_request(NULL,
149 &c->id,
150 str,
151 msg_in)));
152
153 /*~ The way ccan/io works is that you return the "plan" for what to do
154 * next (eg. io_read). io_close() is special: it means to close the
155 * connection. */
156 return io_close(conn);
157 }
158
/* Convenience wrapper for when we simply can't parse. */
static struct io_plan *bad_req(struct io_conn *conn,
			       struct client *c,
			       const u8 *msg_in)
{
	/* Logs, notifies lightningd, and closes this client's connection. */
	return bad_req_fmt(conn, c, msg_in, "could not parse request");
}
166
/*~ This plan simply says: read the next packet into 'c->msg_in' (parent 'c'),
 * and then call handle_client with argument 'c' */
static struct io_plan *client_read_next(struct io_conn *conn, struct client *c)
{
	/* io_read_wire() gathers a complete length-prefixed wire message
	 * before invoking the callback. */
	return io_read_wire(conn, c, &c->msg_in, handle_client, c);
}
173
174 /*~ This is the destructor on our client: we may call it manually, but
175 * generally it's called because the io_conn associated with the client is
176 * closed by the other end. */
destroy_client(struct client * c)177 static void destroy_client(struct client *c)
178 {
179 if (!uintmap_del(&clients, c->dbid))
180 status_failed(STATUS_FAIL_INTERNAL_ERROR,
181 "Failed to remove client dbid %"PRIu64, c->dbid);
182 }
183
/*~ Create and register a client: @ctx is the tal parent, @chainparams may be
 * NULL (the master's params arrive later via the init message), @id/@dbid
 * identify the peer and channel (NULL id and zero dbid for the special
 * lightningd/gossipd/connectd connections), @capabilities is the HSM_CAP_*
 * bitmap, and @fd is the socket we serve requests on. */
static struct client *new_client(const tal_t *ctx,
				 const struct chainparams *chainparams,
				 const struct node_id *id,
				 u64 dbid,
				 const u64 capabilities,
				 int fd)
{
	struct client *c = tal(ctx, struct client);

	/*~ All-zero pubkey is used for the initial master connection */
	if (id) {
		c->id = *id;
		if (!node_id_valid(id))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Invalid node id %s",
				      type_to_string(tmpctx, struct node_id,
						     id));
	} else {
		memset(&c->id, 0, sizeof(c->id));
	}
	c->dbid = dbid;

	c->capabilities = capabilities;
	c->chainparams = chainparams;

	/*~ This is the core of ccan/io: the connection creation calls a
	 * callback which returns the initial plan to execute: in our case,
	 * read a message.*/
	c->conn = io_new_conn(ctx, fd, client_read_next, c);

	/*~ tal_steal() moves a pointer to a new parent. At this point, the
	 * hierarchy is:
	 *
	 *   ctx -> c
	 *   ctx -> c->conn
	 *
	 * We want c->conn to own 'c', so that if the io_conn closes,
	 * the client is freed:
	 *
	 *   ctx -> c->conn -> c.
	 */
	tal_steal(c->conn, c);

	/* We put the special zero-db HSM connections into an array, the rest
	 * go into the map. */
	if (dbid == 0) {
		assert(num_dbid_zero_clients < ARRAY_SIZE(dbid_zero_clients));
		dbid_zero_clients[num_dbid_zero_clients++] = c;
		c->hsmd_client = hsmd_client_new_main(c, c->capabilities, c);
	} else {
		struct client *old_client = uintmap_get(&clients, dbid);

		/* Close conn and free any old client of this dbid. */
		if (old_client)
			io_close(old_client->conn);

		if (!uintmap_add(&clients, dbid, c))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Failed inserting dbid %"PRIu64, dbid);
		/* Only mapped clients need the destructor: the zero-dbid
		 * ones live for the daemon's lifetime. */
		tal_add_destructor(c, destroy_client);
		c->hsmd_client =
		    hsmd_client_new_peer(c, c->capabilities, dbid, id, c);
	}

	return c;
}
250
/* This is the common pattern for the tail of each handler in this file:
 * send @msg_out (possibly take()n), then loop back to reading requests. */
static struct io_plan *req_reply(struct io_conn *conn,
				 struct client *c,
				 const u8 *msg_out TAKES)
{
	/*~ Write this out, then read the next one. This works perfectly for
	 * a simple request/response system like this.
	 *
	 * Internally, the ccan/io subsystem gathers all the file descriptors,
	 * figures out which want to write and read, asks the OS which ones
	 * are available, and for those file descriptors, tries to do the
	 * reads/writes we've asked it. It handles retry in the case where a
	 * read or write is done partially.
	 *
	 * Since the OS does buffering internally (on my system, over 100k
	 * worth) writes will normally succeed immediately. However, if the
	 * client is slow or malicious, and doesn't read from the socket as
	 * fast as we're writing, eventually the socket buffer will fill up;
	 * we don't care, because ccan/io will wait until there's room to
	 * write this reply before it will read again. The client just hurts
	 * themselves, and there's no Denial of Service on us.
	 *
	 * If we were to queue outgoing messages ourselves, we *would* have to
	 * consider such scenarios; this is why our daemons generally avoid
	 * buffering from untrusted parties. */
	return io_write_wire(conn, msg_out, client_read_next, c);
}
278
279 /*~ This encrypts the content of the `struct secret hsm_secret` and
280 * stores it in hsm_secret, this is called instead of create_hsm() if
281 * `lightningd` is started with --encrypted-hsm.
282 */
create_encrypted_hsm(int fd,const struct secret * encryption_key)283 static void create_encrypted_hsm(int fd, const struct secret *encryption_key)
284 {
285 struct encrypted_hsm_secret cipher;
286
287 if (!encrypt_hsm_secret(encryption_key, &hsm_secret,
288 &cipher))
289 status_failed(STATUS_FAIL_INTERNAL_ERROR,
290 "Encrypting hsm_secret");
291 if (!write_all(fd, cipher.data, ENCRYPTED_HSM_SECRET_LEN)) {
292 unlink_noerr("hsm_secret");
293 status_failed(STATUS_FAIL_INTERNAL_ERROR,
294 "Writing encrypted hsm_secret: %s", strerror(errno));
295 }
296 }
297
create_hsm(int fd)298 static void create_hsm(int fd)
299 {
300 /*~ ccan/read_write_all has a more convenient return than write() where
301 * we'd have to check the return value == the length we gave: write()
302 * can return short on normal files if we run out of disk space. */
303 if (!write_all(fd, &hsm_secret, sizeof(hsm_secret))) {
304 /* ccan/noerr contains useful routines like this, which don't
305 * clobber errno, so we can use it in our error report. */
306 unlink_noerr("hsm_secret");
307 status_failed(STATUS_FAIL_INTERNAL_ERROR,
308 "writing: %s", strerror(errno));
309 }
310 }
311
312 /*~ We store our root secret in a "hsm_secret" file (like all of c-lightning,
313 * we run in the user's .lightning directory). */
/*~ @encryption_key: if non-NULL, store the seed encrypted under it.
 * @random_hsm: if true, generate a fresh random seed first (false when we're
 * re-writing the already-loaded seed, e.g. to encrypt a plaintext one). */
static void maybe_create_new_hsm(const struct secret *encryption_key,
				 bool random_hsm)
{
	/*~ Note that this is opened for write-only, even though the permissions
	 * are set to read-only. That's perfectly valid! */
	int fd = open("hsm_secret", O_CREAT|O_EXCL|O_WRONLY, 0400);
	if (fd < 0) {
		/* If this is not the first time we've run, it will exist. */
		if (errno == EEXIST)
			return;
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "creating: %s", strerror(errno));
	}

	/*~ This is libsodium's cryptographic randomness routine: we assume
	 * it's doing a good job. */
	if (random_hsm)
		randombytes_buf(&hsm_secret, sizeof(hsm_secret));

	/*~ If an encryption_key was provided, store an encrypted seed. */
	if (encryption_key)
		create_encrypted_hsm(fd, encryption_key);
	/*~ Otherwise store the seed in clear.. */
	else
		create_hsm(fd);
	/*~ fsync (mostly!) ensures that the file has reached the disk. */
	if (fsync(fd) != 0) {
		unlink_noerr("hsm_secret");
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "fsync: %s", strerror(errno));
	}
	/*~ This should never fail if fsync succeeded. But paranoia good, and
	 * bugs exist. */
	if (close(fd) != 0) {
		unlink_noerr("hsm_secret");
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "closing: %s", strerror(errno));
	}
	/*~ We actually need to sync the *directory itself* to make sure the
	 * file exists! You're only allowed to open directories read-only in
	 * modern Unix though. */
	fd = open(".", O_RDONLY);
	if (fd < 0) {
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "opening: %s", strerror(errno));
	}
	if (fsync(fd) != 0) {
		unlink_noerr("hsm_secret");
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "fsyncdir: %s", strerror(errno));
	}
	close(fd);
	/*~ status_unusual() is good for things which are interesting and
	 * definitely won't spam the logs. Only status_broken() is higher;
	 * status_info() is lower, then status_debug() and finally
	 * status_io(). */
	status_unusual("HSM: created new hsm_secret file");
}
372
373 /*~ We always load the HSM file, even if we just created it above. This
374 * both unifies the code paths, and provides a nice sanity check that the
375 * file contents are as they will be for future invocations. */
/*~ Read the seed from "hsm_secret" into the global hsm_secret, decrypting
 * with @encryption_key if the on-disk copy is encrypted.  As a side effect,
 * a plaintext seed is re-written encrypted when a key is supplied. */
static void load_hsm(const struct secret *encryption_key)
{
	struct stat st;
	int fd = open("hsm_secret", O_RDONLY);
	if (fd < 0)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "opening: %s", strerror(errno));
	if (stat("hsm_secret", &st) != 0)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "stating: %s", strerror(errno));

	/* If the seed is stored in clear.
	 * NOTE(review): 32 is presumably sizeof(struct secret) — confirm
	 * before changing either side. */
	if (st.st_size == 32) {
		if (!read_all(fd, &hsm_secret, sizeof(hsm_secret)))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "reading: %s", strerror(errno));
		/* If an encryption key was passed with a not yet encrypted hsm_secret,
		 * remove the old one and create an encrypted one. */
		if (encryption_key) {
			if (close(fd) != 0)
				status_failed(STATUS_FAIL_INTERNAL_ERROR,
					      "closing: %s", strerror(errno));
			if (remove("hsm_secret") != 0)
				status_failed(STATUS_FAIL_INTERNAL_ERROR,
					      "removing clear hsm_secret: %s", strerror(errno));
			/* false: keep the seed we just read, don't randomize. */
			maybe_create_new_hsm(encryption_key, false);
			/* Reopen so the close() at the end stays valid. */
			fd = open("hsm_secret", O_RDONLY);
			if (fd < 0)
				status_failed(STATUS_FAIL_INTERNAL_ERROR,
					      "opening: %s", strerror(errno));
		}
	}
	/* If an encryption key was passed and the `hsm_secret` is stored
	 * encrypted, recover the seed from the cipher. */
	else if (st.st_size == ENCRYPTED_HSM_SECRET_LEN) {
		struct encrypted_hsm_secret encrypted_secret;

		/* hsm_control must have checked it! */
		assert(encryption_key);

		if (!read_all(fd, encrypted_secret.data, ENCRYPTED_HSM_SECRET_LEN))
			status_failed(STATUS_FAIL_INTERNAL_ERROR,
				      "Reading encrypted hsm_secret: %s", strerror(errno));
		if (!decrypt_hsm_secret(encryption_key, &encrypted_secret,
					&hsm_secret)) {
			/* Exit but don't throw a backtrace when the user made a mistake in typing
			 * its password. Instead exit and `lightningd` will be able to give
			 * an error message. */
			exit(1);
		}
	}
	else
		status_failed(STATUS_FAIL_INTERNAL_ERROR, "Invalid hsm_secret, "
							  "no plaintext nor encrypted"
							  " seed.");
	close(fd);
}
433
434 /*~ This is the response to lightningd's HSM_INIT request, which is the first
435 * thing it sends. */
init_hsm(struct io_conn * conn,struct client * c,const u8 * msg_in)436 static struct io_plan *init_hsm(struct io_conn *conn,
437 struct client *c,
438 const u8 *msg_in)
439 {
440 struct privkey *privkey;
441 struct secret *seed;
442 struct secrets *secrets;
443 struct sha256 *shaseed;
444 struct secret *hsm_encryption_key;
445 struct bip32_key_version bip32_key_version;
446
447 /* This must be lightningd. */
448 assert(is_lightningd(c));
449
450 /*~ The fromwire_* routines are autogenerated, based on the message
451 * definitions in hsm_client_wire.csv. The format of those files is
452 * an extension of the simple comma-separated format output by the
453 * BOLT tools/extract-formats.py tool. */
454 if (!fromwire_hsmd_init(NULL, msg_in, &bip32_key_version, &chainparams,
455 &hsm_encryption_key, &privkey, &seed, &secrets, &shaseed))
456 return bad_req(conn, c, msg_in);
457
458 /*~ The memory is actually copied in towire(), so lock the `hsm_secret`
459 * encryption key (new) memory again here. */
460 if (hsm_encryption_key && sodium_mlock(hsm_encryption_key,
461 sizeof(hsm_encryption_key)) != 0)
462 status_failed(STATUS_FAIL_INTERNAL_ERROR,
463 "Could not lock memory for hsm_secret encryption key.");
464 /*~ Don't swap this. */
465 sodium_mlock(hsm_secret.data, sizeof(hsm_secret.data));
466
467 #if DEVELOPER
468 dev_force_privkey = privkey;
469 dev_force_bip32_seed = seed;
470 dev_force_channel_secrets = secrets;
471 dev_force_channel_secrets_shaseed = shaseed;
472 #endif
473
474 /* Once we have read the init message we know which params the master
475 * will use */
476 c->chainparams = chainparams;
477 maybe_create_new_hsm(hsm_encryption_key, true);
478 load_hsm(hsm_encryption_key);
479
480 /*~ We don't need the hsm_secret encryption key anymore. */
481 if (hsm_encryption_key)
482 discard_key(take(hsm_encryption_key));
483
484 return req_reply(conn, c, hsmd_init(hsm_secret, bip32_key_version));
485 }
486
/*~ Since we process requests then service them in strict order, and because
 * only lightningd can request a new client fd, we can get away with a global
 * here! But because we are being tricky, I set it to an invalid value when
 * not in use, and sprinkle assertions around.
 * Set in pass_client_hsmfd(), consumed in send_pending_client_fd(). */
static int pending_client_fd = -1;
492
493 /*~ This is the callback from below: having sent the reply, we now send the
494 * fd for the client end of the new socketpair. */
send_pending_client_fd(struct io_conn * conn,struct client * master)495 static struct io_plan *send_pending_client_fd(struct io_conn *conn,
496 struct client *master)
497 {
498 int fd = pending_client_fd;
499 /* This must be the master. */
500 assert(is_lightningd(master));
501 assert(fd != -1);
502
503 /* This sanity check shouldn't be necessary, but it's cheap. */
504 pending_client_fd = -1;
505
506 /*~There's arcane UNIX magic to send an open file descriptor over a
507 * UNIX domain socket. There's no great way to autogenerate this
508 * though; especially for the receive side, so we always pass these
509 * manually immediately following the message.
510 *
511 * io_send_fd()'s third parameter is whether to close the local one
512 * after sending; that saves us YA callback.
513 */
514 return io_send_fd(conn, fd, true, client_read_next, master);
515 }
516
/*~ This is used by the master to create a new client connection (which
 * becomes the HSM_FD for the subdaemon after forking). */
static struct io_plan *pass_client_hsmfd(struct io_conn *conn,
					 struct client *c,
					 const u8 *msg_in)
{
	int fds[2];
	u64 dbid, capabilities;
	struct node_id id;

	/* This must be lightningd itself. */
	assert(is_lightningd(c));

	if (!fromwire_hsmd_client_hsmfd(msg_in, &id, &dbid, &capabilities))
		return bad_req(conn, c, msg_in);

	/* socketpair is a bi-directional pipe, which is what we want. */
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0)
		status_failed(STATUS_FAIL_INTERNAL_ERROR, "creating fds: %s",
			      strerror(errno));

	status_debug("new_client: %"PRIu64, dbid);
	/* We serve fds[0]; fds[1] goes back to lightningd for the subdaemon. */
	new_client(c, c->chainparams, &id, dbid, capabilities, fds[0]);

	/*~ We stash this in a global, because we need to get both the fd and
	 * the client pointer to the callback. The other way would be to
	 * create a boutique structure and hand that, but we don't need to. */
	pending_client_fd = fds[1];
	return io_write_wire(conn, take(towire_hsmd_client_hsmfd_reply(NULL)),
			     send_pending_client_fd, c);
}
548
549 #if DEVELOPER
/* lightningd asks us to scan for leaked allocations (DEVELOPER builds).
 * We whitelist everything reachable from our known roots, then report
 * whether anything unaccounted-for remains. */
static struct io_plan *handle_memleak(struct io_conn *conn,
				      struct client *c,
				      const u8 *msg_in)
{
	struct htable *memtable;
	bool found_leak;
	u8 *reply;

	memtable = memleak_find_allocations(tmpctx, msg_in, msg_in);

	/* Now delete clients and anything they point to. */
	memleak_remove_region(memtable, c, tal_bytelen(c));
	memleak_remove_region(memtable,
			      dbid_zero_clients, sizeof(dbid_zero_clients));
	memleak_remove_uintmap(memtable, &clients);
	memleak_remove_region(memtable,
			      status_conn, tal_bytelen(status_conn));

	/* The --dev-force-* option storage is intentionally long-lived. */
	memleak_remove_pointer(memtable, dev_force_privkey);
	memleak_remove_pointer(memtable, dev_force_bip32_seed);

	found_leak = dump_memleak(memtable, memleak_status_broken);
	reply = towire_hsmd_dev_memleak_reply(NULL, found_leak);
	return req_reply(conn, c, take(reply));
}
575 #endif /* DEVELOPER */
576
hsmd_status_bad_request(struct hsmd_client * client,const u8 * msg,const char * error)577 u8 *hsmd_status_bad_request(struct hsmd_client *client, const u8 *msg, const char *error)
578 {
579 /* Extract the pointer to the hsmd representation of the
580 * client which has access to the underlying connection. */
581 struct client *c = (struct client*)client->extra;
582 bad_req_fmt(c->conn, c, msg, "%s", error);
583
584 /* We often use `return hsmd_status_bad_request` to drop out, and NULL
585 * means we encountered an error. */
586 return NULL;
587 }
588
/* Thin varargs shim libhsmd uses to reach our status logging. */
void hsmd_status_fmt(enum log_level level, const struct node_id *peer,
		     const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	status_vfmt(level, peer, fmt, args);
	va_end(args);
}
598
hsmd_status_failed(enum status_failreason reason,const char * fmt,...)599 void hsmd_status_failed(enum status_failreason reason, const char *fmt, ...)
600 {
601 va_list ap;
602 char *str;
603
604 va_start(ap, fmt);
605 str = tal_vfmt(NULL, fmt, ap);
606 va_end(ap);
607
608 /* Give a nice backtrace when this happens! */
609 if (reason == STATUS_FAIL_INTERNAL_ERROR)
610 send_backtrace(str);
611
612 status_send_fatal(take(towire_status_fail(NULL, reason, str)));
613 }
614
/*~ This is the core of the HSM daemon: handling requests. */
static struct io_plan *handle_client(struct io_conn *conn, struct client *c)
{
	enum hsmd_wire t = fromwire_peektype(c->msg_in);

	/* Before we do anything else, is this client allowed to do
	 * what he asks for? */
	if (!hsmd_check_client_capabilities(c->hsmd_client, t))
		return bad_req_fmt(conn, c, c->msg_in,
				   "client does not have capability to run %d",
				   t);

	/* Now actually go and do what the client asked for */
	switch (t) {
	case WIRE_HSMD_INIT:
		return init_hsm(conn, c, c->msg_in);

	case WIRE_HSMD_CLIENT_HSMFD:
		return pass_client_hsmfd(conn, c, c->msg_in);

#if DEVELOPER
	case WIRE_HSMD_DEV_MEMLEAK:
		return handle_memleak(conn, c, c->msg_in);
#else
	case WIRE_HSMD_DEV_MEMLEAK:
		/* Non-DEVELOPER builds: deliberate fallthrough to the
		 * libhsmd handler below.  NOTE(review): presumably libhsmd
		 * rejects this message type cleanly — confirm. */
#endif /* DEVELOPER */

	case WIRE_HSMD_SIGN_COMMITMENT_TX:
	case WIRE_HSMD_SIGN_PENALTY_TO_US:
	case WIRE_HSMD_SIGN_REMOTE_COMMITMENT_TX:
	case WIRE_HSMD_SIGN_REMOTE_HTLC_TX:
	case WIRE_HSMD_SIGN_MUTUAL_CLOSE_TX:
	case WIRE_HSMD_GET_PER_COMMITMENT_POINT:
	case WIRE_HSMD_SIGN_WITHDRAWAL:
	case WIRE_HSMD_GET_CHANNEL_BASEPOINTS:
	case WIRE_HSMD_SIGN_INVOICE:
	case WIRE_HSMD_SIGN_MESSAGE:
	case WIRE_HSMD_SIGN_OPTION_WILL_FUND_OFFER:
	case WIRE_HSMD_SIGN_BOLT12:
	case WIRE_HSMD_ECDH_REQ:
	case WIRE_HSMD_CHECK_FUTURE_SECRET:
	case WIRE_HSMD_GET_OUTPUT_SCRIPTPUBKEY:
	case WIRE_HSMD_CANNOUNCEMENT_SIG_REQ:
	case WIRE_HSMD_NODE_ANNOUNCEMENT_SIG_REQ:
	case WIRE_HSMD_CUPDATE_SIG_REQ:
	case WIRE_HSMD_SIGN_LOCAL_HTLC_TX:
	case WIRE_HSMD_SIGN_REMOTE_HTLC_TO_US:
	case WIRE_HSMD_SIGN_DELAYED_PAYMENT_TO_US:
		/* Hand off to libhsmd for processing */
		return req_reply(conn, c,
				 take(hsmd_handle_client_message(
				     tmpctx, c->hsmd_client, c->msg_in)));

	/* These are messages *we* send; receiving one is a protocol error. */
	case WIRE_HSMD_ECDH_RESP:
	case WIRE_HSMD_CANNOUNCEMENT_SIG_REPLY:
	case WIRE_HSMD_CUPDATE_SIG_REPLY:
	case WIRE_HSMD_CLIENT_HSMFD_REPLY:
	case WIRE_HSMD_NODE_ANNOUNCEMENT_SIG_REPLY:
	case WIRE_HSMD_SIGN_WITHDRAWAL_REPLY:
	case WIRE_HSMD_SIGN_INVOICE_REPLY:
	case WIRE_HSMD_INIT_REPLY:
	case WIRE_HSMSTATUS_CLIENT_BAD_REQUEST:
	case WIRE_HSMD_SIGN_COMMITMENT_TX_REPLY:
	case WIRE_HSMD_SIGN_TX_REPLY:
	case WIRE_HSMD_SIGN_OPTION_WILL_FUND_OFFER_REPLY:
	case WIRE_HSMD_GET_PER_COMMITMENT_POINT_REPLY:
	case WIRE_HSMD_CHECK_FUTURE_SECRET_REPLY:
	case WIRE_HSMD_GET_CHANNEL_BASEPOINTS_REPLY:
	case WIRE_HSMD_DEV_MEMLEAK_REPLY:
	case WIRE_HSMD_SIGN_MESSAGE_REPLY:
	case WIRE_HSMD_GET_OUTPUT_SCRIPTPUBKEY_REPLY:
	case WIRE_HSMD_SIGN_BOLT12_REPLY:
		return bad_req_fmt(conn, c, c->msg_in,
				   "Received an incoming message of type %s, "
				   "which is not a request",
				   hsmd_wire_name(t));
	}

	/* Unknown type: not covered by the (exhaustive) switch above. */
	return bad_req_fmt(conn, c, c->msg_in, "Unknown request");
}
695
/*~ Finish callback for the master connection: once lightningd is gone there
 * is nothing left to serve, so shut down and exit with a distinct status. */
static void master_gone(struct io_conn *unused UNUSED, struct client *c UNUSED)
{
	daemon_shutdown();
	/* Can't tell master, it's gone. */
	exit(2);
}
702
int main(int argc, char *argv[])
{
	struct client *master;

	setup_locale();

	/* This sets up tmpctx, various DEVELOPER options, backtraces, etc. */
	subdaemon_setup(argc, argv);

	/* A trivial daemon_conn just for writing (status goes to stdin's
	 * socket, which lightningd holds the other end of). */
	status_conn = daemon_conn_new(NULL, STDIN_FILENO, NULL, NULL, NULL);
	status_setup_async(status_conn);
	uintmap_init(&clients);

	master = new_client(NULL, NULL, NULL, 0,
			    HSM_CAP_MASTER | HSM_CAP_SIGN_GOSSIP | HSM_CAP_ECDH,
			    REQ_FD);

	/* First client == lightningd. */
	assert(is_lightningd(master));

	/* When conn closes, everything is freed. */
	io_set_finish(master->conn, master_gone, master);

	/*~ The two NULL args are a list of timers, and the timer which expired:
	 * we don't have any timers. */
	io_loop(NULL, NULL);

	/*~ This should never be reached: io_loop only exits on io_break which
	 * we don't call, a timer expiry which we don't have, or all connections
	 * being closed, and closing the master calls master_gone. */
	abort();
}
736
737 /*~ Congratulations on making it through the first of the seven dwarves!
738 * (And Christian wondered why I'm so fond of having separate daemons!).
739 *
740 * We continue our story in the next-more-complex daemon: connectd/connectd.c
741 */
742