/*	$OpenBSD: queue.c,v 1.194 2021/08/02 12:33:34 eric Exp $	*/

/*
 * Copyright (c) 2008 Gilles Chehade <gilles@poolp.org>
 * Copyright (c) 2008 Pierre-Yves Ritschard <pyr@openbsd.org>
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <inttypes.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "smtpd.h"
#include "log.h"

static void queue_imsg(struct mproc *, struct imsg *);
static void queue_timeout(int, short, void *);
static void queue_bounce(struct envelope *, struct delivery_bounce *);
static void queue_shutdown(void);
static void queue_log(const struct envelope *, const char *, const char *);
static void queue_msgid_walk(int, short, void *);


/*
 * Central imsg dispatcher for the queue process.
 *
 * Handles requests from the smtp/lka/scheduler/mta/mda/control peers:
 * message and envelope creation, commit, removal, expiry, bounce
 * generation, delivery status reporting, and queue discovery/listing.
 * Replies are sent either back to the requesting peer `p` or forwarded
 * to p_scheduler / p_dispatcher / p_control as appropriate.
 *
 * A NULL imsg signals that the peer channel is closing; queue_shutdown()
 * is then called (it does not return — it calls _exit()).
 */
static void
queue_imsg(struct mproc *p, struct imsg *imsg)
{
	struct delivery_bounce	 bounce;
	struct msg_walkinfo	*wi;
	struct timeval		 tv;
	struct bounce_req_msg	*req_bounce;
	struct envelope		 evp;
	struct msg		 m;
	const char		*reason;
	uint64_t		 reqid, evpid, holdq;
	uint32_t		 msgid;
	time_t			 nexttry;
	size_t			 n_evp;
	int			 fd, mta_ext, ret, v, flags, code;

	if (imsg == NULL)
		queue_shutdown();

	memset(&bounce, 0, sizeof(struct delivery_bounce));

	switch (imsg->hdr.type) {
	case IMSG_SMTP_MESSAGE_CREATE:
		/* Allocate a new message id; reply 0 on failure, 1+msgid on
		 * success. */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_end(&m);

		ret = queue_message_create(&msgid);

		m_create(p, IMSG_SMTP_MESSAGE_CREATE, 0, 0, -1);
		m_add_id(p, reqid);
		if (ret == 0)
			m_add_int(p, 0);
		else {
			m_add_int(p, 1);
			m_add_msgid(p, msgid);
		}
		m_close(p);
		return;

	case IMSG_SMTP_MESSAGE_ROLLBACK:
		/* Drop an uncommitted message and tell the scheduler to
		 * forget it. */
		m_msg(&m, imsg);
		m_get_msgid(&m, &msgid);
		m_end(&m);

		queue_message_delete(msgid);

		m_create(p_scheduler, IMSG_QUEUE_MESSAGE_ROLLBACK,
		    0, 0, -1);
		m_add_msgid(p_scheduler, msgid);
		m_close(p_scheduler);
		return;

	case IMSG_SMTP_MESSAGE_COMMIT:
		/* Commit the message content; notify the scheduler only on
		 * success. */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_get_msgid(&m, &msgid);
		m_end(&m);

		ret = queue_message_commit(msgid);

		m_create(p, IMSG_SMTP_MESSAGE_COMMIT, 0, 0, -1);
		m_add_id(p, reqid);
		m_add_int(p, (ret == 0) ? 0 : 1);
		m_close(p);

		if (ret) {
			m_create(p_scheduler, IMSG_QUEUE_MESSAGE_COMMIT,
			    0, 0, -1);
			m_add_msgid(p_scheduler, msgid);
			m_close(p_scheduler);
		}
		return;

	case IMSG_SMTP_MESSAGE_OPEN:
		/* Hand a writable fd for the message body back to the
		 * requester (fd == -1 on failure, signalled by the int). */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_get_msgid(&m, &msgid);
		m_end(&m);

		fd = queue_message_fd_rw(msgid);

		m_create(p, IMSG_SMTP_MESSAGE_OPEN, 0, 0, fd);
		m_add_id(p, reqid);
		m_add_int(p, (fd == -1) ? 0 : 1);
		m_close(p);
		return;

	case IMSG_QUEUE_SMTP_SESSION:
		/* fd of a local SMTP session used to submit bounces. */
		bounce_fd(imsg->fd);
		return;

	case IMSG_LKA_ENVELOPE_SUBMIT:
		/* Persist a new envelope; on success also push it to the
		 * scheduler. */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_get_envelope(&m, &evp);
		m_end(&m);

		if (evp.id == 0)
			log_warnx("warn: imsg_queue_submit_envelope: evpid=0");
		if (evpid_to_msgid(evp.id) == 0)
			log_warnx("warn: imsg_queue_submit_envelope: msgid=0, "
			    "evpid=%016"PRIx64, evp.id);
		ret = queue_envelope_create(&evp);
		m_create(p_dispatcher, IMSG_QUEUE_ENVELOPE_SUBMIT, 0, 0, -1);
		m_add_id(p_dispatcher, reqid);
		if (ret == 0)
			m_add_int(p_dispatcher, 0);
		else {
			m_add_int(p_dispatcher, 1);
			m_add_evpid(p_dispatcher, evp.id);
		}
		m_close(p_dispatcher);
		if (ret) {
			m_create(p_scheduler,
			    IMSG_QUEUE_ENVELOPE_SUBMIT, 0, 0, -1);
			m_add_envelope(p_scheduler, &evp);
			m_close(p_scheduler);
		}
		return;

	case IMSG_LKA_ENVELOPE_COMMIT:
		/* Envelope commit is unconditional here: always ack with 1. */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_end(&m);
		m_create(p_dispatcher, IMSG_QUEUE_ENVELOPE_COMMIT, 0, 0, -1);
		m_add_id(p_dispatcher, reqid);
		m_add_int(p_dispatcher, 1);
		m_close(p_dispatcher);
		return;

	case IMSG_SCHED_ENVELOPE_REMOVE:
		/* Administrative removal: always ack, then delete if the
		 * envelope still exists on disk. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);

		m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_ACK, 0, 0, -1);
		m_add_evpid(p_scheduler, evpid);
		m_close(p_scheduler);

		/* already removed by scheduler */
		if (queue_envelope_load(evpid, &evp) == 0)
			return;

		queue_log(&evp, "Remove", "Removed by administrator");
		queue_envelope_delete(evpid);
		return;

	case IMSG_SCHED_ENVELOPE_EXPIRE:
		/* TTL expiry: ack, then bounce with a permanent failure and
		 * delete. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);

		m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_ACK, 0, 0, -1);
		m_add_evpid(p_scheduler, evpid);
		m_close(p_scheduler);

		/* already removed by scheduler*/
		if (queue_envelope_load(evpid, &evp) == 0)
			return;

		bounce.type = B_FAILED;
		envelope_set_errormsg(&evp, "Envelope expired");
		envelope_set_esc_class(&evp, ESC_STATUS_PERMFAIL);
		envelope_set_esc_code(&evp, ESC_DELIVERY_TIME_EXPIRED);
		queue_bounce(&evp, &bounce);
		queue_log(&evp, "Expire", "Envelope expired");
		queue_envelope_delete(evpid);
		return;

	case IMSG_SCHED_ENVELOPE_BOUNCE:
		/* Raw struct payload; size is validated before use. */
		CHECK_IMSG_DATA_SIZE(imsg, sizeof *req_bounce);
		req_bounce = imsg->data;
		evpid = req_bounce->evpid;

		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: bounce: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 0); /* not in-flight */
			m_close(p_scheduler);
			return;
		}
		queue_bounce(&evp, &req_bounce->bounce);
		evp.lastbounce = req_bounce->timestamp;
		if (!queue_envelope_update(&evp))
			log_warnx("warn: could not update envelope %016"PRIx64, evpid);
		return;

	case IMSG_SCHED_ENVELOPE_DELIVER:
		/* Hand the envelope to the dispatcher for local delivery;
		 * if it is gone, ask the scheduler to drop it. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: deliver: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 1); /* in-flight */
			m_close(p_scheduler);
			return;
		}
		evp.lasttry = time(NULL);
		m_create(p_dispatcher, IMSG_QUEUE_DELIVER, 0, 0, -1);
		m_add_envelope(p_dispatcher, &evp);
		m_close(p_dispatcher);
		return;

	case IMSG_SCHED_ENVELOPE_INJECT:
		/* Queue a bounce report for this envelope id. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);
		bounce_add(evpid);
		return;

	case IMSG_SCHED_ENVELOPE_TRANSFER:
		/* Hand the envelope to the dispatcher for relaying (MTA). */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 1); /* in-flight */
			m_close(p_scheduler);
			return;
		}
		evp.lasttry = time(NULL);
		m_create(p_dispatcher, IMSG_QUEUE_TRANSFER, 0, 0, -1);
		m_add_envelope(p_dispatcher, &evp);
		m_close(p_dispatcher);
		return;

	case IMSG_CTL_LIST_ENVELOPES:
		/* An empty payload marks the end of the listing: just
		 * forward the terminator to control. */
		if (imsg->hdr.len == sizeof imsg->hdr) {
			m_forward(p_control, imsg);
			return;
		}

		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_get_int(&m, &flags);
		m_get_time(&m, &nexttry);
		m_end(&m);

		if (queue_envelope_load(evpid, &evp) == 0)
			return; /* Envelope is gone, drop it */

		/*
		 * XXX consistency: The envelope might already be on
		 * its way back to the scheduler.  We need to detect
		 * this properly and report that state.
		 */
		if (flags & EF_INFLIGHT) {
			/*
			 * Not exactly correct but pretty close: The
			 * value is not recorded on the envelope unless
			 * a tempfail occurs.
			 */
			evp.lasttry = nexttry;
		}

		m_create(p_control, IMSG_CTL_LIST_ENVELOPES,
		    imsg->hdr.peerid, 0, -1);
		m_add_int(p_control, flags);
		m_add_time(p_control, nexttry);
		m_add_envelope(p_control, &evp);
		m_close(p_control);
		return;

	case IMSG_MDA_OPEN_MESSAGE:
	case IMSG_MTA_OPEN_MESSAGE:
		/* Read-only fd for the message body; fd may be -1 on
		 * failure and is passed through as-is. */
		m_msg(&m, imsg);
		m_get_id(&m, &reqid);
		m_get_msgid(&m, &msgid);
		m_end(&m);
		fd = queue_message_fd_r(msgid);
		m_create(p, imsg->hdr.type, 0, 0, fd);
		m_add_id(p, reqid);
		m_close(p);
		return;

	case IMSG_MDA_DELIVERY_OK:
	case IMSG_MTA_DELIVERY_OK:
		/* Successful delivery: generate a DSN success report if
		 * requested, then delete the envelope and notify the
		 * scheduler.  The MTA variant carries the peer's extension
		 * flags so we know whether the remote handled DSN itself. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		if (imsg->hdr.type == IMSG_MTA_DELIVERY_OK)
			m_get_int(&m, &mta_ext);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warn("queue: dsn: failed to load envelope");
			return;
		}
		if (evp.dsn_notify & DSN_SUCCESS) {
			bounce.type = B_DELIVERED;
			bounce.dsn_ret = evp.dsn_ret;
			envelope_set_esc_class(&evp, ESC_STATUS_OK);
			if (imsg->hdr.type == IMSG_MDA_DELIVERY_OK)
				queue_bounce(&evp, &bounce);
			else if (imsg->hdr.type == IMSG_MTA_DELIVERY_OK &&
			    (mta_ext & MTA_EXT_DSN) == 0) {
				/* remote has no DSN support: report here */
				bounce.mta_without_dsn = 1;
				queue_bounce(&evp, &bounce);
			}
		}
		queue_envelope_delete(evpid);
		m_create(p_scheduler, IMSG_QUEUE_DELIVERY_OK, 0, 0, -1);
		m_add_evpid(p_scheduler, evpid);
		m_close(p_scheduler);
		return;

	case IMSG_MDA_DELIVERY_TEMPFAIL:
	case IMSG_MTA_DELIVERY_TEMPFAIL:
		/* Temporary failure: record reason/ESC, bump retry count,
		 * keep the envelope and let the scheduler retry later. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_get_string(&m, &reason);
		m_get_int(&m, &code);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: tempfail: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 1); /* in-flight */
			m_close(p_scheduler);
			return;
		}
		envelope_set_errormsg(&evp, "%s", reason);
		envelope_set_esc_class(&evp, ESC_STATUS_TEMPFAIL);
		envelope_set_esc_code(&evp, code);
		evp.retry++;
		if (!queue_envelope_update(&evp))
			log_warnx("warn: could not update envelope %016"PRIx64, evpid);
		m_create(p_scheduler, IMSG_QUEUE_DELIVERY_TEMPFAIL, 0, 0, -1);
		m_add_envelope(p_scheduler, &evp);
		m_close(p_scheduler);
		return;

	case IMSG_MDA_DELIVERY_PERMFAIL:
	case IMSG_MTA_DELIVERY_PERMFAIL:
		/* Permanent failure: bounce the envelope and delete it. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_get_string(&m, &reason);
		m_get_int(&m, &code);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: permfail: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 1); /* in-flight */
			m_close(p_scheduler);
			return;
		}
		bounce.type = B_FAILED;
		envelope_set_errormsg(&evp, "%s", reason);
		envelope_set_esc_class(&evp, ESC_STATUS_PERMFAIL);
		envelope_set_esc_code(&evp, code);
		queue_bounce(&evp, &bounce);
		queue_envelope_delete(evpid);
		m_create(p_scheduler, IMSG_QUEUE_DELIVERY_PERMFAIL, 0, 0, -1);
		m_add_evpid(p_scheduler, evpid);
		m_close(p_scheduler);
		return;

	case IMSG_MDA_DELIVERY_LOOP:
	case IMSG_MTA_DELIVERY_LOOP:
		/* Mail loop detected: bounce and delete.  Note the ESC
		 * class is TEMPFAIL while the bounce itself is B_FAILED. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: loop: failed to load envelope");
			m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_REMOVE, 0, 0, -1);
			m_add_evpid(p_scheduler, evpid);
			m_add_u32(p_scheduler, 1); /* in-flight */
			m_close(p_scheduler);
			return;
		}
		envelope_set_errormsg(&evp, "%s", "Loop detected");
		envelope_set_esc_class(&evp, ESC_STATUS_TEMPFAIL);
		envelope_set_esc_code(&evp, ESC_ROUTING_LOOP_DETECTED);
		bounce.type = B_FAILED;
		queue_bounce(&evp, &bounce);
		queue_envelope_delete(evp.id);
		m_create(p_scheduler, IMSG_QUEUE_DELIVERY_LOOP, 0, 0, -1);
		m_add_evpid(p_scheduler, evp.id);
		m_close(p_scheduler);
		return;

	case IMSG_MTA_DELIVERY_HOLD:
	case IMSG_MDA_DELIVERY_HOLD:
		/* Retype and forward: scheduler handles holdq bookkeeping. */
		imsg->hdr.type = IMSG_QUEUE_HOLDQ_HOLD;
		m_forward(p_scheduler, imsg);
		return;

	case IMSG_MTA_SCHEDULE:
		imsg->hdr.type = IMSG_QUEUE_ENVELOPE_SCHEDULE;
		m_forward(p_scheduler, imsg);
		return;

	case IMSG_MTA_HOLDQ_RELEASE:
	case IMSG_MDA_HOLDQ_RELEASE:
		/* Release up to `v` envelopes from the given holdq,
		 * tagging the message with the originating agent class. */
		m_msg(&m, imsg);
		m_get_id(&m, &holdq);
		m_get_int(&m, &v);
		m_end(&m);
		m_create(p_scheduler, IMSG_QUEUE_HOLDQ_RELEASE, 0, 0, -1);
		if (imsg->hdr.type == IMSG_MTA_HOLDQ_RELEASE)
			m_add_int(p_scheduler, D_MTA);
		else
			m_add_int(p_scheduler, D_MDA);
		m_add_id(p_scheduler, holdq);
		m_add_int(p_scheduler, v);
		m_close(p_scheduler);
		return;

	case IMSG_CTL_PAUSE_MDA:
	case IMSG_CTL_PAUSE_MTA:
	case IMSG_CTL_RESUME_MDA:
	case IMSG_CTL_RESUME_MTA:
		/* Pause/resume is a scheduler concern: pass through. */
		m_forward(p_scheduler, imsg);
		return;

	case IMSG_CTL_VERBOSE:
		m_msg(&m, imsg);
		m_get_int(&m, &v);
		m_end(&m);
		log_trace_verbose(v);
		return;

	case IMSG_CTL_PROFILE:
		m_msg(&m, imsg);
		m_get_int(&m, &v);
		m_end(&m);
		profiling = v;
		return;

	case IMSG_CTL_DISCOVER_EVPID:
		/* Re-inject a single on-disk envelope into the scheduler
		 * and report the number of envelopes found to control. */
		m_msg(&m, imsg);
		m_get_evpid(&m, &evpid);
		m_end(&m);
		if (queue_envelope_load(evpid, &evp) == 0) {
			log_warnx("queue: discover: failed to load "
			    "envelope %016" PRIx64, evpid);
			n_evp = 0;
			m_compose(p_control, imsg->hdr.type,
			    imsg->hdr.peerid, 0, -1,
			    &n_evp, sizeof n_evp);
			return;
		}

		m_create(p_scheduler, IMSG_QUEUE_DISCOVER_EVPID,
		    0, 0, -1);
		m_add_envelope(p_scheduler, &evp);
		m_close(p_scheduler);

		m_create(p_scheduler, IMSG_QUEUE_DISCOVER_MSGID,
		    0, 0, -1);
		m_add_msgid(p_scheduler, evpid_to_msgid(evpid));
		m_close(p_scheduler);
		n_evp = 1;
		m_compose(p_control, imsg->hdr.type, imsg->hdr.peerid,
		    0, -1, &n_evp, sizeof n_evp);
		return;

	case IMSG_CTL_DISCOVER_MSGID:
		/* Walking a whole message can be slow, so it is done
		 * incrementally from a timer event rather than inline. */
		m_msg(&m, imsg);
		m_get_msgid(&m, &msgid);
		m_end(&m);
		/* handle concurrent walk requests */
		wi = xcalloc(1, sizeof *wi);
		wi->msgid = msgid;
		wi->peerid = imsg->hdr.peerid;
		evtimer_set(&wi->ev, queue_msgid_walk, wi);
		tv.tv_sec = 0;
		tv.tv_usec = 10;
		evtimer_add(&wi->ev, &tv);
		return;
	}

	fatalx("queue_imsg: unexpected %s imsg", imsg_to_str(imsg->hdr.type));
}

/*
 * Timer callback driving an incremental walk over the envelopes of one
 * message (started by IMSG_CTL_DISCOVER_MSGID above).  One envelope is
 * processed per invocation; the timer re-arms itself until
 * queue_message_walk() returns -1 (walk finished), at which point the
 * envelope count is reported to control and the walk state is freed.
 */
static void
queue_msgid_walk(int fd, short event, void *arg)
{
	struct envelope		 evp;
	struct timeval		 tv;
	struct msg_walkinfo	*wi = arg;
	int			 r;

	r = queue_message_walk(&evp, wi->msgid, &wi->done, &wi->data);
	if (r == -1) {
		/* Walk complete: commit the message to the scheduler if
		 * any envelope was found, then report the count. */
		if (wi->n_evp) {
			m_create(p_scheduler, IMSG_QUEUE_DISCOVER_MSGID,
			    0, 0, -1);
			m_add_msgid(p_scheduler, wi->msgid);
			m_close(p_scheduler);
		}

		m_compose(p_control, IMSG_CTL_DISCOVER_MSGID, wi->peerid, 0, -1,
		    &wi->n_evp, sizeof wi->n_evp);
		evtimer_del(&wi->ev);
		free(wi);
		return;
	}

	if (r) {
		/* One envelope found: push it to the scheduler. */
		m_create(p_scheduler, IMSG_QUEUE_DISCOVER_EVPID, 0, 0, -1);
		m_add_envelope(p_scheduler, &evp);
		m_close(p_scheduler);
		wi->n_evp += 1;
	}

	/* Re-arm for the next step of the walk. */
	tv.tv_sec = 0;
	tv.tv_usec = 10;
	evtimer_set(&wi->ev, queue_msgid_walk, wi);
	evtimer_add(&wi->ev, &tv);
}

/*
 * Create a bounce envelope for `e` with the bounce description `d` and
 * submit it to the scheduler.
 *
 * The bounce is a copy of the original envelope retyped as D_BOUNCE,
 * with retry/lasttry reset and a fixed 7-day TTL.  Nothing is produced
 * when the sender asked for no DSN (DSN_NEVER), when the envelope is
 * itself already a bounce (avoids double bounces), or when there is no
 * return path (empty sender).
 */
static void
queue_bounce(struct envelope *e, struct delivery_bounce *d)
{
	struct envelope	b;

	b = *e;
	b.type = D_BOUNCE;
	b.agent.bounce = *d;
	b.retry = 0;
	b.lasttry = 0;
	b.creation = time(NULL);
	b.ttl = 3600 * 24 * 7;	/* bounces expire after one week */

	if (e->dsn_notify & DSN_NEVER)
		return;

	if (b.id == 0)
		log_warnx("warn: queue_bounce: evpid=0");
	if (evpid_to_msgid(b.id) == 0)
		log_warnx("warn: queue_bounce: msgid=0, evpid=%016"PRIx64,
		    b.id);
	if (e->type == D_BOUNCE) {
		log_warnx("warn: queue: double bounce!");
	} else if (e->sender.user[0] == '\0') {
		log_warnx("warn: queue: no return path!");
	} else if (!queue_envelope_create(&b)) {
		log_warnx("warn: queue: cannot bounce!");
	} else {
		log_debug("debug: queue: bouncing evp:%016" PRIx64
		    " as evp:%016" PRIx64, e->id, b.id);

		/* Submit the new envelope and commit its message so the
		 * scheduler can dispatch it. */
		m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_SUBMIT, 0, 0, -1);
		m_add_envelope(p_scheduler, &b);
		m_close(p_scheduler);

		m_create(p_scheduler, IMSG_QUEUE_MESSAGE_COMMIT, 0, 0, -1);
		m_add_msgid(p_scheduler, evpid_to_msgid(b.id));
		m_close(p_scheduler);

		stat_increment("queue.bounce", 1);
	}
}

/* Terminate the queue process.  Does not return. */
static void
queue_shutdown(void)
{
	log_debug("debug: queue agent exiting");
	queue_close();
	_exit(0);
}

/*
 * Entry point of the queue process: drop into the spool chroot, shed
 * privileges to the queue user, wire up imsg peers, schedule the initial
 * queue load (queue_timeout) and run the event loop.  Only returns 0 to
 * satisfy the prototype; event_dispatch() is not expected to exit.
 */
int
queue(void)
{
	struct passwd	*pw;
	struct timeval	 tv;
	struct event	 ev_qload;

	purge_config(PURGE_EVERYTHING & ~PURGE_DISPATCHERS);

	/* Prefer the dedicated queue user, fall back to the smtpd user. */
	if ((pw = getpwnam(SMTPD_QUEUE_USER)) == NULL)
		if ((pw = getpwnam(SMTPD_USER)) == NULL)
			fatalx("unknown user " SMTPD_USER);

	/* Envelope cache is always enabled for the queue process. */
	env->sc_queue_flags |= QUEUE_EVPCACHE;
	env->sc_queue_evpcache_size = 1024;

	if (chroot(PATH_SPOOL) == -1)
		fatal("queue: chroot");
	if (chdir("/") == -1)
		fatal("queue: chdir(\"/\")");

	config_process(PROC_QUEUE);

	if (env->sc_queue_flags & QUEUE_COMPRESSION)
		log_info("queue: queue compression enabled");

	if (env->sc_queue_key) {
		if (!crypto_setup(env->sc_queue_key, strlen(env->sc_queue_key)))
			fatalx("crypto_setup: invalid key for queue encryption");
		log_info("queue: queue encryption enabled");
	}

	/* Drop privileges: groups first, then gid, then uid. */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("queue: cannot drop privileges");

	imsg_callback = queue_imsg;
	event_init();

	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	config_peer(PROC_PARENT);
	config_peer(PROC_CONTROL);
	config_peer(PROC_LKA);
	config_peer(PROC_SCHEDULER);
	config_peer(PROC_DISPATCHER);

	/* setup queue loading task */
	evtimer_set(&ev_qload, queue_timeout, &ev_qload);
	tv.tv_sec = 0;
	tv.tv_usec = 10;
	evtimer_add(&ev_qload, &tv);

	if (pledge("stdio rpath wpath cpath flock recvfd sendfd", NULL) == -1)
		fatal("pledge");

	event_dispatch();
	fatalx("exited event loop");

	return (0);
}

/*
 * Timer callback loading the on-disk queue into the scheduler at
 * startup, one envelope per invocation.  Envelopes of the same message
 * are submitted back-to-back; a IMSG_QUEUE_MESSAGE_COMMIT is emitted
 * whenever the walk moves on to a new msgid (and once more at the end
 * for the last message).  The static `msgid` keeps the current message
 * across invocations.
 */
static void
queue_timeout(int fd, short event, void *p)
{
	static uint32_t	 msgid = 0;
	struct envelope	 evp;
	struct event	*ev = p;
	struct timeval	 tv;
	int		 r;

	r = queue_envelope_walk(&evp);
	if (r == -1) {
		/* Walk finished: commit the last pending message, if any,
		 * and do not re-arm the timer. */
		if (msgid) {
			m_create(p_scheduler, IMSG_QUEUE_MESSAGE_COMMIT,
			    0, 0, -1);
			m_add_msgid(p_scheduler, msgid);
			m_close(p_scheduler);
		}
		log_debug("debug: queue: done loading queue into scheduler");
		return;
	}

	if (r) {
		/* Crossing into a new message: commit the previous one. */
		if (msgid
		    && evpid_to_msgid(evp.id) != msgid) {
			m_create(p_scheduler, IMSG_QUEUE_MESSAGE_COMMIT,
			    0, 0, -1);
			m_add_msgid(p_scheduler, msgid);
			m_close(p_scheduler);
		}
		msgid = evpid_to_msgid(evp.id);
		m_create(p_scheduler, IMSG_QUEUE_ENVELOPE_SUBMIT, 0, 0, -1);
		m_add_envelope(p_scheduler, &evp);
		m_close(p_scheduler);
	}

	/* Re-arm to continue the walk on the next event-loop pass. */
	tv.tv_sec = 0;
	tv.tv_usec = 10;
	evtimer_add(ev, &tv);
}

/*
 * Emit a one-line status record for envelope `e` to the info log.
 * `prefix` names the action (e.g. "Remove", "Expire") and `status` is
 * the human-readable result.  The rcpt= field is "-" unless the final
 * recipient differs from the original destination (i.e. the envelope
 * was rewritten by aliasing/forwarding).
 */
static void
queue_log(const struct envelope *e, const char *prefix, const char *status)
{
	char rcpt[LINE_MAX];

	(void)strlcpy(rcpt, "-", sizeof rcpt);
	if (strcmp(e->rcpt.user, e->dest.user) ||
	    strcmp(e->rcpt.domain, e->dest.domain))
		(void)snprintf(rcpt, sizeof rcpt, "%s@%s",
		    e->rcpt.user, e->rcpt.domain);

	log_info("%s: %s for %016" PRIx64 ": from=<%s@%s>, to=<%s@%s>, "
	    "rcpt=<%s>, delay=%s, stat=%s",
	    e->type == D_MDA ? "delivery" : "relay",
	    prefix,
	    e->id, e->sender.user, e->sender.domain,
	    e->dest.user, e->dest.domain,
	    rcpt,
	    duration_to_text(time(NULL) - e->creation),
	    status);
}