1 /* Copyright (c) 2017-2018 Dovecot authors, see the included COPYING file */
2
3 #include "lib.h"
4 #include "lib-event-private.h"
5 #include "event-filter.h"
6 #include "array.h"
7 #include "llist.h"
8 #include "time-util.h"
9 #include "str.h"
10 #include "strescape.h"
11 #include "ioloop.h"
12
/* Single-character type codes used in the serialized event representation
   (presumably consumed by event_export()/event_import(), whose bodies are
   outside this chunk — confirm). Lowercase codes carry event metadata,
   uppercase codes carry typed fields. */
enum event_code {
	EVENT_CODE_ALWAYS_LOG_SOURCE = 'a',
	EVENT_CODE_CATEGORY = 'c',
	EVENT_CODE_TV_LAST_SENT = 'l',
	EVENT_CODE_SENDING_NAME = 'n',
	EVENT_CODE_SOURCE = 's',

	/* typed field values */
	EVENT_CODE_FIELD_INTMAX = 'I',
	EVENT_CODE_FIELD_STR = 'S',
	EVENT_CODE_FIELD_TIMEVAL = 'T',
};
24
/* Internal event category state.

   Each (unique) event category maps to one internal category. (I.e., if
   two places attempt to register the same category, they will share the
   internal state.)

   This is required in order to support multiple registrations of the same
   category. Currently, the only situation in which this occurs is the
   stats process receiving categories from other processes and also using
   the same categories internally.

   During registration, we look up the internal state based on the new
   category's name. If found, we use it after sanity checking that the two
   are identical (i.e., they both have the same name and parent). If not
   found, we allocate a new internal state and use it.

   We stash a pointer to the internal state in struct event_category (the
   "internal" member). As a result, all category structs for the same
   category point to the same internal state. */
struct event_internal_category {
	/* More than one category can be represented by the internal state.
	   To give consumers a unique but consistent category pointer, we
	   return a pointer to this 'representative' category structure.
	   Because we allocated it, we know that it will live exactly as
	   long as we need it to. */
	struct event_category representative;

	/* internal state of the parent category, or NULL for a root category */
	struct event_internal_category *parent;
	/* heap-allocated copy of the category name (owned by this struct) */
	char *name;
	/* number of distinct struct event_category registrations sharing
	   this internal state */
	int refcount;
};
56
/* Passthrough method table, defined elsewhere in this file. */
extern const struct event_passthrough event_passthrough_vfuncs;

/* Head of the doubly-linked list of all live events. */
static struct event *events = NULL;
/* Event currently at the top of the global event stack, or NULL. */
static struct event *current_global_event = NULL;
/* The one unfinished passthrough event, or NULL (at most one may exist). */
static struct event_passthrough *event_last_passthrough = NULL;
/* Callbacks invoked for event lifecycle/send notifications. */
static ARRAY(event_callback_t *) event_handlers;
/* Callbacks invoked whenever a new category is registered. */
static ARRAY(event_category_callback_t *) event_category_callbacks;
/* All registered categories: internal state and the matching
   representative pointers handed out to consumers. */
static ARRAY(struct event_internal_category *) event_registered_categories_internal;
static ARRAY(struct event_category *) event_registered_categories_representative;
/* Stack of previously-pushed global events (excluding the current one). */
static ARRAY(struct event *) global_event_stack;
/* Source of unique, monotonically increasing event IDs. */
static uint64_t event_id_counter = 0;
68
/* Fill *ru_r with resource usage of the current process.
   A getrusage() failure is treated as fatal. */
static void get_self_rusage(struct rusage *ru_r)
{
	int ret = getrusage(RUSAGE_SELF, ru_r);

	if (ret < 0)
		i_fatal("getrusage() failed: %m");
}
74
75 static struct event *
76 event_create_internal(struct event *parent, const char *source_filename,
77 unsigned int source_linenum);
78 static struct event_internal_category *
79 event_category_find_internal(const char *name);
80
/* Map the global event_last_passthrough pointer back to its containing
   struct event. NOTE(review): this is also called while
   event_last_passthrough is NULL (see event_unref()); the result is then
   only ever compared against live event pointers — confirm container_of()
   handles NULL as expected here. */
static struct event *last_passthrough_event(void)
{
	return container_of(event_last_passthrough,
			    struct event, event_passthrough);
}
86
/* Copy the inheritable settings from a parent event into a newly created
   child. Categories and fields are NOT copied here — children inherit
   those by following the parent pointer. */
static void event_copy_parent_defaults(struct event *event,
				       const struct event *parent)
{
	event->always_log_source = parent->always_log_source;
	event->passthrough = parent->passthrough;
	event->min_log_level = parent->min_log_level;
	event->forced_debug = parent->forced_debug;
}
95
96 static bool
97 event_find_category(const struct event *event,
98 const struct event_category *category);
99
/* Mark the event as modified since it was last sent to the stats process.
   change_id must never become 0 or equal sent_to_stats_id, since either
   would make the event look unchanged/unsent. */
static void event_set_changed(struct event *event)
{
	event->change_id++;
	/* It's unlikely that change_id will ever wrap, but let's be safe
	   anyway. */
	if (event->change_id == 0 ||
	    event->change_id == event->sent_to_stats_id)
		event->change_id++;
}
109
/* Invoke every registered event handler for this event.
   Each callback runs inside its own data-stack frame (T_BEGIN/T_END).
   Returns FALSE if any callback stopped the event from being sent
   further, TRUE otherwise. */
static bool
event_call_callbacks(struct event *event, enum event_callback_type type,
		     struct failure_context *ctx, const char *fmt, va_list args)
{
	event_callback_t *callback;

	array_foreach_elem(&event_handlers, callback) {
		bool ret;

		T_BEGIN {
			ret = callback(event, type, ctx, fmt, args);
		} T_END;
		if (!ret) {
			/* event sending was stopped */
			return FALSE;
		}
	}
	return TRUE;
}
129
/* Invoke all event handlers with no message arguments (used for
   lifecycle notifications such as create/free). */
static void
event_call_callbacks_noargs(struct event *event,
			    enum event_callback_type type, ...)
{
	va_list args;

	/* the args are empty and not used for anything, but there doesn't seem
	   to be any nice and standard way of passing an initialized va_list
	   as a parameter without va_start(). */
	va_start(args, type);
	(void)event_call_callbacks(event, type, NULL, NULL, args);
	va_end(args);
}
143
/* Add all of the source event's categories (including inherited ones) to
   the destination event. The source array is walked back-to-front; the
   relative ordering of the categories in the destination matches the
   original implementation. */
void event_copy_categories(struct event *to, struct event *from)
{
	unsigned int count;
	struct event_category *const *cats =
		event_get_categories(from, &count);

	for (unsigned int i = count; i > 0; i--)
		event_add_category(to, cats[i - 1]);
}
152
event_copy_fields(struct event * to,struct event * from)153 void event_copy_fields(struct event *to, struct event *from)
154 {
155 const struct event_field *fld;
156 if (!array_is_created(&from->fields))
157 return;
158 array_foreach(&from->fields, fld) {
159 switch (fld->value_type) {
160 case EVENT_FIELD_VALUE_TYPE_STR:
161 event_add_str(to, fld->key, fld->value.str);
162 break;
163 case EVENT_FIELD_VALUE_TYPE_INTMAX:
164 event_add_int(to, fld->key, fld->value.intmax);
165 break;
166 case EVENT_FIELD_VALUE_TYPE_TIMEVAL:
167 event_add_timeval(to, fld->key, &fld->value.timeval);
168 break;
169 default:
170 break;
171 }
172 }
173 }
174
event_has_all_categories(struct event * event,const struct event * other)175 bool event_has_all_categories(struct event *event, const struct event *other)
176 {
177 struct event_category **cat;
178 if (!array_is_created(&other->categories))
179 return TRUE;
180 if (!array_is_created(&event->categories))
181 return FALSE;
182 array_foreach_modifiable(&other->categories, cat) {
183 if (!event_find_category(event, *cat))
184 return FALSE;
185 }
186 return TRUE;
187 }
188
event_has_all_fields(struct event * event,const struct event * other)189 bool event_has_all_fields(struct event *event, const struct event *other)
190 {
191 struct event_field *fld;
192 if (!array_is_created(&other->fields))
193 return TRUE;
194 array_foreach_modifiable(&other->fields, fld) {
195 if (event_find_field_nonrecursive(event, fld->key) == NULL)
196 return FALSE;
197 }
198 return TRUE;
199 }
200
/* Duplicate an event by exporting its state to a string and importing it
   into a freshly created event with the same parent and source location.
   A failed import of our own export indicates a bug, hence the panic.
   The ioloop creation timestamp is preserved explicitly since it is not
   part of the exported state. */
struct event *event_dup(const struct event *source)
{
	struct event *ret =
		event_create_internal(source->parent, source->source_filename,
				      source->source_linenum);
	string_t *str = t_str_new(256);
	const char *err;
	event_export(source, str);
	if (!event_import(ret, str_c(str), &err))
		i_panic("event_import(%s) failed: %s", str_c(str), err);
	ret->tv_created_ioloop = source->tv_created_ioloop;
	return ret;
}
214
/*
 * Copy the source's categories and fields recursively.
 *
 * We recurse to the parent before copying this event's data because we may
 * be overriding a field.
 */
static void event_flatten_recurse(struct event *dst, struct event *src,
				  struct event *limit)
{
	/* copy ancestors first (stopping at 'limit', exclusive) so that
	   values closer to 'src' override inherited ones */
	if (src->parent != limit)
		event_flatten_recurse(dst, src->parent, limit);

	event_copy_categories(dst, src);
	event_copy_fields(dst, src);
}
230
/* Return a parentless event that carries all categories and fields of
   'src' and its entire ancestry, plus src's name and timestamps.
   If 'src' already has no parent it is returned directly with an added
   reference; otherwise a new event is returned (caller owns the ref). */
struct event *event_flatten(struct event *src)
{
	struct event *dst;

	/* If we don't have a parent, we have nothing to flatten. */
	if (src->parent == NULL)
		return event_ref(src);

	/* We have to flatten the event. */

	dst = event_create_internal(NULL, src->source_filename,
				    src->source_linenum);
	dst = event_set_name(dst, src->sending_name);

	event_flatten_recurse(dst, src, NULL);

	/* preserve the original timestamps */
	dst->tv_created_ioloop = src->tv_created_ioloop;
	dst->tv_created = src->tv_created;
	dst->tv_last_sent = src->tv_last_sent;

	return dst;
}
253
/* Replace event->parent with 'new', adjusting reference counts.
   The new parent is referenced before the old one is released, which
   keeps the ordering safe even in edge cases. */
static inline void replace_parent_ref(struct event *event, struct event *new)
{
	if (event->parent == new)
		return; /* no-op */

	if (new != NULL)
		event_ref(new);

	event_unref(&event->parent);

	event->parent = new;
}
266
267 /*
268 * Minimize the event and its ancestry.
269 *
270 * In general, the chain of parents starting from this event can be divided
271 * up into four consecutive ranges:
272 *
273 * 1. the event itself
274 * 2. a range of events that should be flattened into the event itself
275 * 3. a range of trivial (i.e., no categories or fields) events that should
276 * be skipped
277 * 4. the rest of the chain
278 *
279 * Except for the first range, the event itself, the remaining ranges can
280 * have zero events.
281 *
282 * As the names of these ranges imply, we want to flatten certain parts of
283 * the ancestry, skip other parts of the ancestry and leave the remainder
284 * untouched.
285 *
286 * For example, suppose that we have an event (A) with ancestors forming the
287 * following graph:
288 *
289 * A -> B -> C -> D -> E -> F
290 *
291 * Further, suppose that B, C, and F contain some categories or fields but
292 * have not yet been sent to an external process that knows how to reference
293 * previously encountered events, and D contains no fields or categories of
294 * its own (but it inherits some from E and F).
295 *
296 * We can define the 4 ranges:
297 *
298 * A: the event
299 * B-C: flattening
300 * D: skipping
301 * E-end: the rest
302 *
303 * The output would therefore be:
304 *
305 * G -> E -> F
306 *
307 * where G contains the fields and categories of A, B, and C (and trivially
 * D because D was empty).
309 *
310 * Note that even though F has not yet been sent out, we send it now because
311 * it is part of the "rest" range.
312 *
313 * TODO: We could likely apply this function recursively on the "rest"
314 * range, but further investigation is required to determine whether it is
315 * worth it.
316 */
/* See the range-based explanation in the comment above: flatten the
   not-yet-sent same-ioloop-tick ancestors into a copy of the event, then
   skip any remaining not-yet-sent ancestors that carry no categories or
   fields. Returns a referenced event (possibly the input itself). */
struct event *event_minimize(struct event *event)
{
	struct event *flatten_bound;
	struct event *skip_bound;
	struct event *new_event;
	struct event *cur;

	if (event->parent == NULL)
		return event_ref(event);

	/* find the bound for field/category flattening: the first ancestor
	   that was already sent to stats or was created in a different
	   ioloop iteration */
	flatten_bound = NULL;
	for (cur = event->parent; cur != NULL; cur = cur->parent) {
		if (cur->sent_to_stats_id == 0 &&
		    timeval_cmp(&cur->tv_created_ioloop,
				&event->tv_created_ioloop) == 0)
			continue;

		flatten_bound = cur;
		break;
	}

	/* continue to find the bound for empty event skipping: the first
	   ancestor (from the flatten bound onwards) that was sent or that
	   contributes its own fields/categories */
	skip_bound = NULL;
	for (; cur != NULL; cur = cur->parent) {
		if (cur->sent_to_stats_id == 0 &&
		    (!array_is_created(&cur->fields) ||
		     array_is_empty(&cur->fields)) &&
		    (!array_is_created(&cur->categories) ||
		     array_is_empty(&cur->categories)))
			continue;

		skip_bound = cur;
		break;
	}

	/* fast path - no flattening and no skipping to do */
	if ((event->parent == flatten_bound) &&
	    (event->parent == skip_bound))
		return event_ref(event);

	new_event = event_dup(event);

	/* flatten ancestors up to (excluding) flatten_bound into the copy */
	event_flatten_recurse(new_event, event, flatten_bound);
	replace_parent_ref(new_event, flatten_bound);

	/* skip the empty range by reparenting directly to skip_bound */
	replace_parent_ref(new_event, skip_bound);

	return new_event;
}
369
/* Allocate and initialize a new event with refcount 1 and a unique id,
   link it into the global event list, and inherit the parent's default
   settings (if any). Does NOT fire the CREATE callback - see
   event_create() for the public entry point. */
static struct event *
event_create_internal(struct event *parent, const char *source_filename,
		      unsigned int source_linenum)
{
	struct event *event;
	/* each event owns a private growing pool for its strings/arrays */
	pool_t pool = pool_alloconly_create(MEMPOOL_GROWING"event", 64);

	event = p_new(pool, struct event, 1);
	event->event_passthrough = event_passthrough_vfuncs;
	event->refcount = 1;
	event->id = ++event_id_counter;
	event->pool = pool;
	event->tv_created_ioloop = ioloop_timeval;
	event->min_log_level = LOG_TYPE_INFO;
	i_gettimeofday(&event->tv_created);
	event->source_filename = p_strdup(pool, source_filename);
	event->source_linenum = source_linenum;
	event->change_id = 1;
	if (parent != NULL) {
		event->parent = parent;
		event_ref(event->parent);
		event_copy_parent_defaults(event, parent);
	}
	DLLIST_PREPEND(&events, event);
	return event;
}
396
397 #undef event_create
event_create(struct event * parent,const char * source_filename,unsigned int source_linenum)398 struct event *event_create(struct event *parent, const char *source_filename,
399 unsigned int source_linenum)
400 {
401 struct event *event;
402
403 event = event_create_internal(parent, source_filename, source_linenum);
404 (void)event_call_callbacks_noargs(event, EVENT_CALLBACK_TYPE_CREATE);
405 return event;
406 }
407
#undef event_create_passthrough
/* Create (or continue) a passthrough event on top of 'parent'.
   Only one unfinished passthrough event may exist at a time; it must be
   finished with ->event() before another can be created. If the parent
   is itself a passthrough event, it is reused directly. */
struct event_passthrough *
event_create_passthrough(struct event *parent, const char *source_filename,
			 unsigned int source_linenum)
{
	if (!parent->passthrough) {
		if (event_last_passthrough != NULL) {
			/* API is being used in a wrong or dangerous way */
			i_panic("Can't create multiple passthrough events - "
				"finish the earlier with ->event()");
		}
		struct event *event =
			event_create(parent, source_filename, source_linenum);
		event->passthrough = TRUE;
		/* This event only intends to extend the parent event.
		   Use the parent's creation timestamp. */
		event->tv_created_ioloop = parent->tv_created_ioloop;
		event->tv_created = parent->tv_created;
		memcpy(&event->ru_last, &parent->ru_last, sizeof(parent->ru_last));
		event_last_passthrough = &event->event_passthrough;
	} else {
		/* parent is already a passthrough event - keep extending it */
		event_last_passthrough = &parent->event_passthrough;
	}
	return event_last_passthrough;
}
433
/* Add a reference to the event and return it (for call chaining). */
struct event *event_ref(struct event *event)
{
	i_assert(event->refcount > 0);

	event->refcount++;
	return event;
}
441
/* Drop a reference to *_event and set it to NULL. When the last
   reference is dropped: fire the FREE callback, clear any dangling
   passthrough pointer, free heap-allocated strings, release the parent
   reference, unlink from the global list and free the pool. */
void event_unref(struct event **_event)
{
	struct event *event = *_event;

	if (event == NULL)
		return;
	*_event = NULL;

	i_assert(event->refcount > 0);
	if (--event->refcount > 0)
		return;
	/* a freed event must not be the current global event */
	i_assert(event != current_global_event);

	event_call_callbacks_noargs(event, EVENT_CALLBACK_TYPE_FREE);

	/* NOTE(review): called even when event_last_passthrough is NULL;
	   relies on container_of() then producing a pointer that can never
	   equal a live event - confirm. */
	if (last_passthrough_event() == event)
		event_last_passthrough = NULL;
	/* log_prefix lives in the event pool unless it was reassigned and
	   moved to the system pool (see event_set_log_prefix()) */
	if (event->log_prefix_from_system_pool)
		i_free(event->log_prefix);
	i_free(event->sending_name);
	event_unref(&event->parent);

	DLLIST_REMOVE(&events, event);
	pool_unref(&event->pool);
}
467
/* Return the head of the global linked list of all live events. */
struct event *events_get_head(void)
{
	return events;
}
472
event_push_global(struct event * event)473 struct event *event_push_global(struct event *event)
474 {
475 if (current_global_event != NULL) {
476 if (!array_is_created(&global_event_stack))
477 i_array_init(&global_event_stack, 4);
478 array_push_back(&global_event_stack, ¤t_global_event);
479 }
480 current_global_event = event;
481 return event;
482 }
483
/* Pop the given event off the global event stack. 'event' must be the
   current global event (asserted). The previously pushed event (if any)
   becomes current again; returns the new current global event or NULL.
   Note: no reference counting happens here - the stack stores borrowed
   pointers. */
struct event *event_pop_global(struct event *event)
{
	i_assert(event != NULL);
	i_assert(event == current_global_event);

	if (!array_is_created(&global_event_stack) ||
	    array_count(&global_event_stack) == 0)
		current_global_event = NULL;
	else {
		/* restore the most recently pushed event */
		unsigned int event_count;
		struct event *const *events =
			array_get(&global_event_stack, &event_count);

		i_assert(event_count > 0);
		current_global_event = events[event_count-1];
		array_delete(&global_event_stack, event_count-1, 1);
	}
	return current_global_event;
}
503
/* Return the current global event, or NULL if none has been pushed. */
struct event *event_get_global(void)
{
	return current_global_event;
}
508
/* Set the event's log prefix, replacing any prefix callback.
   Ownership note: the first prefix is allocated from the event's
   alloconly pool; later updates switch to the system heap (and free the
   previous heap copy) so repeated updates don't grow the pool forever. */
static struct event *
event_set_log_prefix(struct event *event, const char *prefix, bool append)
{
	event->log_prefix_callback = NULL;
	event->log_prefix_callback_context = NULL;
	if (event->log_prefix == NULL) {
		/* allocate the first log prefix from the pool */
		event->log_prefix = p_strdup(event->pool, prefix);
	} else {
		/* log prefix is being updated multiple times -
		   switch to system pool so we don't keep leaking memory */
		if (event->log_prefix_from_system_pool)
			i_free(event->log_prefix);
		else
			event->log_prefix_from_system_pool = TRUE;
		event->log_prefix = i_strdup(prefix);
	}
	event->log_prefix_replace = !append;
	return event;
}
529
/* Set a log prefix that is appended after the parents' prefixes. */
struct event *
event_set_append_log_prefix(struct event *event, const char *prefix)
{
	return event_set_log_prefix(event, prefix, TRUE);
}
535
/* Set a log prefix that replaces the parents' prefixes entirely. */
struct event *event_replace_log_prefix(struct event *event, const char *prefix)
{
	return event_set_log_prefix(event, prefix, FALSE);
}
540
/* Ignore the given number of closest parents' log prefixes when
   building this event's full log prefix. */
struct event *
event_drop_parent_log_prefixes(struct event *event, unsigned int count)
{
	event->log_prefixes_dropped = count;
	return event;
}
547
#undef event_set_log_prefix_callback
/* Set a callback that produces the log prefix dynamically, replacing any
   static prefix previously set. 'replace' selects whether the callback's
   prefix replaces or appends to the parents' prefixes. */
struct event *
event_set_log_prefix_callback(struct event *event,
			      bool replace,
			      event_log_prefix_callback_t *callback,
			      void *context)
{
	/* discard any static prefix; i_free() also NULLs the pointer */
	if (event->log_prefix_from_system_pool)
		i_free(event->log_prefix);
	else
		event->log_prefix = NULL;
	event->log_prefix_replace = replace;
	event->log_prefix_callback = callback;
	event->log_prefix_callback_context = context;
	return event;
}
564
#undef event_set_log_message_callback
/* Set a callback that can alter the final log message for this event. */
struct event *
event_set_log_message_callback(struct event *event,
			       event_log_message_callback_t *callback,
			       void *context)
{
	event->log_message_callback = callback;
	event->log_message_callback_context = context;
	return event;
}
575
/* Set the name used when this event is sent; replaces any earlier name.
   The copy is heap-allocated and freed in event_unref(). */
struct event *
event_set_name(struct event *event, const char *name)
{
	i_free(event->sending_name);
	event->sending_name = i_strdup(name);
	return event;
}
583
/* Update the event's source code location. With literal_fname=TRUE the
   caller guarantees 'filename' stays valid for the event's lifetime
   (e.g. a string literal), so no copy is made; otherwise it is
   duplicated into the event pool. */
struct event *
event_set_source(struct event *event, const char *filename,
		 unsigned int linenum, bool literal_fname)
{
	if (strcmp(event->source_filename, filename) != 0) {
		event->source_filename = literal_fname ? filename :
			p_strdup(event->pool, filename);
	}
	event->source_linenum = linenum;
	return event;
}
595
/* Always include the source location when logging this event. */
struct event *event_set_always_log_source(struct event *event)
{
	event->always_log_source = TRUE;
	return event;
}
601
/* Set the minimum log level for this event and recompute its effective
   debug state. */
struct event *event_set_min_log_level(struct event *event, enum log_type level)
{
	event->min_log_level = level;
	event_recalculate_debug_level(event);
	return event;
}
608
/* Return the event's minimum log level. */
enum log_type event_get_min_log_level(const struct event *event)
{
	return event->min_log_level;
}
613
event_set_ptr(struct event * event,const char * key,void * value)614 struct event *event_set_ptr(struct event *event, const char *key, void *value)
615 {
616 struct event_pointer *p;
617
618 if (!array_is_created(&event->pointers))
619 p_array_init(&event->pointers, event->pool, 4);
620 else {
621 /* replace existing pointer if the key already exists */
622 array_foreach_modifiable(&event->pointers, p) {
623 if (strcmp(p->key, key) == 0) {
624 p->value = value;
625 return event;
626 }
627 }
628 }
629 p = array_append_space(&event->pointers);
630 p->key = p_strdup(event->pool, key);
631 p->value = value;
632 return event;
633 }
634
event_get_ptr(const struct event * event,const char * key)635 void *event_get_ptr(const struct event *event, const char *key)
636 {
637 const struct event_pointer *p;
638
639 if (!array_is_created(&event->pointers))
640 return NULL;
641 array_foreach(&event->pointers, p) {
642 if (strcmp(p->key, key) == 0)
643 return p->value;
644 }
645 return NULL;
646 }
647
event_category_find_registered(const char * name)648 struct event_category *event_category_find_registered(const char *name)
649 {
650 struct event_category *cat;
651
652 array_foreach_elem(&event_registered_categories_representative, cat) {
653 if (strcmp(cat->name, name) == 0)
654 return cat;
655 }
656 return NULL;
657 }
658
659 static struct event_internal_category *
event_category_find_internal(const char * name)660 event_category_find_internal(const char *name)
661 {
662 struct event_internal_category *internal;
663
664 array_foreach_elem(&event_registered_categories_internal, internal) {
665 if (strcmp(internal->name, name) == 0)
666 return internal;
667 }
668
669 return NULL;
670 }
671
/* Return the array of all registered categories' representative
   pointers; *count_r is set to the number of entries. */
struct event_category *const *
event_get_registered_categories(unsigned int *count_r)
{
	return array_get(&event_registered_categories_representative, count_r);
}
677
/* Record a newly allocated internal category in both registries (the
   internal-state array and the parallel representative-pointer array). */
static void
event_category_add_to_array(struct event_internal_category *internal)
{
	struct event_category *representative = &internal->representative;

	array_push_back(&event_registered_categories_internal, &internal);
	array_push_back(&event_registered_categories_representative,
			&representative);
}
687
/* Register a category (and, recursively, its parents) and return the
   representative category pointer shared by all registrations of the
   same name. Registration callbacks fire only on the first registration
   of a given name. Panics if the same name is re-registered with a
   different parent. */
static struct event_category *
event_category_register(struct event_category *category)
{
	struct event_internal_category *internal = category->internal;
	event_category_callback_t *callback;
	bool allocated;

	if (internal != NULL)
		return &internal->representative; /* case 2 - see below */

	/* register parent categories first */
	if (category->parent != NULL)
		(void) event_category_register(category->parent);

	/* There are four cases we need to handle:

	   1) a new category is registered
	   2) same category struct is re-registered - already handled above
	      internal NULL check
	   3) different category struct is registered, but it is identical
	      to the previously registered one
	   4) different category struct is registered, and it is different
	      from the previously registered one - a programming error */
	internal = event_category_find_internal(category->name);
	if (internal == NULL) {
		/* case 1: first time we saw this name - allocate new */
		internal = i_new(struct event_internal_category, 1);
		if (category->parent != NULL)
			internal->parent = category->parent->internal;
		internal->name = i_strdup(category->name);
		internal->refcount = 1;
		internal->representative.name = internal->name;
		internal->representative.parent = category->parent;
		internal->representative.internal = internal;

		event_category_add_to_array(internal);

		allocated = TRUE;
	} else {
		/* case 3 or 4: someone registered this name before - share */
		if ((category->parent != NULL) &&
		    (internal->parent != category->parent->internal)) {
			/* case 4 */
			struct event_internal_category *other =
				category->parent->internal;

			i_panic("event category parent mismatch detected: "
				"category %p internal %p (%s), "
				"internal parent %p (%s), public parent %p (%s)",
				category, internal, internal->name,
				internal->parent, internal->parent->name,
				other, other->name);
		}

		internal->refcount++;

		allocated = FALSE;
	}

	category->internal = internal;

	if (!allocated) {
		/* not the first registration of this category */
		return &internal->representative;
	}

	/* first registration: notify listeners, each in its own data-stack
	   frame */
	array_foreach_elem(&event_category_callbacks, callback) T_BEGIN {
		callback(&internal->representative);
	} T_END;

	return &internal->representative;
}
760
/* Return TRUE if 'category' is directly attached to 'event'.
   The comparison is by pointer, which works because only representative
   pointers are ever stored on events (asserted). Parents are not
   consulted. */
static bool
event_find_category(const struct event *event,
		    const struct event_category *category)
{
	struct event_internal_category *internal = category->internal;
	struct event_category *cat;

	/* make sure we're always looking for a representative */
	i_assert(category == &internal->representative);

	array_foreach_elem(&event->categories, cat) {
		if (cat == category)
			return TRUE;
	}
	return FALSE;
}
777
/* Add a NULL-terminated list of categories to the event. Each category
   is registered first (so the shared representative pointer is stored)
   and duplicates are skipped. Marks the event changed and recomputes
   its debug level. */
struct event *
event_add_categories(struct event *event,
		     struct event_category *const *categories)
{
	struct event_category *representative;

	if (!array_is_created(&event->categories))
		p_array_init(&event->categories, event->pool, 4);

	for (unsigned int i = 0; categories[i] != NULL; i++) {
		representative = event_category_register(categories[i]);
		if (!event_find_category(event, representative))
			array_push_back(&event->categories, &representative);
	}
	event_set_changed(event);
	event_recalculate_debug_level(event);
	return event;
}
796
/* Convenience wrapper: add a single category to the event. */
struct event *
event_add_category(struct event *event, struct event_category *category)
{
	struct event_category *const categories[] = { category, NULL };
	return event_add_categories(event, categories);
}
803
804 struct event_field *
event_find_field_nonrecursive(const struct event * event,const char * key)805 event_find_field_nonrecursive(const struct event *event, const char *key)
806 {
807 struct event_field *field;
808
809 if (!array_is_created(&event->fields))
810 return NULL;
811
812 array_foreach_modifiable(&event->fields, field) {
813 if (strcmp(field->key, key) == 0)
814 return field;
815 }
816 return NULL;
817 }
818
819 const struct event_field *
event_find_field_recursive(const struct event * event,const char * key)820 event_find_field_recursive(const struct event *event, const char *key)
821 {
822 const struct event_field *field;
823
824 do {
825 if ((field = event_find_field_nonrecursive(event, key)) != NULL)
826 return field;
827 event = event->parent;
828 } while (event != NULL);
829 return NULL;
830 }
831
/* Like event_find_field_recursive(), but convert the value to a string.
   Integers use dec2str(); timevals are formatted as "sec.usec" on the
   data stack. Returns NULL if the field doesn't exist. */
const char *
event_find_field_recursive_str(const struct event *event, const char *key)
{
	const struct event_field *field;

	field = event_find_field_recursive(event, key);
	if (field == NULL)
		return NULL;

	switch (field->value_type) {
	case EVENT_FIELD_VALUE_TYPE_STR:
		return field->value.str;
	case EVENT_FIELD_VALUE_TYPE_INTMAX:
		return dec2str(field->value.intmax);
	case EVENT_FIELD_VALUE_TYPE_TIMEVAL:
		return t_strdup_printf("%"PRIdTIME_T".%u",
				       field->value.timeval.tv_sec,
				       (unsigned int)field->value.timeval.tv_usec);
	}
	/* all value types are handled above */
	i_unreached();
}
853
/* Return the event's field struct for 'key', creating an empty one
   (with the key copied into the event pool) if it doesn't exist yet.
   Always marks the event as changed, since the caller is about to
   (re)write the value. */
static struct event_field *
event_get_field(struct event *event, const char *key)
{
	struct event_field *field;

	field = event_find_field_nonrecursive(event, key);
	if (field == NULL) {
		if (!array_is_created(&event->fields))
			p_array_init(&event->fields, event->pool, 8);
		field = array_append_space(&event->fields);
		field->key = p_strdup(event->pool, key);
	}
	event_set_changed(event);
	return field;
}
869
/* Set a string field on the event, overwriting any previous value/type
   for the key. A NULL value is silently ignored (the event is returned
   unchanged). The string is copied into the event pool. */
struct event *
event_add_str(struct event *event, const char *key, const char *value)
{
	struct event_field *field;

	if (value == NULL) {
		/* silently ignoring is perhaps better than assert-crashing? */
		return event;
	}

	field = event_get_field(event, key);
	field->value_type = EVENT_FIELD_VALUE_TYPE_STR;
	i_zero(&field->value);
	field->value.str = p_strdup(event->pool, value);
	return event;
}
886
/* Set an integer field on the event, overwriting any previous
   value/type for the key. */
struct event *
event_add_int(struct event *event, const char *key, intmax_t num)
{
	struct event_field *field;

	field = event_get_field(event, key);
	field->value_type = EVENT_FIELD_VALUE_TYPE_INTMAX;
	i_zero(&field->value);
	field->value.intmax = num;
	return event;
}
898
/* Increment an integer field by 'num'. If the field doesn't exist on
   this event (parents are not consulted) or isn't an integer, it is
   (re)set to 'num' instead. */
struct event *
event_inc_int(struct event *event, const char *key, intmax_t num)
{
	struct event_field *field;

	field = event_find_field_nonrecursive(event, key);
	if (field == NULL || field->value_type != EVENT_FIELD_VALUE_TYPE_INTMAX)
		return event_add_int(event, key, num);

	field->value.intmax += num;
	event_set_changed(event);
	return event;
}
912
/* Set a timeval field on the event, overwriting any previous
   value/type for the key. The timeval is copied by value. */
struct event *
event_add_timeval(struct event *event, const char *key,
		  const struct timeval *tv)
{
	struct event_field *field;

	field = event_get_field(event, key);
	field->value_type = EVENT_FIELD_VALUE_TYPE_TIMEVAL;
	i_zero(&field->value);
	field->value.timeval = *tv;
	return event;
}
925
/* Add a NULL-key-terminated list of fields to the event. For each entry
   the type is chosen by priority: a non-NULL string wins, then a
   timeval with nonzero tv_sec, otherwise the intmax value is used. */
struct event *
event_add_fields(struct event *event,
		 const struct event_add_field *fields)
{
	for (unsigned int i = 0; fields[i].key != NULL; i++) {
		if (fields[i].value != NULL)
			event_add_str(event, fields[i].key, fields[i].value);
		else if (fields[i].value_timeval.tv_sec != 0) {
			event_add_timeval(event, fields[i].key,
					  &fields[i].value_timeval);
		} else {
			event_add_int(event, fields[i].key,
				      fields[i].value_intmax);
		}
	}
	return event;
}
943
/* "Clear" a field by overwriting it with an empty string - the key
   itself remains present on the event, which also masks any value
   inherited from parent events. */
void event_field_clear(struct event *event, const char *key)
{
	event_add_str(event, key, "");
}
948
/* Return the event's parent, or NULL for a root event. */
struct event *event_get_parent(const struct event *event)
{
	return event->parent;
}
953
/* Return the wall-clock time when the event was created. */
void event_get_create_time(const struct event *event, struct timeval *tv_r)
{
	*tv_r = event->tv_created;
}
958
/* Return the time the event was last sent. Returns FALSE (with *tv_r
   zeroed) if it has never been sent - tv_sec==0 is used as the
   "never sent" marker. */
bool event_get_last_send_time(const struct event *event, struct timeval *tv_r)
{
	*tv_r = event->tv_last_sent;
	return tv_r->tv_sec != 0;
}
964
event_get_last_duration(const struct event * event,uintmax_t * duration_usecs_r)965 void event_get_last_duration(const struct event *event,
966 uintmax_t *duration_usecs_r)
967 {
968 if (event->tv_last_sent.tv_sec == 0) {
969 *duration_usecs_r = 0;
970 return;
971 }
972 long long diff = timeval_diff_usecs(&event->tv_last_sent,
973 &event->tv_created);
974 i_assert(diff >= 0);
975 *duration_usecs_r = diff;
976 }
977
978 const struct event_field *
event_get_fields(const struct event * event,unsigned int * count_r)979 event_get_fields(const struct event *event, unsigned int *count_r)
980 {
981 if (!array_is_created(&event->fields)) {
982 *count_r = 0;
983 return NULL;
984 }
985 return array_get(&event->fields, count_r);
986 }
987
988 struct event_category *const *
event_get_categories(const struct event * event,unsigned int * count_r)989 event_get_categories(const struct event *event, unsigned int *count_r)
990 {
991 if (!array_is_created(&event->categories)) {
992 *count_r = 0;
993 return NULL;
994 }
995 return array_get(&event->categories, count_r);
996 }
997
void event_send(struct event *event, struct failure_context *ctx,
		const char *fmt, ...)
{
	/* Varargs convenience wrapper around event_vsend(). */
	va_list args;

	va_start(args, fmt);
	event_vsend(event, ctx, fmt, args);
	va_end(args);
}
1007
/* Send the event: stamp tv_last_sent, optionally record CPU usage, run
   the registered send callbacks, and log the message unless a callback
   suppressed it or it is a debug message that is not being debug-logged.
   The event is aborted (name freed, passthrough unreffed) afterwards. */
void event_vsend(struct event *event, struct failure_context *ctx,
		 const char *fmt, va_list args)
{
	i_gettimeofday(&event->tv_last_sent);

	/* Skip adding user_cpu_usecs if not enabled. */
	/* A nonzero ru_last.ru_utime acts as the "enabled" flag set by
	   event_enable_user_cpu_usecs(). */
	if (event->ru_last.ru_utime.tv_sec != 0 ||
	    event->ru_last.ru_utime.tv_usec != 0) {
		struct rusage ru_current;
		get_self_rusage(&ru_current);
		long long udiff = timeval_diff_usecs(&ru_current.ru_utime,
						     &event->ru_last.ru_utime);
		/* clamp negative deltas to 0 */
		event_add_int(event, "user_cpu_usecs", udiff > 0 ? udiff : 0);
	}
	/* NOTE(review): args is handed to the callbacks and then reused for
	   i_log_typev(); presumably callbacks va_copy() before consuming it —
	   reusing a consumed va_list would be undefined behavior. Verify. */
	if (event_call_callbacks(event, EVENT_CALLBACK_TYPE_SEND,
				 ctx, fmt, args)) {
		if (ctx->type != LOG_TYPE_DEBUG ||
		    event->sending_debug_log)
			i_log_typev(ctx, fmt, args);
	}
	event_send_abort(event);
}
1030
event_send_abort(struct event * event)1031 void event_send_abort(struct event *event)
1032 {
1033 /* if the event is sent again, it needs a new name */
1034 i_free(event->sending_name);
1035 if (event->passthrough)
1036 event_unref(&event);
1037 }
1038
/* Serialize one field as <type-code><tab-escaped key>\t<value...>.
   The layout must stay in sync with the field parsing in
   event_import_unescaped(). */
static void
event_export_field_value(string_t *dest, const struct event_field *field)
{
	switch (field->value_type) {
	case EVENT_FIELD_VALUE_TYPE_STR:
		/* 'S' <key> \t <tab-escaped string value> */
		str_append_c(dest, EVENT_CODE_FIELD_STR);
		str_append_tabescaped(dest, field->key);
		str_append_c(dest, '\t');
		str_append_tabescaped(dest, field->value.str);
		break;
	case EVENT_FIELD_VALUE_TYPE_INTMAX:
		/* 'I' <key> \t <decimal intmax> */
		str_append_c(dest, EVENT_CODE_FIELD_INTMAX);
		str_append_tabescaped(dest, field->key);
		str_printfa(dest, "\t%jd", field->value.intmax);
		break;
	case EVENT_FIELD_VALUE_TYPE_TIMEVAL:
		/* 'T' <key> \t <secs> \t <usecs> */
		str_append_c(dest, EVENT_CODE_FIELD_TIMEVAL);
		str_append_tabescaped(dest, field->key);
		str_printfa(dest, "\t%"PRIdTIME_T"\t%u",
			    field->value.timeval.tv_sec,
			    (unsigned int)field->value.timeval.tv_usec);
		break;
	}
}
1063
/* Serialize the event into a tab-separated string understood by
   event_import(). The first two columns (tv_created secs/usecs) are
   mandatory; every following column starts with a one-letter
   enum event_code identifying its content. */
void event_export(const struct event *event, string_t *dest)
{
	/* required fields: */
	str_printfa(dest, "%"PRIdTIME_T"\t%u",
		    event->tv_created.tv_sec,
		    (unsigned int)event->tv_created.tv_usec);

	/* optional fields: */
	if (event->source_filename != NULL) {
		/* 's' <filename> \t <linenum> */
		str_append_c(dest, '\t');
		str_append_c(dest, EVENT_CODE_SOURCE);
		str_append_tabescaped(dest, event->source_filename);
		str_printfa(dest, "\t%u", event->source_linenum);
	}
	if (event->always_log_source) {
		/* 'a' flag, no payload */
		str_append_c(dest, '\t');
		str_append_c(dest, EVENT_CODE_ALWAYS_LOG_SOURCE);
	}
	if (event->tv_last_sent.tv_sec != 0) {
		/* 'l' <secs> \t <usecs> */
		str_printfa(dest, "\t%c%"PRIdTIME_T"\t%u",
			    EVENT_CODE_TV_LAST_SENT,
			    event->tv_last_sent.tv_sec,
			    (unsigned int)event->tv_last_sent.tv_usec);
	}
	if (event->sending_name != NULL) {
		/* 'n' <name> */
		str_append_c(dest, '\t');
		str_append_c(dest, EVENT_CODE_SENDING_NAME);
		str_append_tabescaped(dest, event->sending_name);
	}

	/* 'c' <category name>, one column per category */
	if (array_is_created(&event->categories)) {
		struct event_category *cat;
		array_foreach_elem(&event->categories, cat) {
			str_append_c(dest, '\t');
			str_append_c(dest, EVENT_CODE_CATEGORY);
			str_append_tabescaped(dest, cat->name);
		}
	}

	/* typed fields, see event_export_field_value() */
	if (array_is_created(&event->fields)) {
		const struct event_field *field;
		array_foreach(&event->fields, field) {
			str_append_c(dest, '\t');
			event_export_field_value(dest, field);
		}
	}
}
1111
bool event_import(struct event *event, const char *str, const char **error_r)
{
	/* Split the exported string on (escaped) tabs, then parse it. */
	return event_import_unescaped(event, t_strsplit_tabescaped(str),
				      error_r);
}
1117
event_import_tv(const char * arg_secs,const char * arg_usecs,struct timeval * tv_r,const char ** error_r)1118 static bool event_import_tv(const char *arg_secs, const char *arg_usecs,
1119 struct timeval *tv_r, const char **error_r)
1120 {
1121 unsigned int usecs;
1122
1123 if (str_to_time(arg_secs, &tv_r->tv_sec) < 0) {
1124 *error_r = "Invalid timeval seconds parameter";
1125 return FALSE;
1126 }
1127
1128 if (arg_usecs == NULL) {
1129 *error_r = "Timeval missing microseconds parameter";
1130 return FALSE;
1131 }
1132 if (str_to_uint(arg_usecs, &usecs) < 0 || usecs >= 1000000) {
1133 *error_r = "Invalid timeval microseconds parameter";
1134 return FALSE;
1135 }
1136 tv_r->tv_usec = usecs;
1137 return TRUE;
1138 }
1139
/* Parse an already tab-split exported event (see event_export()) into
   this event. args[0]/args[1] are the mandatory tv_created columns;
   every later column starts with a one-letter enum event_code. Returns
   FALSE with *error_r set on malformed input. */
bool event_import_unescaped(struct event *event, const char *const *args,
			    const char **error_r)
{
	const char *error;

	/* Event's create callback has already added service:<name> category.
	   This imported event may be coming from another service process
	   though, so clear it out. */
	if (array_is_created(&event->categories))
		array_clear(&event->categories);

	/* required fields: */
	if (args[0] == NULL) {
		*error_r = "Missing required fields";
		return FALSE;
	}
	if (!event_import_tv(args[0], args[1], &event->tv_created, &error)) {
		*error_r = t_strdup_printf("Invalid tv_created: %s", error);
		return FALSE;
	}
	args += 2;

	/* optional fields: */
	while (*args != NULL) {
		const char *arg = *args;
		/* first character selects the column type; the rest of the
		   column (arg after the increment) is its inline payload */
		enum event_code code = arg[0];

		arg++;
		switch (code) {
		case EVENT_CODE_ALWAYS_LOG_SOURCE:
			/* flag only, no payload */
			event->always_log_source = TRUE;
			break;
		case EVENT_CODE_CATEGORY: {
			/* categories are referenced by name and must already
			   be registered in this process */
			struct event_category *category =
				event_category_find_registered(arg);
			if (category == NULL) {
				*error_r = t_strdup_printf("Unregistered category: '%s'", arg);
				return FALSE;
			}
			if (!array_is_created(&event->categories))
				p_array_init(&event->categories, event->pool, 4);
			/* avoid duplicate entries */
			if (!event_find_category(event, category))
				array_push_back(&event->categories, &category);
			break;
		}
		case EVENT_CODE_TV_LAST_SENT:
			/* seconds inline, microseconds in the next column */
			if (!event_import_tv(arg, args[1], &event->tv_last_sent,
					     &error)) {
				*error_r = t_strdup_printf(
					"Invalid tv_last_sent: %s", error);
				return FALSE;
			}
			args++;
			break;
		case EVENT_CODE_SENDING_NAME:
			i_free(event->sending_name);
			event->sending_name = i_strdup(arg);
			break;
		case EVENT_CODE_SOURCE: {
			/* filename inline, line number in the next column */
			unsigned int linenum;

			if (args[1] == NULL) {
				*error_r = "Source line number missing";
				return FALSE;
			}
			if (str_to_uint(args[1], &linenum) < 0) {
				*error_r = "Invalid Source line number";
				return FALSE;
			}
			event_set_source(event, arg, linenum, FALSE);
			args++;
			break;
		}

		case EVENT_CODE_FIELD_INTMAX:
		case EVENT_CODE_FIELD_STR:
		case EVENT_CODE_FIELD_TIMEVAL: {
			/* typed fields: key inline, value(s) in the
			   following column(s) */
			if (*arg == '\0') {
				*error_r = "Field name is missing";
				return FALSE;
			}
			struct event_field *field =
				event_get_field(event, arg);
			if (args[1] == NULL) {
				*error_r = "Field value is missing";
				return FALSE;
			}
			/* advance to the value column */
			args++;
			i_zero(&field->value);
			switch (code) {
			case EVENT_CODE_FIELD_INTMAX:
				field->value_type = EVENT_FIELD_VALUE_TYPE_INTMAX;
				if (str_to_intmax(*args, &field->value.intmax) < 0) {
					*error_r = t_strdup_printf(
						"Invalid field value '%s' number for '%s'",
						*args, field->key);
					return FALSE;
				}
				break;
			case EVENT_CODE_FIELD_STR:
				if (field->value_type == EVENT_FIELD_VALUE_TYPE_STR &&
				    null_strcmp(field->value.str, *args) == 0) {
					/* already identical value */
					break;
				}
				field->value_type = EVENT_FIELD_VALUE_TYPE_STR;
				field->value.str = p_strdup(event->pool, *args);
				break;
			case EVENT_CODE_FIELD_TIMEVAL:
				/* consumes two columns: secs and usecs */
				field->value_type = EVENT_FIELD_VALUE_TYPE_TIMEVAL;
				if (!event_import_tv(args[0], args[1],
						     &field->value.timeval, &error)) {
					*error_r = t_strdup_printf(
						"Field '%s' value '%s': %s",
						field->key, args[1], error);
					return FALSE;
				}
				args++;
				break;
			default:
				i_unreached();
			}
			break;
		}
		}
		args++;
	}
	return TRUE;
}
1269
void event_register_callback(event_callback_t *callback)
{
	/* Append to the global list of send-time event callbacks. */
	array_push_back(&event_handlers, &callback);
}
1274
void event_unregister_callback(event_callback_t *callback)
{
	/* Remove a previously registered callback. The callback must have
	   been registered; otherwise this is a programming error. */
	event_callback_t *const *cbp;

	array_foreach(&event_handlers, cbp) {
		if (*cbp != callback)
			continue;
		array_delete(&event_handlers,
			     array_foreach_idx(&event_handlers, cbp), 1);
		return;
	}
	i_unreached();
}
1289
void event_category_register_callback(event_category_callback_t *callback)
{
	/* Append to the global list of category-registration callbacks. */
	array_push_back(&event_category_callbacks, &callback);
}
1294
void event_category_unregister_callback(event_category_callback_t *callback)
{
	/* Remove a previously registered category callback. It must have
	   been registered; otherwise this is a programming error. */
	event_category_callback_t *const *cbp;

	array_foreach(&event_category_callbacks, cbp) {
		if (*cbp != callback)
			continue;
		array_delete(&event_category_callbacks,
			     array_foreach_idx(&event_category_callbacks, cbp),
			     1);
		return;
	}
	i_unreached();
}
1310
1311 static struct event_passthrough *
event_passthrough_set_append_log_prefix(const char * prefix)1312 event_passthrough_set_append_log_prefix(const char *prefix)
1313 {
1314 event_set_append_log_prefix(last_passthrough_event(), prefix);
1315 return event_last_passthrough;
1316 }
1317
1318 static struct event_passthrough *
event_passthrough_replace_log_prefix(const char * prefix)1319 event_passthrough_replace_log_prefix(const char *prefix)
1320 {
1321 event_replace_log_prefix(last_passthrough_event(), prefix);
1322 return event_last_passthrough;
1323 }
1324
1325 static struct event_passthrough *
event_passthrough_set_name(const char * name)1326 event_passthrough_set_name(const char *name)
1327 {
1328 event_set_name(last_passthrough_event(), name);
1329 return event_last_passthrough;
1330 }
1331
1332 static struct event_passthrough *
event_passthrough_set_source(const char * filename,unsigned int linenum,bool literal_fname)1333 event_passthrough_set_source(const char *filename,
1334 unsigned int linenum, bool literal_fname)
1335 {
1336 event_set_source(last_passthrough_event(), filename,
1337 linenum, literal_fname);
1338 return event_last_passthrough;
1339 }
1340
1341 static struct event_passthrough *
event_passthrough_set_always_log_source(void)1342 event_passthrough_set_always_log_source(void)
1343 {
1344 event_set_always_log_source(last_passthrough_event());
1345 return event_last_passthrough;
1346 }
1347
1348 static struct event_passthrough *
event_passthrough_add_categories(struct event_category * const * categories)1349 event_passthrough_add_categories(struct event_category *const *categories)
1350 {
1351 event_add_categories(last_passthrough_event(), categories);
1352 return event_last_passthrough;
1353 }
1354
1355 static struct event_passthrough *
event_passthrough_add_category(struct event_category * category)1356 event_passthrough_add_category(struct event_category *category)
1357 {
1358 event_add_category(last_passthrough_event(), category);
1359 return event_last_passthrough;
1360 }
1361
1362 static struct event_passthrough *
event_passthrough_add_fields(const struct event_add_field * fields)1363 event_passthrough_add_fields(const struct event_add_field *fields)
1364 {
1365 event_add_fields(last_passthrough_event(), fields);
1366 return event_last_passthrough;
1367 }
1368
1369 static struct event_passthrough *
event_passthrough_add_str(const char * key,const char * value)1370 event_passthrough_add_str(const char *key, const char *value)
1371 {
1372 event_add_str(last_passthrough_event(), key, value);
1373 return event_last_passthrough;
1374 }
1375
1376 static struct event_passthrough *
event_passthrough_add_int(const char * key,intmax_t num)1377 event_passthrough_add_int(const char *key, intmax_t num)
1378 {
1379 event_add_int(last_passthrough_event(), key, num);
1380 return event_last_passthrough;
1381 }
1382
1383 static struct event_passthrough *
event_passthrough_add_timeval(const char * key,const struct timeval * tv)1384 event_passthrough_add_timeval(const char *key, const struct timeval *tv)
1385 {
1386 event_add_timeval(last_passthrough_event(), key, tv);
1387 return event_last_passthrough;
1388 }
1389
1390 static struct event_passthrough *
event_passthrough_inc_int(const char * key,intmax_t num)1391 event_passthrough_inc_int(const char *key, intmax_t num)
1392 {
1393 event_inc_int(last_passthrough_event(), key, num);
1394 return event_last_passthrough;
1395 }
1396
1397 static struct event_passthrough *
event_passthrough_clear_field(const char * key)1398 event_passthrough_clear_field(const char *key)
1399 {
1400 event_field_clear(last_passthrough_event(), key);
1401 return event_last_passthrough;
1402 }
1403
event_passthrough_event(void)1404 static struct event *event_passthrough_event(void)
1405 {
1406 struct event *event = last_passthrough_event();
1407 event_last_passthrough = NULL;
1408 return event;
1409 }
1410
/* vtable wiring the chainable passthrough API to the static wrappers
   above; declared extern near the top of the file */
const struct event_passthrough event_passthrough_vfuncs = {
	.append_log_prefix = event_passthrough_set_append_log_prefix,
	.replace_log_prefix = event_passthrough_replace_log_prefix,
	.set_name = event_passthrough_set_name,
	.set_source = event_passthrough_set_source,
	.set_always_log_source = event_passthrough_set_always_log_source,
	.add_categories = event_passthrough_add_categories,
	.add_category = event_passthrough_add_category,
	.add_fields = event_passthrough_add_fields,
	.add_str = event_passthrough_add_str,
	.add_int = event_passthrough_add_int,
	.add_timeval = event_passthrough_add_timeval,
	.inc_int = event_passthrough_inc_int,
	.clear_field = event_passthrough_clear_field,
	.event = event_passthrough_event,
};
1427
event_enable_user_cpu_usecs(struct event * event)1428 void event_enable_user_cpu_usecs(struct event *event)
1429 {
1430 get_self_rusage(&event->ru_last);
1431 }
1432
lib_event_init(void)1433 void lib_event_init(void)
1434 {
1435 i_array_init(&event_handlers, 4);
1436 i_array_init(&event_category_callbacks, 4);
1437 i_array_init(&event_registered_categories_internal, 16);
1438 i_array_init(&event_registered_categories_representative, 16);
1439 }
1440
lib_event_deinit(void)1441 void lib_event_deinit(void)
1442 {
1443 struct event_internal_category *internal;
1444
1445 event_unset_global_debug_log_filter();
1446 event_unset_global_debug_send_filter();
1447 event_unset_global_core_log_filter();
1448 for (struct event *event = events; event != NULL; event = event->next) {
1449 i_warning("Event %p leaked (parent=%p): %s:%u",
1450 event, event->parent,
1451 event->source_filename, event->source_linenum);
1452 }
1453 /* categories cannot be unregistered, so just free them here */
1454 array_foreach_elem(&event_registered_categories_internal, internal) {
1455 i_free(internal->name);
1456 i_free(internal);
1457 }
1458 array_free(&event_handlers);
1459 array_free(&event_category_callbacks);
1460 array_free(&event_registered_categories_internal);
1461 array_free(&event_registered_categories_representative);
1462 array_free(&global_event_stack);
1463 }
1464