/* Copyright (c) 2002-2018 Dovecot authors, see the included COPYING file */

/* @UNSAFE: whole file */

#include "lib.h"
#include "backtrace-string.h"
#include "str.h"
#include "data-stack.h"

/* Initial stack size - this should be large enough that normal use
   doesn't exceed it, so that extra malloc()ing is avoided. */
#ifdef DEBUG
# define INITIAL_STACK_SIZE (1024*10)
#else
# define INITIAL_STACK_SIZE (1024*32)
#endif

#ifdef DEBUG
# define CLEAR_CHR 0xD5 /* D5 is mnemonic for "Data 5tack" */
# define SENTRY_COUNT (4*8)
# define BLOCK_CANARY ((void *)0xBADBADD5BADBADD5) /* contains 'D5' */
# define ALLOC_SIZE(size) (MEM_ALIGN(sizeof(size_t)) + MEM_ALIGN(size + SENTRY_COUNT))
#else
# define CLEAR_CHR 0
# define BLOCK_CANARY NULL
# define block_canary_check(block) do { ; } while(0)
# define ALLOC_SIZE(size) MEM_ALIGN(size)
#endif

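/* Sketch of the DEBUG-mode allocation layout implied by ALLOC_SIZE() and
   t_malloc_real():

     [ MEM_ALIGN(sizeof(size_t)) ][ "size" user bytes ][ sentry bytes ]
          saved requested size                           CLEAR_CHR fill up to
                                                         MEM_ALIGN(size + SENTRY_COUNT)

   t_pop_verify() walks these records: it reads the saved size, skips the
   user data and panics if any sentry byte no longer contains CLEAR_CHR. */
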
struct stack_block {
	struct stack_block *prev, *next;

	size_t size, left;
#ifdef DEBUG
	/* The lowest value that "left" has had in this block since it was
	   last popped. This is used to keep track of which parts of the
	   block need to be cleared if DEBUG is used. */
	size_t left_lowwater;
#endif
	/* NULL or a poison value, just in case something accesses
	   the memory in front of an allocated area */
	void *canary;
	unsigned char data[FLEXIBLE_ARRAY_MEMBER];
};

#define SIZEOF_MEMBLOCK MEM_ALIGN(sizeof(struct stack_block))

#define STACK_BLOCK_DATA(block) \
	(block->data + (SIZEOF_MEMBLOCK - sizeof(struct stack_block)))

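/* Note: block->data begins right at the end of struct stack_block, so the
   expression above makes the usable data area start SIZEOF_MEMBLOCK bytes
   into the block, i.e. at a MEM_ALIGN()ed offset. mem_block_alloc() relies
   on this by malloc()ing SIZEOF_MEMBLOCK + alloc_size bytes. */
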
struct stack_frame {
	struct stack_frame *prev;

	struct stack_block *block;
	/* Each frame initializes this to current_block->left, i.e. how much
	   free space is left in the block. So the frame's start position in
	   the block is (block.size - block_space_left) */
	size_t block_space_left;
	size_t last_alloc_size;
	const char *marker;
#ifdef DEBUG
	/* Fairly arbitrary profiling data */
	unsigned long long alloc_bytes;
	unsigned int alloc_count;
#endif
};

#ifdef STATIC_CHECKER
struct data_stack_frame {
	unsigned int id;
};
#endif

unsigned int data_stack_frame_id = 0;

static bool data_stack_initialized = FALSE;
static data_stack_frame_t root_frame_id;

static struct stack_frame *current_frame;

/* The latest block currently used for allocation. current_block->next is
   always NULL. */
static struct stack_block *current_block;
/* The largest block that the data stack has allocated so far and that has
   since been released. Keeping it cached prevents rapid malloc()+free()ing
   when the data stack is constantly grown and shrunk. */
static struct stack_block *unused_block = NULL;

static struct event *event_datastack = NULL;
static bool event_datastack_deinitialized = FALSE;

static struct stack_block *last_buffer_block;
static size_t last_buffer_size;
static bool outofmem = FALSE;

/* Emergency block used when malloc() fails or the stack is corrupted,
   so that i_panic() can still allocate. */
static union {
	struct stack_block block;
	unsigned char data[512];
} outofmem_area;

static struct stack_block *mem_block_alloc(size_t min_size);

/* Returns a pointer to the first byte after the last allocation in the
   block. */
static inline unsigned char *
data_stack_after_last_alloc(struct stack_block *block)
{
	return STACK_BLOCK_DATA(block) + (block->size - block->left);
}

static void data_stack_last_buffer_reset(bool preserve_data ATTR_UNUSED)
{
	if (last_buffer_block != NULL) {
#ifdef DEBUG
		unsigned char *last_alloc_end, *p, *pend;

		/* We assume that this function gets called before
		   current_block changes. */
		i_assert(last_buffer_block == current_block);

		last_alloc_end = data_stack_after_last_alloc(current_block);
		p = last_alloc_end + MEM_ALIGN(sizeof(size_t)) + last_buffer_size;
		pend = last_alloc_end + ALLOC_SIZE(last_buffer_size);
#endif
		/* reset the t_buffer_get() mark - not really needed, but it
		   makes it easier to notice if t_malloc()/t_push()/t_pop()
		   is called between t_buffer_get() and t_buffer_alloc().
		   do this before we get to i_panic() to avoid recursive
		   panics. */
		last_buffer_block = NULL;

#ifdef DEBUG
		/* NOTE: If the panic below triggers, it may also be due to
		   an internal bug in data-stack (since this is rather
		   complex). While debugging whether that is the case, it's a
		   good idea to change the i_panic() to abort(). Otherwise
		   the i_panic() changes the data-stack's internal state and
		   complicates debugging. */
		while (p < pend)
			if (*p++ != CLEAR_CHR)
				i_panic("t_buffer_get(): buffer overflow");

		if (!preserve_data) {
			p = last_alloc_end;
			memset(p, CLEAR_CHR, SENTRY_COUNT);
		}
#endif
	}
}

data_stack_frame_t t_push(const char *marker)
{
	struct stack_frame *frame;

	i_assert(marker != NULL);

	if (unlikely(!data_stack_initialized)) {
		/* kludgy, but allow this before initialization */
		data_stack_init();
		return t_push(marker);
	}

	/* allocate the new frame from the data stack itself */
	frame = t_buffer_get(sizeof(*frame));
	frame->prev = current_frame;
	current_frame = frame;

	/* mark our current position */
	current_frame->block = current_block;
	current_frame->block_space_left = current_block->left;
	current_frame->last_alloc_size = 0;
	current_frame->marker = marker;
#ifdef DEBUG
	current_frame->alloc_bytes = 0;
	current_frame->alloc_count = 0;
#endif

	t_buffer_alloc(sizeof(*frame));

#ifndef STATIC_CHECKER
	return data_stack_frame_id++;
#else
	struct data_stack_frame *ds_frame = i_new(struct data_stack_frame, 1);
	ds_frame->id = data_stack_frame_id++;
	return ds_frame;
#endif
}

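/* Typical usage, as a sketch - the T_BEGIN/T_END macros in lib.h wrap a
   t_push()/t_pop() pair roughly like this:

     data_stack_frame_t frame_id = t_push("my operation");
     char *buf = t_malloc0(256);
     ... buf stays valid until the frame is popped ...
     if (!t_pop(&frame_id))
         i_panic("Leaked t_push() call");
*/
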
data_stack_frame_t t_push_named(const char *format, ...)
{
	data_stack_frame_t ret = t_push(format);
#ifdef DEBUG
	va_list args;

	va_start(args, format);
	current_frame->marker = p_strdup_vprintf(unsafe_data_stack_pool,
						 format, args);
	va_end(args);
#else
	/* in non-DEBUG builds the format string itself is used as the
	   marker and the varargs are ignored */
#endif

	return ret;
}

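/* Usage sketch: in DEBUG builds the marker gets formatted, e.g.
   t_push_named("parse %s", filename) shows the file name in panics and
   grow events; in non-DEBUG builds the marker is just "parse %s". */
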
#ifdef DEBUG
static void block_canary_check(struct stack_block *block)
{
	if (block->canary != BLOCK_CANARY) {
		/* Make sure i_panic() won't try to allocate from the
		   same block by falling back onto our emergency block. */
		current_block = &outofmem_area.block;
		i_panic("Corrupted data stack canary");
	}
}
#endif

static void free_blocks(struct stack_block *block)
{
	struct stack_block *next;

	/* free all the blocks; if one of them is bigger than the current
	   unused_block, keep it as the new unused_block instead */
	while (block != NULL) {
		block_canary_check(block);
		next = block->next;

#ifdef DEBUG
		memset(STACK_BLOCK_DATA(block), CLEAR_CHR, block->size);
#endif

		if (block == &outofmem_area.block)
			;
		else if (unused_block == NULL ||
			 block->size > unused_block->size) {
			free(unused_block);
			unused_block = block;
		} else {
			free(block);
		}

		block = next;
	}
}

#ifdef DEBUG
static void t_pop_verify(void)
{
	struct stack_block *block;
	unsigned char *p;
	size_t pos, max_pos, used_size;

	block = current_frame->block;
	pos = block->size - current_frame->block_space_left;
	while (block != NULL) {
		block_canary_check(block);
		used_size = block->size - block->left;
		p = STACK_BLOCK_DATA(block);
		while (pos < used_size) {
			size_t requested_size = *(size_t *)(p + pos);
			if (used_size - pos < requested_size)
				i_panic("data stack[%s]: saved alloc size broken",
					current_frame->marker);
			max_pos = pos + ALLOC_SIZE(requested_size);
			pos += MEM_ALIGN(sizeof(size_t)) + requested_size;

			for (; pos < max_pos; pos++) {
				if (p[pos] != CLEAR_CHR)
					i_panic("data stack[%s]: buffer overflow",
						current_frame->marker);
			}
		}

		/* if t_buffer_get() had been used, the rest of the buffer
		   may not contain CLEAR_CHRs. but all the actual allocations
		   have already been checked above, so the rest can be
		   skipped. */
		block = block->next;
		pos = 0;
	}
}
#endif

void t_pop_last_unsafe(void)
{
	size_t block_space_left;

	if (unlikely(current_frame == NULL))
		i_panic("t_pop() called with empty stack");

	data_stack_last_buffer_reset(FALSE);
#ifdef DEBUG
	t_pop_verify();
#endif

	/* Usually the block doesn't change. If it doesn't, the next pointer
	   must also be NULL. */
	if (current_block != current_frame->block) {
		current_block = current_frame->block;
		if (current_block->next != NULL) {
			/* free unused blocks */
			free_blocks(current_block->next);
			current_block->next = NULL;
		}
	}
	block_canary_check(current_block);

	/* current_frame points inside the stack frame that will be freed.
	   make sure it's not accessed after it's already freed/cleaned. */
	block_space_left = current_frame->block_space_left;
	current_frame = current_frame->prev;

#ifdef DEBUG
	size_t start_pos, end_pos;

	start_pos = current_block->size - block_space_left;
	end_pos = current_block->size - current_block->left_lowwater;
	i_assert(end_pos >= start_pos);
	memset(STACK_BLOCK_DATA(current_block) + start_pos, CLEAR_CHR,
	       end_pos - start_pos);
	current_block->left_lowwater = block_space_left;
#endif

	current_block->left = block_space_left;

	data_stack_frame_id--;
}

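/* Note: t_pop_last_unsafe() is "unsafe" because it skips the frame id
   check; t_pop() below wraps it and verifies that the caller is popping
   the frame it actually pushed. */
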
bool t_pop(data_stack_frame_t *id)
{
	t_pop_last_unsafe();
#ifndef STATIC_CHECKER
	if (unlikely(data_stack_frame_id != *id))
		return FALSE;
	*id = 0;
#else
	unsigned int frame_id = (*id)->id;
	i_free_and_null(*id);

	if (unlikely(data_stack_frame_id != frame_id))
		return FALSE;
#endif
	return TRUE;
}

bool t_pop_pass_str(data_stack_frame_t *id, const char **str)
{
	if (str == NULL || !data_stack_frame_contains(id, *str))
		return t_pop(id);

	/* FIXME: The string could be memmove()d to the beginning of the
	   data stack frame and the previous frame's size extended past it.
	   This would avoid the malloc. It's a bit complicated though. */
	char *tmp_str = i_strdup(*str);
	bool ret = t_pop(id);
	*str = t_strdup(tmp_str);
	i_free(tmp_str);
	return ret;
}

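/* Usage sketch (hypothetical caller): return a data-stack string to the
   parent frame without it being freed together with the popped frame:

     data_stack_frame_t frame_id = t_push("build string");
     const char *result = t_strdup_printf("value=%d", 42);
     (void)t_pop_pass_str(&frame_id, &result);
     ... result is now allocated from the parent frame ...
*/
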
static void mem_block_reset(struct stack_block *block)
{
	block->prev = NULL;
	block->next = NULL;
	block->left = block->size;
#ifdef DEBUG
	block->left_lowwater = block->size;
#endif
}

static struct stack_block *mem_block_alloc(size_t min_size)
{
	struct stack_block *block;
	size_t prev_size, alloc_size;

	prev_size = current_block == NULL ? 0 : current_block->size;
	/* Use INITIAL_STACK_SIZE without growing it to the nearest power. */
	alloc_size = prev_size == 0 ? min_size :
		nearest_power(MALLOC_ADD(prev_size, min_size));

	/* nearest_power() returns 2^n values, so alloc_size can't be
	   anywhere close to SIZE_MAX */
	block = malloc(SIZEOF_MEMBLOCK + alloc_size);
	if (unlikely(block == NULL)) {
		if (outofmem) {
			if (min_size > outofmem_area.block.left)
				abort();
			return &outofmem_area.block;
		}
		outofmem = TRUE;
		i_panic("data stack: Out of memory when allocating %zu bytes",
			alloc_size + SIZEOF_MEMBLOCK);
	}
	block->size = alloc_size;
	block->canary = BLOCK_CANARY;
	mem_block_reset(block);
#ifdef DEBUG
	memset(STACK_BLOCK_DATA(block), CLEAR_CHR, alloc_size);
#endif
	return block;
}

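/* Growth policy, with illustrative numbers: if the current block is 32 kB
   and an 8 kB allocation doesn't fit, the new block's size becomes
   nearest_power(32768 + 8192) = 65536 bytes, so the stack roughly doubles
   each time it overflows. */
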
static void data_stack_send_grow_event(size_t last_alloc_size)
{
	if (event_datastack_deinitialized) {
		/* already in the deinitialization code -
		   don't send more events */
		return;
	}
	if (event_datastack == NULL)
		event_datastack = event_create(NULL);
	event_set_name(event_datastack, "data_stack_grow");
	event_add_int(event_datastack, "alloc_size", data_stack_get_alloc_size());
	event_add_int(event_datastack, "used_size", data_stack_get_used_size());
	event_add_int(event_datastack, "last_alloc_size", last_alloc_size);
	event_add_int(event_datastack, "last_block_size", current_block->size);
#ifdef DEBUG
	event_add_int(event_datastack, "frame_alloc_bytes",
		      current_frame->alloc_bytes);
	event_add_int(event_datastack, "frame_alloc_count",
		      current_frame->alloc_count);
#endif
	event_add_str(event_datastack, "frame_marker", current_frame->marker);

	/* It's possible that the data stack gets grown and shrunk rapidly.
	   Try to avoid doing expensive work if the event isn't even used for
	   anything. Note that all the event fields that might potentially be
	   used by the filters must already be set at this point. */
	if (!event_want_debug(event_datastack))
		return;

	/* Getting the backtrace is potentially inefficient, so do it after
	   checking whether the event is wanted. Note that this prevents
	   using the backtrace field in event field comparisons. */
	const char *backtrace;
	if (backtrace_get(&backtrace) == 0)
		event_add_str(event_datastack, "backtrace", backtrace);

	string_t *str = t_str_new(128);
	str_printfa(str, "total_used=%zu, total_alloc=%zu, last_alloc_size=%zu",
		    data_stack_get_used_size(),
		    data_stack_get_alloc_size(),
		    last_alloc_size);
#ifdef DEBUG
	str_printfa(str, ", frame_bytes=%llu, frame_alloc_count=%u",
		    current_frame->alloc_bytes, current_frame->alloc_count);
#endif
	e_debug(event_datastack, "Growing data stack by %zu for '%s' (%s)",
		current_block->size, current_frame->marker,
		str_c(str));
}

static void *t_malloc_real(size_t size, bool permanent)
{
	void *ret;
	size_t alloc_size;
	bool warn = FALSE;
#ifdef DEBUG
	int old_errno = errno;
#endif

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %zu bytes", size);

	if (unlikely(!data_stack_initialized)) {
		/* kludgy, but allow this before initialization */
		data_stack_init();
	}
	block_canary_check(current_block);

	/* allocate only an aligned amount of memory so the alignment always
	   stays correct */
	alloc_size = ALLOC_SIZE(size);
#ifdef DEBUG
	if (permanent) {
		current_frame->alloc_bytes += alloc_size;
		current_frame->alloc_count++;
	}
#endif
	data_stack_last_buffer_reset(TRUE);

	if (permanent) {
		/* used for t_try_realloc() */
		current_frame->last_alloc_size = alloc_size;
	}

	if (current_block->left < alloc_size) {
		struct stack_block *block;

		/* current block is full, see if we can use the unused_block */
		if (unused_block != NULL && unused_block->size >= alloc_size) {
			block = unused_block;
			unused_block = NULL;
			mem_block_reset(block);
		} else {
			/* current block is full, allocate a new one */
			block = mem_block_alloc(alloc_size);
			warn = TRUE;
		}

		/* The newly allocated block will replace the current_block,
		   i.e. current_block always points to the last element in
		   the linked list. */
		block->prev = current_block;
		current_block->next = block;
		current_block = block;
	}

	/* enough space in the current block, use it */
	ret = data_stack_after_last_alloc(current_block);

#ifdef DEBUG
	if (current_block->left - alloc_size < current_block->left_lowwater)
		current_block->left_lowwater = current_block->left - alloc_size;
#endif
	if (permanent)
		current_block->left -= alloc_size;

	if (warn) T_BEGIN {
		/* sending the event can cause errno changes. */
#ifdef DEBUG
		i_assert(errno == old_errno);
#else
		int old_errno = errno;
#endif
		/* warn after the allocation, so if e_debug() wants to
		   allocate more memory we don't go into an infinite loop */
		data_stack_send_grow_event(alloc_size);
		/* reset errno back to what it was */
		errno = old_errno;
	} T_END;
#ifdef DEBUG
	memcpy(ret, &size, sizeof(size));
	ret = PTR_OFFSET(ret, MEM_ALIGN(sizeof(size)));
	/* make sure the sentry contains CLEAR_CHRs. it might not if
	   t_buffer_get() had been used. */
	memset(PTR_OFFSET(ret, size), CLEAR_CHR,
	       MEM_ALIGN(size + SENTRY_COUNT) - size);

	/* we rely on errno not changing. it shouldn't. */
	i_assert(errno == old_errno);
#endif
	return ret;
}

void *t_malloc_no0(size_t size)
{
	return t_malloc_real(size, TRUE);
}

void *t_malloc0(size_t size)
{
	void *mem;

	mem = t_malloc_real(size, TRUE);
	memset(mem, 0, size);
	return mem;
}

bool ATTR_NO_SANITIZE_INTEGER
t_try_realloc(void *mem, size_t size)
{
	size_t debug_adjust = 0, last_alloc_size;
	unsigned char *after_last_alloc;

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %zu bytes", size);
	block_canary_check(current_block);
	data_stack_last_buffer_reset(TRUE);

	last_alloc_size = current_frame->last_alloc_size;

	/* see if we're trying to grow the memory we allocated last */
	after_last_alloc = data_stack_after_last_alloc(current_block);
#ifdef DEBUG
	debug_adjust = MEM_ALIGN(sizeof(size_t));
#endif
	if (after_last_alloc - last_alloc_size + debug_adjust == mem) {
		/* yes - see if we have space to grow */
		size_t new_alloc_size, alloc_growth;

		new_alloc_size = ALLOC_SIZE(size);
		alloc_growth = (new_alloc_size - last_alloc_size);
#ifdef DEBUG
		size_t old_raw_size; /* sorry, non-C99 users - add braces if you need them */

		old_raw_size = *(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t)));
		i_assert(ALLOC_SIZE(old_raw_size) == last_alloc_size);
		/* Only check one byte for overrun; that catches most
		   offenders that are likely to use t_try_realloc() */
		i_assert(((unsigned char *)mem)[old_raw_size] == CLEAR_CHR);
#endif

		if (current_block->left >= alloc_growth) {
			/* just shrink the available size */
			current_block->left -= alloc_growth;
			current_frame->last_alloc_size = new_alloc_size;
#ifdef DEBUG
			if (current_block->left < current_block->left_lowwater)
				current_block->left_lowwater = current_block->left;
			/* All reallocs are permanent by definition.
			   However, they don't count as a new allocation. */
			current_frame->alloc_bytes += alloc_growth;
			*(size_t *)PTR_OFFSET(mem, -(ptrdiff_t)MEM_ALIGN(sizeof(size_t))) = size;
			memset(PTR_OFFSET(mem, size), CLEAR_CHR,
			       new_alloc_size - size - MEM_ALIGN(sizeof(size_t)));
#endif
			return TRUE;
		}
	}

	return FALSE;
}

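/* Usage sketch (hypothetical caller): grow the latest allocation in place
   when possible, otherwise fall back to allocating and copying:

     char *buf = t_malloc_no0(128);
     if (!t_try_realloc(buf, 256)) {
         char *bigger = t_malloc_no0(256);
         memcpy(bigger, buf, 128);
         buf = bigger;
     }
*/
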
size_t t_get_bytes_available(void)
{
	block_canary_check(current_block);
#ifndef DEBUG
	const unsigned int min_extra = 0;
#else
	const unsigned int min_extra = SENTRY_COUNT + MEM_ALIGN(sizeof(size_t));
#endif
	if (current_block->left < min_extra)
		return 0;
	size_t size = current_block->left - min_extra;
	i_assert(ALLOC_SIZE(size) == current_block->left);
	return size;
}

void *t_buffer_get(size_t size)
{
	void *ret;

	ret = t_malloc_real(size, FALSE);

	last_buffer_size = size;
	last_buffer_block = current_block;
	return ret;
}

void *t_buffer_reget(void *buffer, size_t size)
{
	size_t old_size;
	void *new_buffer;

	old_size = last_buffer_size;
	if (size <= old_size)
		return buffer;

	new_buffer = t_buffer_get(size);
	if (new_buffer != buffer)
		memcpy(new_buffer, buffer, old_size);

	return new_buffer;
}

void t_buffer_alloc(size_t size)
{
	i_assert(last_buffer_block != NULL);
	i_assert(last_buffer_size >= size);
	i_assert(current_block->left >= size);

	/* we've already reserved the space, now we just mark it used */
	(void)t_malloc_real(size, TRUE);
}

void t_buffer_alloc_last_full(void)
{
	if (last_buffer_block != NULL)
		(void)t_malloc_real(last_buffer_size, TRUE);
}

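/* Usage sketch of the reserve/commit protocol (fill_buf() is a
   hypothetical helper):

     char *buf = t_buffer_get(64);   // reserve space; not yet permanent
     size_t len = fill_buf(buf, 64); // write at most 64 bytes
     t_buffer_alloc(len);            // commit only the bytes actually used

   No t_malloc*()/t_push()/t_pop() calls are allowed between t_buffer_get()
   and t_buffer_alloc(). */
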
bool data_stack_frame_contains(data_stack_frame_t *id, const void *_ptr)
{
	const unsigned char *block_data, *ptr = _ptr;
	const struct stack_block *block;
	unsigned int wanted_frame_id;
	size_t block_start_pos, block_used;

	/* first handle the fast path - NULL can never be within the frame */
	if (ptr == NULL)
		return FALSE;

#ifndef STATIC_CHECKER
	wanted_frame_id = *id;
#else
	wanted_frame_id = (*id)->id;
#endif
	/* Too much effort to support more than the latest frame.
	   It's the only thing that is currently needed anyway. */
	i_assert(wanted_frame_id+1 == data_stack_frame_id);
	block = current_frame->block;
	i_assert(block != NULL);

	/* See if it's in the frame's first block. Only the data after
	   block_start_pos belongs to this frame. */
	block_data = STACK_BLOCK_DATA(block);
	block_start_pos = block->size - current_frame->block_space_left;
	block_used = block->size - block->left;
	if (ptr >= block_data + block_start_pos &&
	    ptr <= block_data + block_used)
		return TRUE;

	/* See if it's in the other blocks. All the data in them belongs to
	   this frame. */
	for (block = block->next; block != NULL; block = block->next) {
		block_data = STACK_BLOCK_DATA(block);
		block_used = block->size - block->left;
		if (ptr >= block_data && ptr < block_data + block_used)
			return TRUE;
	}
	return FALSE;
}

size_t data_stack_get_alloc_size(void)
{
	struct stack_block *block;
	size_t size = 0;

	i_assert(current_block->next == NULL);

	for (block = current_block; block != NULL; block = block->prev)
		size += block->size;
	return size;
}

size_t data_stack_get_used_size(void)
{
	struct stack_block *block;
	size_t size = 0;

	i_assert(current_block->next == NULL);

	for (block = current_block; block != NULL; block = block->prev)
		size += block->size - block->left;
	return size;
}

void data_stack_free_unused(void)
{
	free(unused_block);
	unused_block = NULL;
}

void data_stack_init(void)
{
	if (data_stack_initialized) {
		/* already initialized (we did auto-initialization in
		   t_malloc/t_push) */
		return;
	}
	data_stack_initialized = TRUE;
	data_stack_frame_id = 1;

	outofmem_area.block.size = outofmem_area.block.left =
		sizeof(outofmem_area) - sizeof(outofmem_area.block);
	outofmem_area.block.canary = BLOCK_CANARY;

	current_block = mem_block_alloc(INITIAL_STACK_SIZE);
	current_frame = NULL;

	last_buffer_block = NULL;
	last_buffer_size = 0;

	root_frame_id = t_push("data_stack_init");
}

void data_stack_deinit_event(void)
{
	event_unref(&event_datastack);
	event_datastack_deinitialized = TRUE;
}

void data_stack_deinit(void)
{
	if (!t_pop(&root_frame_id) ||
	    current_frame != NULL)
		i_panic("Missing t_pop() call");

	free(current_block);
	current_block = NULL;
	data_stack_free_unused();
}