/*
 * ProFTPD - FTP server daemon
 * Copyright (c) 1997, 1998 Public Flood Software
 * Copyright (c) 1999, 2000 MacGyver aka Habeeb J. Dihu <macgyver@tos.net>
 * Copyright (c) 2001-2020 The ProFTPD Project team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
 *
 * As a special exemption, Public Flood Software/MacGyver aka Habeeb J. Dihu
 * and other respective copyright holders give permission to link this program
 * with OpenSSL, and distribute the resulting executable, without including
 * the source code for OpenSSL in the source distribution.
 */

/* Resource allocation code */

#include "conf.h"

/* Manage free storage blocks */

union align {
  char *cp;
  void (*f)(void);
  long l;
  FILE *fp;
  double d;
};

#define CLICK_SZ (sizeof(union align))
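
/* Illustrative arithmetic (not in the original source): CLICK_SZ is
 * sizeof(union align), the size of its largest member; on a typical LP64
 * system that is 8.  Requests are rounded up to whole clicks, e.g. in
 * alloc_pool() below:
 *
 *   reqsz   = 13
 *   nclicks = 1 + ((13 - 1) / 8) = 2
 *   sz      = 2 * 8 = 16 bytes
 */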

union block_hdr {
  union align a;

  /* Padding */
#if defined(_LP64) || defined(__LP64__)
  char pad[32];
#endif

  /* Actual header */
  struct {
    void *endp;
    union block_hdr *next;
    void *first_avail;
  } h;
};
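
/* Rough sketch of a block's layout in memory (illustrative only):
 *
 *   +-----------------+---------------------------------------+
 *   | union block_hdr | usable space ...                      |
 *   +-----------------+---------------------------------------+
 *   ^                 ^                                       ^
 *   blok              h.first_avail (advances as              h.endp
 *                     allocations are made)
 */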

static union block_hdr *block_freelist = NULL;

/* Statistics */
static unsigned int stat_malloc = 0;  /* incr when malloc required */
static unsigned int stat_freehit = 0; /* incr when freelist used */

#ifdef PR_USE_DEVEL
static const char *trace_channel = "pool";
#endif /* PR_USE_DEVEL */

#ifdef PR_USE_DEVEL
/* Debug flags */
static int debug_flags = 0;

static void oom_printf(const char *fmt, ...) {
  char buf[PR_TUNABLE_BUFFER_SIZE];
  va_list msg;

  memset(buf, '\0', sizeof(buf));

  va_start(msg, fmt);
  pr_vsnprintf(buf, sizeof(buf), fmt, msg);
  va_end(msg);

  buf[sizeof(buf)-1] = '\0';
  fprintf(stderr, "%s\n", buf);
}
#endif /* PR_USE_DEVEL */

/* Lowest level memory allocation functions */

static void null_alloc(void) {
  pr_log_pri(PR_LOG_ALERT, "Out of memory!");
#ifdef PR_USE_DEVEL
  if (debug_flags & PR_POOL_DEBUG_FL_OOM_DUMP_POOLS) {
    pr_pool_debug_memory(oom_printf);
  }
#endif

  exit(1);
}

static void *smalloc(size_t size) {
  void *res;

  if (size == 0) {
    /* Avoid zero-length malloc(); on non-POSIX systems, the behavior is
     * not dependable.  And on POSIX systems, malloc(3) might still return
     * a "unique pointer" for a zero-length allocation (or NULL).
     *
     * Either way, a zero-length allocation request here means that someone
     * is doing something they should not be doing.
     */
    null_alloc();
  }

  res = malloc(size);
  if (res == NULL) {
    null_alloc();
  }

  return res;
}

/* Grab a completely new block from the system pool.  Relies on malloc()
 * to return truly aligned memory.
 */
static union block_hdr *malloc_block(size_t size) {
  union block_hdr *blok =
    (union block_hdr *) smalloc(size + sizeof(union block_hdr));

  blok->h.next = NULL;
  blok->h.first_avail = (char *) (blok + 1);
  blok->h.endp = size + (char *) blok->h.first_avail;

  return blok;
}
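
/* For example (hypothetical numbers): malloc_block(512) obtains
 * 512 + sizeof(union block_hdr) bytes from malloc(3).  first_avail then
 * points just past the header (blok + 1), and endp points 512 bytes beyond
 * that, so callers see exactly 512 usable bytes.
 */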

static void chk_on_blk_list(union block_hdr *blok, union block_hdr *free_blk,
    const char *pool_tag) {

#ifdef PR_USE_DEVEL
  /* Debug code */

  while (free_blk) {
    if (free_blk != blok) {
      free_blk = free_blk->h.next;
      continue;
    }

    pr_log_pri(PR_LOG_WARNING, "fatal: DEBUG: Attempt to free already free "
      "block in pool '%s'", pool_tag ? pool_tag : "<unnamed>");
    exit(1);
  }
#endif /* PR_USE_DEVEL */
}

/* Free a chain of blocks -- _must_ be called with alarms blocked. */
static void free_blocks(union block_hdr *blok, const char *pool_tag) {
  /* Put the freed blocks at the head of the free list, and point the next
   * pointer of the last block in the chain at the free blocks we
   * already had.
   */
  union block_hdr *old_free_list = block_freelist;

  if (!blok)
    return;	/* Shouldn't be freeing an empty pool */

  block_freelist = blok;

  /* Adjust first_avail pointers */

  while (blok->h.next) {
    chk_on_blk_list(blok, old_free_list, pool_tag);
    blok->h.first_avail = (char *) (blok + 1);
    blok = blok->h.next;
  }

  chk_on_blk_list(blok, old_free_list, pool_tag);
  blok->h.first_avail = (char *) (blok + 1);
  blok->h.next = old_free_list;
}
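
/* Sketch of the splice performed above (illustrative): given a pool's
 * block chain B1 -> B2 and an existing freelist F1 -> F2, the result is
 *
 *   block_freelist: B1 -> B2 -> F1 -> F2
 *
 * with each B's first_avail reset so that its full payload is reusable.
 */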

/* Get a new block, from the free list if possible, otherwise malloc a new
 * one.  minsz is the requested size of the block to be allocated.
 * If exact is TRUE, then minsz is the exact size of the allocated block;
 * otherwise, the allocated size will be rounded up from minsz to the nearest
 * multiple of BLOCK_MINFREE.
 *
 * Important: BLOCK ALARMS BEFORE CALLING
 */
static union block_hdr *new_block(int minsz, int exact) {
  union block_hdr **lastptr = &block_freelist;
  union block_hdr *blok = block_freelist;

  if (!exact) {
    minsz = 1 + ((minsz - 1) / BLOCK_MINFREE);
    minsz *= BLOCK_MINFREE;
  }

  /* Check if we have anything of the requested size on our free list first.
   */
  while (blok) {
    if (minsz <= ((char *) blok->h.endp - (char *) blok->h.first_avail)) {
      *lastptr = blok->h.next;
      blok->h.next = NULL;

      stat_freehit++;
      return blok;
    }

    lastptr = &blok->h.next;
    blok = blok->h.next;
  }

  /* Nope...damn.  Have to malloc() a new one. */
  stat_malloc++;
  return malloc_block(minsz);
}
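
/* Worked example of the non-exact rounding above, assuming a hypothetical
 * BLOCK_MINFREE of 512 bytes:
 *
 *   new_block(700, FALSE)
 *     minsz = (1 + ((700 - 1) / 512)) * 512 = 2 * 512 = 1024
 *
 * so the call either reuses a freelist block with at least 1024 free bytes,
 * or malloc()s a fresh 1024-byte block.
 */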

struct cleanup;

static void run_cleanups(struct cleanup *);

/* Pool internals and management */

struct pool_rec {
  union block_hdr *first;
  union block_hdr *last;
  struct cleanup *cleanups;
  struct pool_rec *sub_pools;
  struct pool_rec *sub_next;
  struct pool_rec *sub_prev;
  struct pool_rec *parent;
  char *free_first_avail;
  const char *tag;
};

pool *permanent_pool = NULL;
pool *global_config_pool = NULL;

/* Each pool structure is allocated at the start of its own first block,
 * so there is a need to know how many bytes that is (once properly
 * aligned).
 */

#define POOL_HDR_CLICKS (1 + ((sizeof(struct pool_rec) - 1) / CLICK_SZ))
#define POOL_HDR_BYTES (POOL_HDR_CLICKS * CLICK_SZ)
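
/* Worked example (hypothetical sizes): if sizeof(struct pool_rec) is 72
 * and CLICK_SZ is 8, then
 *
 *   POOL_HDR_CLICKS = 1 + ((72 - 1) / 8) = 9
 *   POOL_HDR_BYTES  = 9 * 8 = 72
 *
 * i.e. the pool header occupies the first 72 (properly aligned) bytes of
 * the pool's first block.
 */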

#ifdef PR_USE_DEVEL

static unsigned long blocks_in_block_list(union block_hdr *blok) {
  unsigned long count = 0;

  while (blok) {
    count++;
    blok = blok->h.next;
  }

  return count;
}

static unsigned long bytes_in_block_list(union block_hdr *blok) {
  unsigned long size = 0;

  while (blok) {
    size += ((char *) blok->h.endp - (char *) (blok + 1));
    blok = blok->h.next;
  }

  return size;
}

static unsigned int subpools_in_pool(pool *p) {
  unsigned int count = 0;
  pool *iter;

  if (p->sub_pools == NULL)
    return 0;

  for (iter = p->sub_pools; iter; iter = iter->sub_next) {
    /* Count one for the current subpool (iter). */
    count += (subpools_in_pool(iter) + 1);
  }

  return count;
}

/* Walk all pools, starting with the top-level permanent pool, displaying a
 * tree.
 */
static long walk_pools(pool *p, unsigned long level,
    void (*debugf)(const char *, ...)) {
  char _levelpad[80] = "";
  long total = 0;

  if (p == NULL) {
    return 0;
  }

  if (level > 1) {
    memset(_levelpad, ' ', sizeof(_levelpad)-1);

    if ((level - 1) * 3 >= sizeof(_levelpad)) {
      _levelpad[sizeof(_levelpad)-1] = 0;

    } else {
      _levelpad[(level - 1) * 3] = '\0';
    }
  }

  /* The emitted message is:
   *
   *   <pool-tag> [pool-ptr] (n B, m L, r P)
   *
   * where n is the number of bytes (B), m is the number of allocated blocks
   * in the pool list (L), and r is the number of sub-pools (P).
   */

  for (; p; p = p->sub_next) {
    total += bytes_in_block_list(p->first);
    if (level == 0) {
      debugf("%s [%p] (%lu B, %lu L, %u P)",
        p->tag ? p->tag : "<unnamed>", p, bytes_in_block_list(p->first),
        blocks_in_block_list(p->first), subpools_in_pool(p));

    } else {
      debugf("%s + %s [%p] (%lu B, %lu L, %u P)", _levelpad,
        p->tag ? p->tag : "<unnamed>", p, bytes_in_block_list(p->first),
        blocks_in_block_list(p->first), subpools_in_pool(p));
    }

    /* Recurse */
    if (p->sub_pools) {
      total += walk_pools(p->sub_pools, level+1, debugf);
    }
  }

  return total;
}
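
/* Hypothetical output, as rendered through the debugf callback:
 *
 *   permanent_pool [0x558c2a0] (2048 B, 1 L, 1 P)
 *    + session pool [0x558c580] (512 B, 1 L, 0 P)
 */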

static void debug_pool_info(void (*debugf)(const char *, ...)) {
  if (block_freelist) {
    debugf("Free block list: %lu bytes",
      bytes_in_block_list(block_freelist));

  } else {
    debugf("Free block list: empty");
  }

  debugf("%u blocks allocated", stat_malloc);
  debugf("%u blocks reused", stat_freehit);
}

static void pool_printf(const char *fmt, ...) {
  char buf[PR_TUNABLE_BUFFER_SIZE];
  va_list msg;

  memset(buf, '\0', sizeof(buf));

  va_start(msg, fmt);
  pr_vsnprintf(buf, sizeof(buf), fmt, msg);
  va_end(msg);

  buf[sizeof(buf)-1] = '\0';
  pr_trace_msg(trace_channel, 5, "%s", buf);
}

void pr_pool_debug_memory(void (*debugf)(const char *, ...)) {
  if (debugf == NULL) {
    debugf = pool_printf;
  }

  debugf("Memory pool allocation:");
  debugf("Total %lu bytes allocated", walk_pools(permanent_pool, 0, debugf));
  debug_pool_info(debugf);
}

int pr_pool_debug_set_flags(int flags) {
  if (flags < 0) {
    errno = EINVAL;
    return -1;
  }

  debug_flags = flags;
  return 0;
}
#endif /* PR_USE_DEVEL */

void pr_pool_tag(pool *p, const char *tag) {
  if (p == NULL ||
      tag == NULL) {
    return;
  }

  p->tag = tag;
}

const char *pr_pool_get_tag(pool *p) {
  if (p == NULL) {
    errno = EINVAL;
    return NULL;
  }

  return p->tag;
}

/* Release the entire free block list */
static void pool_release_free_block_list(void) {
  union block_hdr *blok = NULL, *next = NULL;

  pr_alarms_block();

  for (blok = block_freelist; blok; blok = next) {
    next = blok->h.next;
    free(blok);
  }
  block_freelist = NULL;

  pr_alarms_unblock();
}

struct pool_rec *make_sub_pool(struct pool_rec *p) {
  union block_hdr *blok;
  pool *new_pool;

  pr_alarms_block();

  blok = new_block(0, FALSE);

  new_pool = (pool *) blok->h.first_avail;
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;

  memset(new_pool, 0, sizeof(struct pool_rec));
  new_pool->free_first_avail = blok->h.first_avail;
  new_pool->first = new_pool->last = blok;

  if (p) {
    new_pool->parent = p;
    new_pool->sub_next = p->sub_pools;

    if (new_pool->sub_next)
      new_pool->sub_next->sub_prev = new_pool;

    p->sub_pools = new_pool;
  }

  pr_alarms_unblock();

  return new_pool;
}
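
/* Illustrative usage (not part of this file): create a tagged sub-pool,
 * allocate from it, and release everything in one step:
 *
 *   pool *tmp = make_sub_pool(permanent_pool);
 *   pr_pool_tag(tmp, "temporary work pool");
 *
 *   char *buf = palloc(tmp, 256);
 *   ...
 *   destroy_pool(tmp);   <- releases buf, tmp, and any sub-pools of tmp
 */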

struct pool_rec *pr_pool_create_sz(struct pool_rec *p, size_t sz) {
  union block_hdr *blok;
  pool *new_pool;

  pr_alarms_block();

  blok = new_block(sz + POOL_HDR_BYTES, TRUE);

  new_pool = (pool *) blok->h.first_avail;
  blok->h.first_avail = POOL_HDR_BYTES + (char *) blok->h.first_avail;

  memset(new_pool, 0, sizeof(struct pool_rec));
  new_pool->free_first_avail = blok->h.first_avail;
  new_pool->first = new_pool->last = blok;

  if (p) {
    new_pool->parent = p;
    new_pool->sub_next = p->sub_pools;

    if (new_pool->sub_next)
      new_pool->sub_next->sub_prev = new_pool;

    p->sub_pools = new_pool;
  }

  pr_alarms_unblock();

  return new_pool;
}

/* Initialize the pool system by creating the base permanent_pool. */

void init_pools(void) {
  if (permanent_pool == NULL) {
    permanent_pool = make_sub_pool(NULL);
  }

  pr_pool_tag(permanent_pool, "permanent_pool");
}

void free_pools(void) {
  destroy_pool(permanent_pool);
  permanent_pool = NULL;
  pool_release_free_block_list();
}

static void clear_pool(struct pool_rec *p) {

  /* Sanity check. */
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  /* Run through any cleanups. */
  run_cleanups(p->cleanups);
  p->cleanups = NULL;

  /* Destroy subpools. */
  while (p->sub_pools) {
    destroy_pool(p->sub_pools);
  }

  p->sub_pools = NULL;

  free_blocks(p->first->h.next, p->tag);
  p->first->h.next = NULL;

  p->last = p->first;
  p->first->h.first_avail = p->free_first_avail;

  p->tag = NULL;
  pr_alarms_unblock();
}

void destroy_pool(pool *p) {
  if (p == NULL) {
    return;
  }

  pr_alarms_block();

  if (p->parent) {
    if (p->parent->sub_pools == p) {
      p->parent->sub_pools = p->sub_next;
    }

    if (p->sub_prev) {
      p->sub_prev->sub_next = p->sub_next;
    }

    if (p->sub_next) {
      p->sub_next->sub_prev = p->sub_prev;
    }
  }

  clear_pool(p);
  free_blocks(p->first, p->tag);

  pr_alarms_unblock();

#ifdef PR_DEVEL_NO_POOL_FREELIST
  /* If configured explicitly to do so, call free(3) on the freelist after
   * a pool is destroyed.  This can be useful for tracking down use-after-free
   * and other memory issues using libraries such as dmalloc.
   */
  pool_release_free_block_list();
#endif /* PR_DEVEL_NO_POOL_FREELIST */
}

/* Allocation interface... */

static void *alloc_pool(struct pool_rec *p, size_t reqsz, int exact) {
  /* Round up requested size to an even number of aligned units */
  size_t nclicks = 1 + ((reqsz - 1) / CLICK_SZ);
  size_t sz = nclicks * CLICK_SZ;
  union block_hdr *blok;
  char *first_avail, *new_first_avail;

  /* For performance, see if space is available in the most recently
   * allocated block.
   */

  blok = p->last;
  if (blok == NULL) {
    errno = EINVAL;
    return NULL;
  }

  first_avail = blok->h.first_avail;

  if (reqsz == 0) {
    /* Don't try to allocate memory of zero length.
     *
     * This should NOT happen normally; if it does, by returning NULL we
     * almost guarantee a null pointer dereference.
     */
    errno = EINVAL;
    return NULL;
  }

  new_first_avail = first_avail + sz;

  if (new_first_avail <= (char *) blok->h.endp) {
    blok->h.first_avail = new_first_avail;
    return (void *) first_avail;
  }

  /* Need a new one that's big enough */
  pr_alarms_block();

  blok = new_block(sz, exact);
  p->last->h.next = blok;
  p->last = blok;

  first_avail = blok->h.first_avail;
  blok->h.first_avail = sz + (char *) blok->h.first_avail;

  pr_alarms_unblock();
  return (void *) first_avail;
}

void *palloc(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, FALSE);
}

void *pallocsz(struct pool_rec *p, size_t sz) {
  return alloc_pool(p, sz, TRUE);
}

void *pcalloc(struct pool_rec *p, size_t sz) {
  void *res;

  res = palloc(p, sz);
  memset(res, '\0', sz);

  return res;
}

void *pcallocsz(struct pool_rec *p, size_t sz) {
  void *res;

  res = pallocsz(p, sz);
  memset(res, '\0', sz);

  return res;
}
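
/* Usage sketch (illustrative): palloc()/pcalloc() suit small allocations,
 * since any new underlying block is rounded up to BLOCK_MINFREE to amortize
 * future requests, while pallocsz()/pcallocsz() request an exactly-sized
 * block, which suits large one-off buffers:
 *
 *   int *counters = pcalloc(p, 4 * sizeof(int));
 *   char *big_buf = pallocsz(p, 1048576);
 */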

/* Array functions */

array_header *make_array(pool *p, unsigned int nelts, size_t elt_size) {
  array_header *res;

  if (p == NULL ||
      elt_size == 0) {
    errno = EINVAL;
    return NULL;
  }

  res = palloc(p, sizeof(array_header));

  if (nelts < 1)
    nelts = 1;

  res->elts = pcalloc(p, nelts * elt_size);
  res->pool = p;
  res->elt_size = elt_size;
  res->nelts = 0;
  res->nalloc = nelts;

  return res;
}

void clear_array(array_header *arr) {
  if (arr == NULL) {
    return;
  }

  arr->elts = pcalloc(arr->pool, arr->nalloc * arr->elt_size);
  arr->nelts = 0;
}

void *push_array(array_header *arr) {
  if (arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  if (arr->nelts == arr->nalloc) {
    char *new_data = pcalloc(arr->pool, arr->nalloc * arr->elt_size * 2);

    memcpy(new_data, arr->elts, arr->nalloc * arr->elt_size);
    arr->elts = new_data;
    arr->nalloc *= 2;
  }

  ++arr->nelts;
  return ((char *) arr->elts) + (arr->elt_size * (arr->nelts - 1));
}
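
/* Illustrative usage of the array API (hypothetical values):
 *
 *   array_header *names = make_array(p, 4, sizeof(char *));
 *   *((char **) push_array(names)) = pstrdup(p, "alice");
 *   *((char **) push_array(names)) = pstrdup(p, "bob");
 *
 * Once nelts reaches nalloc, push_array() doubles the storage and copies
 * the old elements across; the old storage is simply abandoned to the pool.
 */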

int array_cat2(array_header *dst, const array_header *src) {
  size_t elt_size;

  if (dst == NULL ||
      src == NULL) {
    errno = EINVAL;
    return -1;
  }

  elt_size = dst->elt_size;

  if (dst->nelts + src->nelts > dst->nalloc) {
    size_t new_size;
    char *new_data;

    new_size = dst->nalloc * 2;
    if (new_size == 0) {
      ++new_size;
    }

    while ((dst->nelts + src->nelts) > new_size) {
      new_size *= 2;
    }

    new_data = pcalloc(dst->pool, elt_size * new_size);
    memcpy(new_data, dst->elts, dst->nalloc * elt_size);

    dst->elts = new_data;
    dst->nalloc = new_size;
  }

  memcpy(((char *) dst->elts) + (dst->nelts * elt_size), (char *) src->elts,
    elt_size * src->nelts);
  dst->nelts += src->nelts;

  return 0;
}

void array_cat(array_header *dst, const array_header *src) {
  (void) array_cat2(dst, src);
}

array_header *copy_array(pool *p, const array_header *arr) {
  array_header *res;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = make_array(p, arr->nalloc, arr->elt_size);

  memcpy(res->elts, arr->elts, arr->elt_size * arr->nelts);
  res->nelts = arr->nelts;
  return res;
}

/* Copy an array that is assumed to consist solely of strings. */
array_header *copy_array_str(pool *p, const array_header *arr) {
  register unsigned int i;
  array_header *res;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = copy_array(p, arr);

  for (i = 0; i < arr->nelts; i++)
    ((char **) res->elts)[i] = pstrdup(p, ((char **) res->elts)[i]);

  return res;
}

array_header *copy_array_hdr(pool *p, const array_header *arr) {
  array_header *res;

  if (p == NULL ||
      arr == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = palloc(p, sizeof(array_header));

  res->elts = arr->elts;
  res->pool = p;
  res->elt_size = arr->elt_size;
  res->nelts = arr->nelts;
  res->nalloc = arr->nelts;	/* Force overflow on push */

  return res;
}

array_header *append_arrays(pool *p, const array_header *first,
    const array_header *second) {
  array_header *res;

  if (p == NULL ||
      first == NULL ||
      second == NULL) {
    errno = EINVAL;
    return NULL;
  }

  res = copy_array_hdr(p, first);

  array_cat(res, second);
  return res;
}
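
/* Note how copy_array_hdr() makes this cheap: the copied header initially
 * shares element storage with "first", and because nalloc == nelts, the
 * array_cat() call overflows immediately and copies the elements into
 * fresh storage before appending, so "first" itself is never modified.
 * E.g. (illustrative):
 *
 *   array_header *all = append_arrays(p, users, admins);   <- users intact
 */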

/* Generic cleanups */

typedef struct cleanup {
  void *user_data;
  void (*cleanup_cb)(void *);
  struct cleanup *next;

} cleanup_t;

void register_cleanup2(pool *p, void *user_data, void (*cleanup_cb)(void *)) {
  cleanup_t *c;

  if (p == NULL) {
    return;
  }

  c = pcalloc(p, sizeof(cleanup_t));
  c->user_data = user_data;
  c->cleanup_cb = cleanup_cb;

  /* Add this cleanup to the given pool's list of cleanups. */
  c->next = p->cleanups;
  p->cleanups = c;
}
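
/* Illustrative usage (hypothetical callback): arrange for a descriptor to
 * be closed when the owning pool is cleared or destroyed:
 *
 *   static void close_fd_cb(void *data) {
 *     int fd = *((int *) data);
 *     (void) close(fd);
 *   }
 *
 *   int *fd = palloc(p, sizeof(int));
 *   *fd = open("/path/to/file", O_RDONLY);
 *   register_cleanup2(p, fd, close_fd_cb);
 */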

void register_cleanup(pool *p, void *user_data,
    void (*plain_cleanup_cb)(void *), void (*child_cleanup_cb)(void *)) {
  (void) child_cleanup_cb;
  register_cleanup2(p, user_data, plain_cleanup_cb);
}

void unregister_cleanup(pool *p, void *user_data, void (*cleanup_cb)(void *)) {
  cleanup_t *c, **lastp;

  if (p == NULL) {
    return;
  }

  c = p->cleanups;
  lastp = &p->cleanups;

  while (c != NULL) {
    if (c->user_data == user_data &&
        (c->cleanup_cb == cleanup_cb || cleanup_cb == NULL)) {

      /* Remove the given cleanup by pointing the previous next pointer to
       * the matching cleanup's next pointer.
       */
      *lastp = c->next;
      break;
    }

    lastp = &c->next;
    c = c->next;
  }
}

static void run_cleanups(cleanup_t *c) {
  while (c != NULL) {
    if (c->cleanup_cb) {
      (*c->cleanup_cb)(c->user_data);
    }

    c = c->next;
  }
}