1 /* PSPP - a program for statistical analysis.
2 Copyright (C) 2000, 2010, 2011, 2012 Free Software Foundation, Inc.
3
4 This program is free software: you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program. If not, see <http://www.gnu.org/licenses/>. */
16
17 #include <config.h>
18
19 #include "libpspp/pool.h"
20
21 #include <stdint.h>
22 #include <stdlib.h>
23
24 #include "libpspp/assertion.h"
25 #include "libpspp/message.h"
26 #include "libpspp/temp-file.h"
27 #include "libpspp/str.h"
28
29 #include "gl/xalloc.h"
30
/* Fast, low-overhead memory block suballocator.

   The struct pool itself is stored inside its first block, just past
   the block header (see pool_create()), so it is released along with
   the blocks. */
struct pool
  {
    struct pool *parent;       /* Pool of which this pool is a subpool. */
    struct pool_block *blocks; /* Blocks owned by the pool, as a circular
                                  doubly linked list; the head is the block
                                  currently used for suballocation. */
    struct pool_gizmo *gizmos; /* Other stuff owned by the pool. */
  };
38
/* Pool block.

   This header sits at the start of each BLOCK_SIZE-byte block;
   suballocations are carved from the space that follows it. */
struct pool_block
  {
    struct pool_block *prev;    /* Previous block in circular list. */
    struct pool_block *next;    /* Next block in circular list. */
    size_t ofs;                 /* Offset of first unused byte, measured
                                   from the start of the block. */
  };
46
/* Gizmo types. */
enum
  {
    POOL_GIZMO_MALLOC,          /* Block allocated by pool_malloc(). */
    POOL_GIZMO_FILE,            /* File from pool_fopen()/pool_attach_file(). */
    POOL_GIZMO_TEMP_FILE,       /* Temporary file from pool_create_temp_file()
                                   or pool_attach_temp_file(). */
    POOL_GIZMO_SUBPOOL,         /* Subpool (see pool_create_subpool()). */
    POOL_GIZMO_REGISTERED,      /* Free function from pool_register(). */
  };
56
/* Pool routines can maintain objects (`gizmos') as well as doing
   suballocation.
   This structure is used to keep track of them.

   Gizmos form a doubly linked list headed by pool->gizmos; new gizmos
   are prepended (see add_gizmo()), so the list runs from newest to
   oldest, which pool_release() relies on. */
struct pool_gizmo
  {
    struct pool *pool;          /* Pool that owns this gizmo. */
    struct pool_gizmo *prev;    /* Previous gizmo in pool's list. */
    struct pool_gizmo *next;    /* Next gizmo in pool's list. */

    long serial;                /* Serial number. */
    int type;                   /* Type of this gizmo. */

    /* Type-dependent info. */
    union
      {
        FILE *file;             /* POOL_GIZMO_FILE, POOL_GIZMO_TEMP_FILE. */
        struct pool *subpool;   /* POOL_GIZMO_SUBPOOL. */

        /* POOL_GIZMO_REGISTERED. */
        struct
          {
            void (*free) (void *p); /* Function to call. */
            void *p;                /* Argument to pass it. */
          }
        registered;
      }
    p;
  };
85
/* Rounds X up to the next multiple of Y.
   Y must be nonzero.  Evaluates X and Y more than once, so the
   arguments must not have side effects. */
#ifndef ROUND_UP
#define ROUND_UP(X, Y) \
        (((X) + ((Y) - 1)) / (Y) * (Y))
#endif
91
/* Types that provide typically useful alignment sizes. */
union align
  {
    void *op;                   /* Object pointer. */
    void (*fp) (void);          /* Function pointer. */
    long l;
    double d;

    /* glibc jmp_buf on ia64 requires 16-byte alignment.  This ensures it. */
    size_t s[2];
  };

/* This should be the alignment size used by malloc().  The size of
   the union above is correct, if not optimal, in all known cases.

   This is normally 8 bytes for 32-bit architectures and 16 bytes for 64-bit
   architectures. */
#define ALIGN_SIZE sizeof (union align)
110
/* DISCRETE_BLOCKS may be declared as nonzero to prevent
   suballocation of blocks.  This is useful under memory
   debuggers like valgrind because it allows the source location
   of bugs to be more accurately pinpointed.

   On the other hand, if we're testing the library, then we want to
   test the library's real functionality, not its crippled, slow,
   simplified functionality. */
/*#define DISCRETE_BLOCKS 1*/

/* Size of each block allocated in the pool, in bytes.
   Should be at least 1k. */
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif

/* Maximum size of a suballocated block, in bytes.  Larger requests
   are passed to pool_malloc() and allocated individually, to avoid
   wasting the unused tail of pool blocks.  Must be small enough that
   a request of this size always fits into a fresh, empty block
   (pool_alloc() relies on this).

   This macro was referenced by pool_alloc() but previously never
   defined, which broke compilation. */
#ifndef MAX_SUBALLOC
#define MAX_SUBALLOC 64
#endif

/* Sizes of some structures with alignment padding included. */
#define POOL_BLOCK_SIZE ROUND_UP (sizeof (struct pool_block), ALIGN_SIZE)
#define POOL_GIZMO_SIZE ROUND_UP (sizeof (struct pool_gizmo), ALIGN_SIZE)
#define POOL_SIZE ROUND_UP (sizeof (struct pool), ALIGN_SIZE)
132
/* Serial number used to keep track of gizmos for mark/release.
   Incremented each time add_gizmo() runs; pool_mark() records it and
   pool_release() frees every gizmo whose serial is at least the
   recorded value. */
static long serial = 0;

/* Prototypes. */
static void add_gizmo (struct pool *, struct pool_gizmo *);
static void free_gizmo (struct pool_gizmo *);
static void free_all_gizmos (struct pool *pool);
static void delete_gizmo (struct pool *, struct pool_gizmo *);
static void check_gizmo (struct pool *, struct pool_gizmo *);
142
/* General routines. */

/* Creates and returns a new memory pool, which allows malloc()'d
   blocks to be suballocated in a time- and space-efficient manner.
   The entire contents of the memory pool are freed at once.

   In addition, other objects can be associated with a memory pool.
   These are released when the pool is destroyed. */
struct pool *
pool_create (void)
{
  struct pool_block *block;
  struct pool *pool;

  /* Allocate the first block and link it into a singleton circular
     list. */
  block = xmalloc (BLOCK_SIZE);
  block->prev = block->next = block;
  /* The struct pool itself is carved out of this first block, just
     past the block header, so creating a pool costs one malloc(). */
  block->ofs = POOL_BLOCK_SIZE + POOL_SIZE;

  pool = (struct pool *) (((char *) block) + POOL_BLOCK_SIZE);
  pool->parent = NULL;
  pool->blocks = block;
  pool->gizmos = NULL;

  return pool;
}
168
/* Creates a pool, allocates a block STRUCT_SIZE bytes in
   length from it, stores the pool's address at offset
   POOL_MEMBER_OFFSET within the block, and returns the allocated
   block.

   Meant for use indirectly via pool_create_container(). */
void *
pool_create_at_offset (size_t struct_size, size_t pool_member_offset)
{
  struct pool *pool;
  char *container;

  /* The container must be able to hold a pool pointer at the given
     offset. */
  assert (struct_size >= sizeof pool);
  assert (pool_member_offset <= struct_size - sizeof pool);

  pool = pool_create ();
  container = pool_alloc (pool, struct_size);
  memcpy (container + pool_member_offset, &pool, sizeof pool);
  return container;
}
189
/* Destroy the specified pool, including all subpools. */
void
pool_destroy (struct pool *pool)
{
  if (pool == NULL)
    return;

  /* Remove this pool from its parent's list of gizmos.  A subpool's
     linking gizmo lives immediately after the struct pool in the
     first block (see pool_create_subpool()), so its address can be
     computed directly. */
  if (pool->parent)
    delete_gizmo (pool->parent, (void *) (((char *) pool) + POOL_SIZE));

  free_all_gizmos (pool);

  /* Free all the memory.  The struct pool itself lives inside one of
     these blocks, so this frees POOL too; it must not be referenced
     afterward.  Break the circular list first so the loop terminates
     on a null pointer. */
  {
    struct pool_block *cur, *next;

    pool->blocks->prev->next = NULL;
    for (cur = pool->blocks; cur; cur = next)
      {
        next = cur->next;
        free (cur);
      }
  }
}
215
/* Release all the memory and gizmos in POOL.
   Blocks are not given back with free() but kept for later
   allocations.  To give back memory, use a subpool instead. */
void
pool_clear (struct pool *pool)
{
  free_all_gizmos (pool);

  /* Zero out block sizes. */
  {
    struct pool_block *cur;

    cur = pool->blocks;
    do
      {
        /* Reset the block to "empty": only the header remains. */
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            /* This block holds the struct pool itself and, for a
               subpool, the gizmo linking it into its parent; keep
               that space reserved. */
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
        cur = cur->next;
      }
    while (cur != pool->blocks);
  }
}
243
/* Suballocation routines. */

/* Allocates a memory region AMT bytes in size from POOL and returns a
   pointer to the region's start.
   The region is properly aligned for storing any object. */
void *
pool_alloc (struct pool *pool, size_t amt)
{
  assert (pool != NULL);

  if (amt == 0)
    return NULL;

#ifndef DISCRETE_BLOCKS
  if (amt <= MAX_SUBALLOC)
    {
      /* If there is space in this block, take it. */
      struct pool_block *b = pool->blocks;
      b->ofs = ROUND_UP (b->ofs, ALIGN_SIZE);
      if (b->ofs + amt <= BLOCK_SIZE)
        {
          void *const p = ((char *) b) + b->ofs;
          b->ofs += amt;
          return p;
        }

      /* No space in this block, so we must make other
         arrangements. */
      if (b->next->ofs == 0)
        {
          /* The next block is empty.  Use it. */
          b = b->next;
          b->ofs = POOL_BLOCK_SIZE;
          /* Skip the struct pool if it lives in this block. */
          if ((char *) b + POOL_BLOCK_SIZE == (char *) pool)
            b->ofs += POOL_SIZE;
        }
      else
        {
          /* Create a new block at the start of the list. */
          b = xmalloc (BLOCK_SIZE);
          b->next = pool->blocks;
          b->prev = pool->blocks->prev;
          b->ofs = POOL_BLOCK_SIZE;
          pool->blocks->prev->next = b;
          pool->blocks->prev = b;
        }
      /* Make B the list head so future allocations try it first. */
      pool->blocks = b;

      /* Allocate space from B.  B is freshly reset, so a request of
         at most MAX_SUBALLOC bytes is assumed to fit without another
         bounds check. */
      b->ofs += amt;
      return ((char *) b) + b->ofs - amt;
    }
  else
#endif
    /* Large requests are malloc()'d individually as gizmos. */
    return pool_malloc (pool, amt);
}
300
/* Allocates a memory region AMT bytes in size from POOL and
   returns a pointer to the region's start.  The region is not
   necessarily aligned, so it is most suitable for storing
   strings.

   Unlike pool_alloc(), a null POOL is accepted and falls back to
   plain xmalloc(). */
void *
pool_alloc_unaligned (struct pool *pool, size_t amt)
{
  if (pool == NULL)
    return xmalloc (amt);

#ifndef DISCRETE_BLOCKS
  /* Strings need not be aligned on any boundary, but some
     operations may be more efficient when they are.  However,
     that's only going to help with reasonably long strings. */
  if (amt < ALIGN_SIZE)
    {
      if (amt == 0)
        return NULL;
      else
        {
          struct pool_block *const b = pool->blocks;

          /* Take the space directly, without rounding b->ofs up to
             an alignment boundary. */
          if (b->ofs + amt <= BLOCK_SIZE)
            {
              void *p = ((char *) b) + b->ofs;
              b->ofs += amt;
              return p;
            }
        }
    }
#endif

  /* Longer request, or the current block is full: use the aligned
     allocator. */
  return pool_alloc (pool, amt);
}
335
/* Allocates a memory region N * S bytes in size from POOL and
   returns a pointer to the region's start.
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nalloc (struct pool *pool, size_t n, size_t s)
{
  size_t total;

  /* Die rather than wrap around on multiplication overflow. */
  if (xalloc_oversized (n, s))
    xalloc_die ();
  total = n * s;
  return pool_alloc (pool, total);
}
348
/* Allocates SIZE bytes in POOL, copies BUFFER into it, and
   returns the new copy.
   If SIZE is 0, returns a null pointer without reading BUFFER. */
void *
pool_clone (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc (pool, size);
  /* pool_alloc() returns a null pointer when SIZE is 0, and calling
     memcpy() with a null argument is undefined behavior even for a
     zero-byte copy (C11 7.24.1). */
  if (size > 0)
    memcpy (block, buffer, size);
  return block;
}
358
/* Allocates SIZE bytes of unaligned data in POOL, copies BUFFER
   into it, and returns the new copy.
   If SIZE is 0, returns a null pointer without reading BUFFER. */
void *
pool_clone_unaligned (struct pool *pool, const void *buffer, size_t size)
{
  void *block = pool_alloc_unaligned (pool, size);
  /* pool_alloc_unaligned() yields a null pointer for SIZE == 0 (when
     POOL is nonnull), and memcpy() with a null argument is undefined
     behavior even for zero bytes (C11 7.24.1). */
  if (size > 0)
    memcpy (block, buffer, size);
  return block;
}
368
/* Duplicates null-terminated STRING, within POOL, and returns a
   pointer to the duplicate.  For use only with strings, because
   the returned pointer may not be aligned properly for other
   types. */
char *
pool_strdup (struct pool *pool, const char *string)
{
  size_t size = strlen (string) + 1;    /* Include the terminator. */
  return pool_clone_unaligned (pool, string, size);
}
378
/* Duplicates the SIZE bytes of STRING, plus a trailing 0 byte,
   and returns a pointer to the duplicate.  For use only with
   strings, because the returned pointer may not be aligned
   properly for other types. */
char *
pool_strdup0 (struct pool *pool, const char *string, size_t size)
{
  /* SIZE + 1 is always at least 1, so the allocation is nonnull. */
  char *copy = pool_alloc_unaligned (pool, size + 1);

  memcpy (copy, string, size);
  copy[size] = '\0';
  return copy;
}
391
/* Formats FORMAT with the given ARGS in memory allocated from
   POOL and returns the formatted string. */
char *
pool_vasprintf (struct pool *pool, const char *format, va_list args_)
{
  struct pool_block *b;
  va_list args;
  int needed, avail;
  char *s;

  /* Optimistically format straight into the free space at the end of
     the current block.  The result is unaligned, which is fine for a
     string. */
  va_copy (args, args_);
  b = pool->blocks;
  avail = BLOCK_SIZE - b->ofs;
  s = ((char *) b) + b->ofs;
  needed = vsnprintf (s, avail, format, args);
  va_end (args);

  if (needed >= 0)
    {
      if (needed < avail)
        {
          /* Success.  Reserve the space that was actually used. */
          b->ofs += needed + 1;
        }
      else
        {
          /* Failure, but now we know how much space is needed.
             Allocate that much and reformat. */
          s = pool_alloc (pool, needed + 1);

          va_copy (args, args_);
          vsprintf (s, format, args);
          va_end (args);
        }
    }
  else
    {
      /* Some old libc's returned -1 when the destination string
         was too short.  This should be uncommon these days and
         it's a rare case anyhow.  Use the easiest solution: punt
         to dynamic allocation, and make the pool responsible for
         freeing the malloc()'d string. */
      va_copy (args, args_);
      s = xvasprintf (format, args);
      va_end (args);

      pool_register (pool, free, s);
    }

  return s;
}
442
/* Formats FORMAT in memory allocated from POOL
   and returns the formatted string. */
char *
pool_asprintf (struct pool *pool, const char *format, ...)
{
  va_list args;
  char *result;

  /* Thin varargs wrapper around pool_vasprintf(). */
  va_start (args, format);
  result = pool_vasprintf (pool, format, args);
  va_end (args);
  return result;
}
457
458 /* Standard allocation routines. */
459
460 /* Allocates AMT bytes using malloc(), to be managed by POOL, and
461 returns a pointer to the beginning of the block.
462 If POOL is a null pointer, then allocates a normal memory block
463 with xmalloc(). */
464 void *
pool_malloc(struct pool * pool,size_t amt)465 pool_malloc (struct pool *pool, size_t amt)
466 {
467 if (pool != NULL)
468 {
469 if (amt != 0)
470 {
471 struct pool_gizmo *g = xmalloc (amt + POOL_GIZMO_SIZE);
472 g->type = POOL_GIZMO_MALLOC;
473 add_gizmo (pool, g);
474
475 return ((char *) g) + POOL_GIZMO_SIZE;
476 }
477 else
478 return NULL;
479 }
480 else
481 return xmalloc (amt);
482 }
483
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nmalloc (struct pool *pool, size_t n, size_t s)
{
  size_t total;

  /* Die rather than wrap around on multiplication overflow. */
  if (xalloc_oversized (n, s))
    xalloc_die ();
  total = n * s;
  return pool_malloc (pool, total);
}
498
/* Allocates AMT bytes using malloc(), to be managed by POOL,
   zeros the block, and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then allocates a normal memory block
   with xmalloc(). */
void *
pool_zalloc (struct pool *pool, size_t amt)
{
  void *p = pool_malloc (pool, amt);
  /* pool_malloc() returns a null pointer when AMT is 0 and POOL is
     nonnull; memset() on a null pointer is undefined behavior even
     with a zero length (C11 7.24.1). */
  if (p != NULL)
    memset (p, 0, amt);
  return p;
}
511
/* Allocates and returns N elements of S bytes each, to be
   managed by POOL, and zeros the block.
   If POOL is a null pointer, then allocates a normal memory block
   with malloc().
   N must be nonnegative, S must be positive.
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_calloc (struct pool *pool, size_t n, size_t s)
{
  void *p = pool_nmalloc (pool, n, s);
  /* pool_nmalloc() returns a null pointer when N is 0 and POOL is
     nonnull; memset() on a null pointer is undefined behavior even
     with a zero length (C11 7.24.1). */
  if (p != NULL)
    memset (p, 0, n * s);
  return p;
}
526
/* Changes the allocation size of the specified memory block P managed
   by POOL to AMT bytes and returns a pointer to the beginning of the
   block.
   If POOL is a null pointer, then the block is reallocated in the
   usual way with realloc(). */
void *
pool_realloc (struct pool *pool, void *p, size_t amt)
{
  if (pool != NULL)
    {
      if (p != NULL)
        {
          if (amt != 0)
            {
              /* The gizmo header sits immediately before the block
                 that pool_malloc() handed out. */
              struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
              check_gizmo (pool, g);

              g = xrealloc (g, amt + POOL_GIZMO_SIZE);
              /* xrealloc() may have moved the gizmo; repoint its
                 neighbors (or the list head) at the new address. */
              if (g->next)
                g->next->prev = g;
              if (g->prev)
                g->prev->next = g;
              else
                pool->gizmos = g;
              check_gizmo (pool, g);

              return ((char *) g) + POOL_GIZMO_SIZE;
            }
          else
            {
              /* Zero size frees the block, mirroring realloc(). */
              pool_free (pool, p);
              return NULL;
            }
        }
      else
        /* Null P acts as a fresh allocation, mirroring realloc(). */
        return pool_malloc (pool, amt);
    }
  else
    return xrealloc (p, amt);
}
567
/* Changes the allocation size of the specified memory block P
   managed by POOL to N * S bytes and returns a pointer to the
   beginning of the block.
   N must be nonnegative, S must be positive.
   If POOL is a null pointer, then the block is reallocated in
   the usual way with xrealloc().
   Terminates the program if the memory cannot be obtained,
   including the case where N * S overflows the range of size_t. */
void *
pool_nrealloc (struct pool *pool, void *p, size_t n, size_t s)
{
  size_t total;

  /* Die rather than wrap around on multiplication overflow. */
  if (xalloc_oversized (n, s))
    xalloc_die ();
  total = n * s;
  return pool_realloc (pool, p, total);
}
583
584 /* If P is null, allocate a block of at least *PN such objects;
585 otherwise, reallocate P so that it contains more than *PN
586 objects each of S bytes. *PN must be nonzero unless P is
587 null, and S must be nonzero. Set *PN to the new number of
588 objects, and return the pointer to the new block. *PN is
589 never set to zero, and the returned pointer is never null.
590
591 The block returned is managed by POOL. If POOL is a null
592 pointer, then the block is reallocated in the usual way with
593 x2nrealloc().
594
595 Terminates the program if the memory cannot be obtained,
596 including the case where the memory required overflows the
597 range of size_t.
598
599 Repeated reallocations are guaranteed to make progress, either by
600 allocating an initial block with a nonzero size, or by allocating a
601 larger block.
602
603 In the following implementation, nonzero sizes are doubled so that
604 repeated reallocations have O(N log N) overall cost rather than
605 O(N**2) cost, but the specification for this function does not
606 guarantee that sizes are doubled.
607
608 Here is an example of use:
609
610 int *p = NULL;
611 struct pool *pool;
612 size_t used = 0;
613 size_t allocated = 0;
614
615 void
616 append_int (int value)
617 {
618 if (used == allocated)
619 p = pool_2nrealloc (pool, p, &allocated, sizeof *p);
620 p[used++] = value;
621 }
622
623 This causes x2nrealloc to allocate a block of some nonzero size the
624 first time it is called.
625
626 To have finer-grained control over the initial size, set *PN to a
627 nonzero value before calling this function with P == NULL. For
628 example:
629
630 int *p = NULL;
631 struct pool *pool;
632 size_t used = 0;
633 size_t allocated = 0;
634 size_t allocated1 = 1000;
635
636 void
637 append_int (int value)
638 {
639 if (used == allocated)
640 {
641 p = pool_2nrealloc (pool, p, &allocated1, sizeof *p);
642 allocated = allocated1;
643 }
644 p[used++] = value;
645 }
646
647 This function implementation is from gnulib. */
void *
pool_2nrealloc (struct pool *pool, void *p, size_t *pn, size_t s)
{
  size_t n = *pn;

  if (p == NULL)
    {
      if (n == 0)
        {
          /* The approximate size to use for initial small allocation
             requests, when the invoking code specifies an old size of
             zero.  64 bytes is the largest "small" request for the
             GNU C library malloc. */
          enum { DEFAULT_MXFAST = 64 };

          n = DEFAULT_MXFAST / s;
          n += !n;              /* Ensure N is at least 1. */
        }
    }
  else
    {
      /* Double N, dying if 2 * N * S would overflow size_t. */
      if (SIZE_MAX / 2 / s < n)
        xalloc_die ();
      n *= 2;
    }

  *pn = n;
  return pool_realloc (pool, p, n * s);
}
677
678 /* Frees block P managed by POOL.
679 If POOL is a null pointer, then the block is freed as usual with
680 free(). */
681 void
pool_free(struct pool * pool,void * p)682 pool_free (struct pool *pool, void *p)
683 {
684 if (pool != NULL && p != NULL)
685 {
686 struct pool_gizmo *g = (void *) (((char *) p) - POOL_GIZMO_SIZE);
687 check_gizmo (pool, g);
688 delete_gizmo (pool, g);
689 free (g);
690 }
691 else
692 free (p);
693 }
694
/* Gizmo allocations. */

/* Creates and returns a pool as a subpool of POOL.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
struct pool *
pool_create_subpool (struct pool *pool)
{
  struct pool *subpool;
  struct pool_gizmo *g;

  assert (pool != NULL);
  subpool = pool_create ();
  subpool->parent = pool;

  /* Carve the linking gizmo directly out of the subpool's first
     block, immediately after the struct pool itself.  pool_destroy()
     and pool_clear() rely on this fixed layout. */
  g = (void *) (((char *) subpool->blocks) + subpool->blocks->ofs);
  subpool->blocks->ofs += POOL_GIZMO_SIZE;

  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;

  /* Register the subpool with the parent so that destroying the
     parent destroys the subpool too. */
  add_gizmo (pool, g);

  return subpool;
}
720
/* Makes SUBPOOL a subpool of POOL.
   SUBPOOL must not already have a parent pool.
   The subpool will be destroyed automatically when POOL is destroyed.
   It may also be destroyed explicitly in advance. */
void
pool_add_subpool (struct pool *pool, struct pool *subpool)
{
  struct pool_gizmo *g;

  assert (pool != NULL);
  assert (subpool != NULL);
  assert (subpool->parent == NULL);

  /* NOTE(review): unlike pool_create_subpool(), the linking gizmo is
     obtained here via pool_alloc() from SUBPOOL rather than placed at
     the fixed offset just past the struct pool, yet pool_destroy()
     unlinks a subpool by computing the gizmo address as POOL +
     POOL_SIZE.  These agree only if SUBPOOL has had no prior
     allocations -- confirm that callers meet that condition. */
  g = pool_alloc (subpool, sizeof *g);
  g->type = POOL_GIZMO_SUBPOOL;
  g->p.subpool = subpool;
  add_gizmo (pool, g);

  subpool->parent = pool;
}
741
/* Opens file FILE_NAME with mode MODE and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose(), or
   detached from the pool with pool_detach_file(). */
FILE *
pool_fopen (struct pool *pool, const char *file_name, const char *mode)
{
  FILE *file;

  assert (pool && file_name && mode);
  file = fopen (file_name, mode);
  if (file == NULL)
    return NULL;

  /* Hand ownership of the stream to the pool. */
  pool_attach_file (pool, file);
  return file;
}
759
/* Closes file FILE managed by POOL.
   Returns 0 if successful, EOF if an I/O error occurred. */
int
pool_fclose (struct pool *pool, FILE *file)
{
  int result;

  assert (pool && file);

  /* Remove the pool's claim on the stream before closing it, so the
     pool will not try to close it again at destruction time. */
  pool_detach_file (pool, file);
  result = fclose (file);
  return result;
}
769
770 /* Attaches FILE to POOL.
771 The file will be closed automatically when POOL is destroyed, or it
772 may be closed explicitly in advance using pool_fclose(), or
773 detached from the pool with pool_detach_file(). */
774 void
pool_attach_file(struct pool * pool,FILE * file)775 pool_attach_file (struct pool *pool, FILE *file)
776 {
777 struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
778 g->type = POOL_GIZMO_FILE;
779 g->p.file = file;
780 add_gizmo (pool, g);
781 }
782
783 /* Detaches FILE from POOL. */
784 void
pool_detach_file(struct pool * pool,FILE * file)785 pool_detach_file (struct pool *pool, FILE *file)
786 {
787 struct pool_gizmo *g;
788
789 for (g = pool->gizmos; g; g = g->next)
790 if (g->type == POOL_GIZMO_FILE && g->p.file == file)
791 {
792 delete_gizmo (pool, g);
793 return;
794 }
795 }
796
/* Creates a temporary file with create_temp_file() and returns a handle to it
   if successful or a null pointer if not.
   The file will be closed automatically when POOL is destroyed, or it
   may be closed explicitly in advance using pool_fclose_temp_file(), or
   detached from the pool with pool_detach_temp_file(). */
FILE *
pool_create_temp_file (struct pool *pool)
{
  FILE *file;

  file = create_temp_file ();
  if (file == NULL)
    return NULL;

  /* Hand ownership of the temporary file to the pool. */
  pool_attach_temp_file (pool, file);
  return file;
}
810
/* Closes file FILE managed by POOL.
   FILE must have been opened with create_temp_file(). */
void
pool_fclose_temp_file (struct pool *pool, FILE *file)
{
  assert (pool && file);
  /* Remove the pool's claim on the file first, so the pool will not
     try to close it again at destruction time. */
  pool_detach_temp_file (pool, file);
  close_temp_file (file);
}
820
821 /* Attaches FILE, which must have been opened with create_temp_file(), to POOL.
822 The file will be closed automatically when POOL is destroyed, or it
823 may be closed explicitly in advance using pool_fclose_temp_file(), or
824 detached from the pool with pool_detach_temp_file(). */
825 void
pool_attach_temp_file(struct pool * pool,FILE * file)826 pool_attach_temp_file (struct pool *pool, FILE *file)
827 {
828 struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
829 g->type = POOL_GIZMO_TEMP_FILE;
830 g->p.file = file;
831 add_gizmo (pool, g);
832 }
833
834 /* Detaches FILE that was opened with create_temp_file() from POOL. */
835 void
pool_detach_temp_file(struct pool * pool,FILE * file)836 pool_detach_temp_file (struct pool *pool, FILE *file)
837 {
838 struct pool_gizmo *g;
839
840 for (g = pool->gizmos; g; g = g->next)
841 if (g->type == POOL_GIZMO_TEMP_FILE && g->p.file == file)
842 {
843 delete_gizmo (pool, g);
844 return;
845 }
846 }
847
848 /* Registers FREE to be called with argument P.
849 P should be unique among those registered in POOL so that it can be
850 uniquely identified by pool_unregister().
851 If not unregistered, FREE will be called with argument P when POOL
852 is destroyed. */
853 void
pool_register(struct pool * pool,void (* free)(void *),void * p)854 pool_register (struct pool *pool, void (*free) (void *), void *p)
855 {
856 assert (pool && free && p);
857
858 {
859 struct pool_gizmo *g = pool_alloc (pool, sizeof *g);
860 g->type = POOL_GIZMO_REGISTERED;
861 g->p.registered.free = free;
862 g->p.registered.p = p;
863 add_gizmo (pool, g);
864 }
865 }
866
867 /* Unregisters previously registered P from POOL.
868 Returns true only if P was found to be registered in POOL. */
869 bool
pool_unregister(struct pool * pool,void * p)870 pool_unregister (struct pool *pool, void *p)
871 {
872 assert (pool && p);
873
874 {
875 struct pool_gizmo *g;
876
877 for (g = pool->gizmos; g; g = g->next)
878 if (g->type == POOL_GIZMO_REGISTERED && g->p.registered.p == p)
879 {
880 delete_gizmo (pool, g);
881 return true;
882 }
883 }
884
885 return false;
886 }
887
888 /* Partial freeing. */
889
890 /* Notes the state of POOL into MARK so that it may be restored
891 by a call to pool_release(). */
892 void
pool_mark(struct pool * pool,struct pool_mark * mark)893 pool_mark (struct pool *pool, struct pool_mark *mark)
894 {
895 assert (pool && mark);
896
897 mark->block = pool->blocks;
898 mark->ofs = pool->blocks->ofs;
899
900 mark->serial = serial;
901 }
902
/* Restores to POOL the state recorded in MARK.
   Emptied blocks are not given back with free() but kept for
   later allocations.  To get that behavior, use a subpool
   instead. */
void
pool_release (struct pool *pool, const struct pool_mark *mark)
{
  assert (pool && mark);

  /* Free the gizmos created since the mark.  add_gizmo() prepends
     with increasing serial numbers, so the newer-than-MARK gizmos
     form a prefix of the list. */
  {
    struct pool_gizmo *cur, *next;

    for (cur = pool->gizmos; cur && cur->serial >= mark->serial; cur = next)
      {
        next = cur->next;
        free_gizmo (cur);
      }

    if (cur != NULL)
      {
        cur->prev = NULL;
        pool->gizmos = cur;
      }
    else
      pool->gizmos = NULL;
  }

  /* Reset every block allocated since the mark, then restore the
     marked block's fill level. */
  {
    struct pool_block *cur;

    for (cur = pool->blocks; cur != mark->block; cur = cur->next)
      {
        cur->ofs = POOL_BLOCK_SIZE;
        if ((char *) cur + POOL_BLOCK_SIZE == (char *) pool)
          {
            /* Keep the space occupied by the struct pool itself and,
               for a subpool, its linking gizmo. */
            cur->ofs += POOL_SIZE;
            if (pool->parent != NULL)
              cur->ofs += POOL_GIZMO_SIZE;
          }
      }
    pool->blocks = mark->block;
    pool->blocks->ofs = mark->ofs;
  }
}
947
948 /* Private functions. */
949
950 /* Adds GIZMO at the beginning of POOL's gizmo list. */
951 static void
add_gizmo(struct pool * pool,struct pool_gizmo * gizmo)952 add_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
953 {
954 assert (pool && gizmo);
955
956 gizmo->pool = pool;
957 gizmo->next = pool->gizmos;
958 gizmo->prev = NULL;
959 if (pool->gizmos)
960 pool->gizmos->prev = gizmo;
961 pool->gizmos = gizmo;
962
963 gizmo->serial = serial++;
964
965 check_gizmo (pool, gizmo);
966 }
967
968 /* Removes GIZMO from POOL's gizmo list. */
969 static void
delete_gizmo(struct pool * pool,struct pool_gizmo * gizmo)970 delete_gizmo (struct pool *pool, struct pool_gizmo *gizmo)
971 {
972 assert (pool && gizmo);
973
974 check_gizmo (pool, gizmo);
975
976 if (gizmo->prev)
977 gizmo->prev->next = gizmo->next;
978 else
979 pool->gizmos = gizmo->next;
980 if (gizmo->next)
981 gizmo->next->prev = gizmo->prev;
982 }
983
/* Frees any of GIZMO's internal state.
   GIZMO's data must not be referenced after calling this function. */
static void
free_gizmo (struct pool_gizmo *gizmo)
{
  assert (gizmo != NULL);

  switch (gizmo->type)
    {
    case POOL_GIZMO_MALLOC:
      /* The gizmo header and the user's block are one allocation
         (see pool_malloc()), so a single free() releases both. */
      free (gizmo);
      break;
    case POOL_GIZMO_FILE:
      fclose (gizmo->p.file);   /* Ignore errors. */
      break;
    case POOL_GIZMO_TEMP_FILE:
      close_temp_file (gizmo->p.file); /* Ignore errors. */
      break;
    case POOL_GIZMO_SUBPOOL:
      /* Clear the parent pointer first so that pool_destroy() does
         not try to unlink the subpool from the list we are already
         tearing down. */
      gizmo->p.subpool->parent = NULL;
      pool_destroy (gizmo->p.subpool);
      break;
    case POOL_GIZMO_REGISTERED:
      gizmo->p.registered.free (gizmo->p.registered.p);
      break;
    default:
      NOT_REACHED ();
    }
}
1013
1014 /* Free all the gizmos in POOL. */
1015 static void
free_all_gizmos(struct pool * pool)1016 free_all_gizmos (struct pool *pool)
1017 {
1018 struct pool_gizmo *cur, *next;
1019
1020 for (cur = pool->gizmos; cur; cur = next)
1021 {
1022 next = cur->next;
1023 free_gizmo (cur);
1024 }
1025 pool->gizmos = NULL;
1026 }
1027
/* Sanity-checks that G is properly linked into P's gizmo list: it
   belongs to P, its successor (if any) points back at it, and its
   predecessor points forward at it (or, if G is first, the list head
   points at it). */
static void
check_gizmo (struct pool *p, struct pool_gizmo *g)
{
  assert (g->pool == p);
  assert (g->next == NULL || g->next->prev == g);
  assert ((g->prev != NULL && g->prev->next == g)
          || (g->prev == NULL && p->gizmos == g));

}
1037