1 /********************************************
2 zmalloc.c
3 copyright 2008-2013,2019, Thomas E. Dickey
4 copyright 1991-1993,1995, Michael D. Brennan
5
6 This is a source file for mawk, an implementation of
7 the AWK programming language.
8
9 Mawk is distributed without warranty under the terms of
10 the GNU General Public License, version 2, 1991.
11 ********************************************/
12
13 /*
14 * $MawkId: zmalloc.c,v 1.31 2019/01/30 01:30:02 tom Exp $
15 */
16
17 /* zmalloc.c */
18 #include "mawk.h"
19 #include "zmalloc.h"
20
21 #if defined(NO_LEAKS) && defined(HAVE_TSEARCH)
22 #define USE_TSEARCH 1
23 #endif
24
25 #ifdef USE_TSEARCH
26 #include <search.h>
27 #endif
28
29 #define ZSHIFT 3
30 #define ZBLOCKSZ BlocksToBytes(1)
31
32 #define BytesToBlocks(size) ((((unsigned)size) + ZBLOCKSZ - 1) >> ZSHIFT)
33 #define BlocksToBytes(size) ((size) << ZSHIFT)
34
35 /*
36 zmalloc() gets mem from malloc() in CHUNKS of 2048 bytes
37 and cuts these blocks into smaller pieces that are multiples
38 of eight bytes. When a piece is returned via zfree(), it goes
39 on a linked linear list indexed by its size. The lists are
40 an array, pool[].
41
42 E.g., if you ask for 22 bytes with p = zmalloc(22), you actually get
43 a piece of size 24. When you free it with zfree(p,22) , it is added
44 to the list at pool[2].
45 */
46
47 #define POOLSZ 16
48
49 #define CHUNK 256
50 /* number of blocks to get from malloc */
51
52 /*****************************************************************************/
53
54 /*
55 * zmalloc() implements a scheme which makes it hard to use memory-checking
56 * tools such as valgrind to audit the program to find where there are leaks.
57 * That is due to its maintaining (for speed...) pools of memory chunks.
58 *
59 * Define DEBUG_ZMALLOC to build mawk with a simpler interface to the system
60 * malloc, which verifies whether the size-parameter passed to zfree() is
61 * consistent with the zmalloc() parameter.
62 *
63 * The NO_LEAKS code relies upon this simpler interface.
64 */
65
66 #if !defined(DEBUG_ZMALLOC) && defined(NO_LEAKS)
67 #define DEBUG_ZMALLOC 1
68 #endif
69
70 #ifdef DEBUG_ZMALLOC
71 #define IsPoolable(blocks) 0
72 #define Malloc(n) calloc(1,n)
73 #else
74 #define IsPoolable(blocks) ((blocks) <= POOLSZ)
75 #define Malloc(n) malloc(n)
76 #endif
77
78 #ifdef USE_TSEARCH
79
/*
 * Bookkeeping record for one allocation, stored in a tsearch() tree so
 * that zmalloc_leaks() can find and release anything never zfree'd.
 */
typedef struct {
    PTR ptr;			/* the address handed back to the caller */
    size_t size;		/* the size passed to zmalloc()/zrealloc() */
} PTR_DATA;

/* root of the tree managed via tsearch()/tfind()/tdelete()/twalk() */
static void *ptr_data;
86
#if 0
/*
 * Optional debugging hook (enable by changing "#if 0" to "#if 1"):
 * walk the tsearch() tree and trace every recorded allocation.
 */
static void
show_tsearch(const void *nodep, const VISIT which, const int depth)
{
    const PTR_DATA *p = *(PTR_DATA * const *) nodep;
    /* report each node exactly once (postorder for interior, leaf for leaves) */
    if (which == postorder || which == leaf) {
	TRACE(("show_data %d:%p ->%p %lu\n", depth, p, p->ptr, p->size));
    }
}

static void
show_ptr_data(void)
{
    twalk(ptr_data, show_tsearch);
}

#define ShowPtrData() show_ptr_data()
#else
#define ShowPtrData()		/* nothing */
#endif
107
108 static void
free_ptr_data(void * a)109 free_ptr_data(void *a)
110 {
111 TRACE(("free_ptr_data %p -> %p\n", a, ((PTR_DATA *) (a))->ptr));
112 free(a);
113 }
114
115 static int
compare_ptr_data(const void * a,const void * b)116 compare_ptr_data(const void *a, const void *b)
117 {
118 const PTR_DATA *p = (const PTR_DATA *) a;
119 const PTR_DATA *q = (const PTR_DATA *) b;
120 int result;
121
122 TRACE2(("compare %p->%p %p->%p\n", p, p->ptr, q, q->ptr));
123 if (p->ptr > q->ptr) {
124 result = 1;
125 } else if (p->ptr < q->ptr) {
126 result = -1;
127 } else {
128 result = 0;
129 }
130 return result;
131 }
132
133 static void
record_ptr(PTR ptr,size_t size)134 record_ptr(PTR ptr, size_t size)
135 {
136 PTR_DATA *item;
137 PTR_DATA **result;
138
139 item = malloc(sizeof(PTR_DATA));
140 item->ptr = ptr;
141 item->size = size;
142
143 TRACE(("record_ptr %p -> %p %lu\n", (void *) item, ptr, (unsigned long) size));
144 result = tsearch(item, &ptr_data, compare_ptr_data);
145 assert(result != 0);
146 assert(*result != 0);
147
148 TRACE2(("->%p (%p %lu)\n",
149 (*result), (*result)->ptr,
150 (unsigned long) (*result)->size));
151 ShowPtrData();
152 }
153
154 static void
finish_ptr(PTR ptr,size_t size)155 finish_ptr(PTR ptr, size_t size)
156 {
157 PTR_DATA dummy;
158 PTR_DATA *later;
159 PTR_DATA **item;
160 void *check;
161
162 dummy.ptr = ptr;
163 dummy.size = size;
164
165 TRACE2(("finish_ptr (%p) -> %p %lu\n", &dummy, ptr, (unsigned long) size));
166 item = tfind(&dummy, &ptr_data, compare_ptr_data);
167
168 assert(item != 0);
169 assert(*item != 0);
170
171 TRACE(("finish_ptr %p -> %p %lu\n",
172 (void *) (*item),
173 (*item)->ptr,
174 (unsigned long) (*item)->size));
175
176 later = *item;
177 check = tdelete(&dummy, &ptr_data, compare_ptr_data);
178 if (check) {
179 free(later);
180 }
181
182 ShowPtrData();
183 }
184
185 #define FinishPtr(ptr,size) finish_ptr(ptr,size)
186 #define RecordPtr(ptr,size) record_ptr(ptr,size)
187 #else
188 #define FinishPtr(ptr,size) /* nothing */
189 #define RecordPtr(ptr,size) /* nothing */
190 #endif
191
192 /*****************************************************************************/
193
194 static void
out_of_mem(void)195 out_of_mem(void)
196 {
197 static char out[] = "out of memory";
198
199 if (mawk_state == EXECUTION) {
200 rt_error("%s", out);
201 } else {
202 /* I don't think this will ever happen */
203 compile_error("%s", out);
204 mawk_exit(2);
205 }
206 }
207
/*
 * A free-list node: free pieces are linked through their own storage,
 * so no extra memory is needed for bookkeeping.
 */
typedef union zblock {
    char dummy[ZBLOCKSZ];	/* pads the union up to one block (8 bytes) */
    union zblock *link;		/* next free piece while on a pool[] list */
} ZBLOCK;

/* ZBLOCKS of sizes 1, 2, ... 16
   which is bytes of sizes 8, 16, ... , 128
   are stored on the linked linear lists in
   pool[0], pool[1], ... , pool[15]
*/

static ZBLOCK *pool[POOLSZ];
220
/*
 * Return a piece of memory at least <size> bytes long.
 *
 * Requests larger than POOLSZ blocks (128 bytes) -- or all requests,
 * under DEBUG_ZMALLOC, where IsPoolable() is always false -- go
 * straight to Malloc().  Smaller requests are rounded up to a multiple
 * of ZBLOCKSZ (8 bytes) and satisfied from the free list of that exact
 * size in pool[], falling back to carving pieces off a CHUNK-block
 * region obtained from Malloc().  On failure, out_of_mem() is called
 * (which, from its code, does not return normally).
 */
PTR
zmalloc(size_t size)
{
    unsigned blocks = BytesToBlocks(size);	/* request rounded up, in blocks */
    size_t bytes = (size_t) BlocksToBytes(blocks);	/* the same, in bytes */
    register ZBLOCK *p;
    static unsigned amt_avail;	/* blocks left unused in the current chunk */
    static ZBLOCK *avail;	/* first unused block of the current chunk */

    if (!IsPoolable(blocks)) {
	/* large request: bypass the pools entirely */
	p = (ZBLOCK *) Malloc(bytes);
	if (!p)
	    out_of_mem();
	RecordPtr(p, size);
    } else {

	if ((p = pool[blocks - 1]) != 0) {
	    /* reuse a piece of exactly the right size from its free list */
	    pool[blocks - 1] = p->link;
	} else {

	    if (blocks > amt_avail) {
		if (amt_avail != 0)	/* free avail */
		{
		    /* the chunk's leftover is too small for this request;
		     * donate it to the free list matching its own size */
		    avail->link = pool[--amt_avail];
		    pool[amt_avail] = avail;
		}

		if (!(avail = (ZBLOCK *) Malloc((size_t) (CHUNK * ZBLOCKSZ)))) {
		    /* if we get here, almost out of memory */
		    amt_avail = 0;
		    p = (ZBLOCK *) Malloc(bytes);
		    if (!p)
			out_of_mem();
		    RecordPtr(p, bytes);
		    return (PTR) p;
		} else {
		    RecordPtr(avail, CHUNK * ZBLOCKSZ);
		    amt_avail = CHUNK;
		}
	    }

	    /* get p from the avail pile */
	    p = avail;
	    avail += blocks;
	    amt_avail -= blocks;
	}
    }
    return (PTR) p;
}
270
271 void
zfree(PTR p,size_t size)272 zfree(PTR p, size_t size)
273 {
274 unsigned blocks = BytesToBlocks(size);
275
276 if (!IsPoolable(blocks)) {
277 FinishPtr(p, size);
278 free(p);
279 } else {
280 ((ZBLOCK *) p)->link = pool[--blocks];
281 pool[blocks] = (ZBLOCK *) p;
282 }
283 }
284
285 PTR
zrealloc(PTR p,size_t old_size,size_t new_size)286 zrealloc(PTR p, size_t old_size, size_t new_size)
287 {
288 register PTR q;
289
290 TRACE(("zrealloc %p %lu ->%lu\n", p, old_size, new_size));
291 if (new_size > (BlocksToBytes(POOLSZ)) &&
292 old_size > (BlocksToBytes(POOLSZ))) {
293 if (!(q = realloc(p, new_size))) {
294 out_of_mem();
295 }
296 FinishPtr(p, old_size);
297 RecordPtr(q, new_size);
298 #ifdef DEBUG_ZMALLOC
299 if (new_size > old_size) {
300 memset((char *) q + old_size, 0, new_size - old_size);
301 }
302 #endif
303 } else {
304 q = zmalloc(new_size);
305 memcpy(q, p, old_size < new_size ? old_size : new_size);
306 zfree(p, old_size);
307 }
308 return q;
309 }
310
#ifdef NO_LEAKS
/*
 * Drain the leak-tracking tree at exit: repeatedly remove the node at
 * the root and free its bookkeeping record until the tree is empty.
 */
void
zmalloc_leaks(void)
{
#ifdef USE_TSEARCH
    TRACE(("zmalloc_leaks\n"));
    for (;;) {
	PTR_DATA *head;

	if (ptr_data == 0)
	    break;
	/* the root pointer leads to the PTR_DATA stored at the root */
	head = *(PTR_DATA **) ptr_data;
	tdelete(head, &ptr_data, compare_ptr_data);
	free_ptr_data(head);
    }
#endif
}
#endif
325