1 /*
2 * mem.c - memory management
3 *
4 * This file is part of zsh, the Z shell.
5 *
6 * Copyright (c) 1992-1997 Paul Falstad
7 * All rights reserved.
8 *
9 * Permission is hereby granted, without written agreement and without
10 * license or royalty fees, to use, copy, modify, and distribute this
11 * software and to distribute modified versions of this software for any
12 * purpose, provided that the above copyright notice and the following
13 * two paragraphs appear in all copies of this software.
14 *
15 * In no event shall Paul Falstad or the Zsh Development Group be liable
16 * to any party for direct, indirect, special, incidental, or consequential
17 * damages arising out of the use of this software and its documentation,
18 * even if Paul Falstad and the Zsh Development Group have been advised of
19 * the possibility of such damage.
20 *
21 * Paul Falstad and the Zsh Development Group specifically disclaim any
22 * warranties, including, but not limited to, the implied warranties of
23 * merchantability and fitness for a particular purpose. The software
24 * provided hereunder is on an "as is" basis, and Paul Falstad and the
25 * Zsh Development Group have no obligation to provide maintenance,
26 * support, updates, enhancements, or modifications.
27 *
28 */
29
30 #include "zsh.mdh"
31 #include "mem.pro"
32
33 /*
34 There are two ways to allocate memory in zsh. The first way is
35 to call zalloc/zshcalloc, which call malloc/calloc directly. It
36 is legal to call realloc() or free() on memory allocated this way.
37 The second way is to call zhalloc/hcalloc, which allocates memory
38 from one of the memory pools on the heap stack. Such memory pools
39 will automatically be created when the heap allocation routines are
40 called. To be sure that they are freed at appropriate times
41 one should call pushheap() before one starts using heaps and
42 popheap() after that (when the memory allocated on the heaps since
43 the last pushheap() isn't needed anymore).
44 pushheap() saves the states of all currently allocated heaps and
45 popheap() resets them to the last state saved and destroys the
46 information about that state. If you called pushheap() and
47 allocated some memory on the heaps and then come to a place where
48 you don't need the allocated memory anymore but you still want
49 to allocate memory on the heap, you should call freeheap(). This
50 works like popheap(), only that it doesn't free the information
51 about the heap states (i.e. the heaps are like after the call to
52 pushheap() and you have to call popheap some time later).
53
54 Memory allocated in this way does not have to be freed explicitly;
55 it will all be freed when the pool is destroyed. In fact,
56 attempting to free this memory may result in a core dump.
57
58 If possible, the heaps are allocated using mmap() so that the
59 (*real*) heap isn't filled up with empty zsh heaps. If mmap()
60 is not available and zsh's own allocator is used, we use a simple trick
61 to avoid that: we allocate a large block of memory before allocating
62 a heap pool, this memory is freed again immediately after the pool
63 is allocated. If there are only small blocks on the free list this
64 guarantees that the memory for the pool is at the end of the memory
65 which means that we can give it back to the system when the pool is
66 freed.
67
68 hrealloc(char *p, size_t old, size_t new) is an optimisation
69 with a similar interface to realloc(). Typically the new size
70 will be larger than the old one, since there is no gain in
71 shrinking the allocation (indeed, that will confuse hrealloc()
72 since it will forget that the unused space once belonged to this
73 pointer). However, new == 0 is a special case; then if we
74 had to allocate a special heap for this memory it is freed at
75 that point.
76 */
77
/*
 * Decide whether heap arenas can come from anonymous private mmap()
 * regions instead of malloc().  USE_MMAP is only defined when the
 * headers, both syscalls, and the required mapping flags all exist.
 */
#if defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP) && defined(HAVE_MUNMAP)

#include <sys/mman.h>

/*
 * This definition is designed to enable use of memory mapping on MacOS.
 * However, performance tests indicate that MacOS mapped regions are
 * somewhat slower to allocate than memory from malloc(), so whether
 * using this improves performance depends on details of zhalloc().
 */
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

#if defined(MAP_ANONYMOUS) && defined(MAP_PRIVATE)

#define USE_MMAP 1
#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE)

#endif
#endif

/* Heap-memory warnings are a debugging feature: asking for them
 * implies turning on DEBUG support. */
#ifdef ZSH_MEM_WARNING
# ifndef DEBUG
# define DEBUG 1
# endif
#endif
105
#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)

/*
 * Allocation statistics for the debug `mem' builtin:
 * h_m[] histograms request sizes (in H_ISIZE units, last bucket is
 * "1024 or more"); h_push/h_pop/h_free count calls to the
 * corresponding heap operations.
 */
static int h_m[1025], h_push, h_pop, h_free;

#endif

/* Make sure we align to the longest fundamental type. */
union mem_align {
    zlong l;
    double d;
};

#define H_ISIZE sizeof(union mem_align)
#define HEAPSIZE (16384 - H_ISIZE)
/* Memory available for user data in default arena size */
#define HEAP_ARENA_SIZE (HEAPSIZE - sizeof(struct heap))
#define HEAPFREE (16384 - H_ISIZE)

/* Memory available for user data in heap h */
#define ARENA_SIZEOF(h) ((h)->size - sizeof(struct heap))

/* list of zsh heaps */

static Heap heaps;

/* a heap with free space, not always correct (it will be the last heap
 * if that was newly allocated but it may also be another one) */

static Heap fheap;
135
/**/
#ifdef ZSH_HEAP_DEBUG
/*
 * The heap ID we'll allocate next.
 *
 * We'll avoid using 0 as that means zero-initialised memory
 * containing a heap ID is (correctly) marked as invalid.
 */
static Heapid next_heap_id = (Heapid)1;

/*
 * The ID of the heap from which we last allocated heap memory.
 * In theory, since we carefully avoid allocating heap memory during
 * interrupts, after any call to zhalloc() or wrappers this should
 * be the ID of the heap containing the memory just returned.
 */
/**/
mod_export Heapid last_heap_id;

/*
 * Stack of heaps saved by new_heaps().
 * Assumes old_heaps() will come along and restore it later
 * (outputs an error if old_heaps() is called out of sequence).
 */
static LinkList heaps_saved;

/*
 * Debugging verbosity.  This must be set from a debugger.
 * An 'or' of bits from the enum heap_debug_verbosity.
 */
static volatile int heap_debug_verbosity;
167
168 /*
169 * Generate a heap identifier that's unique up to unsigned integer wrap.
170 *
171 * For the purposes of debugging we won't bother trying to make a
172 * heap_id globally unique, which would require checking all existing
173 * heaps every time we create an ID and still wouldn't do what we
174 * ideally want, which is to make sure the IDs of valid heaps are
175 * different from the IDs of no-longer-valid heaps. Given that,
176 * we'll just assume that if we haven't tracked the problem when the
177 * ID wraps we're out of luck. We could change the type to a long long
178 * if we wanted more room
179 */
180
181 static Heapid
new_heap_id(void)182 new_heap_id(void)
183 {
184 return next_heap_id++;
185 }
186
187 /**/
188 #endif
189
/* Use new heaps from now on.  This returns the old heap-list. */

/**/
mod_export Heap
new_heaps(void)
{
    Heap h;

    /* Swap out the global heap list atomically with respect to signal
     * handlers, which may themselves allocate heap memory. */
    queue_signals();
    h = heaps;

    /* Start with an empty heap list; the first zhalloc() will create
     * a fresh arena.  fheap must be reset too: it points into the old
     * list. */
    fheap = heaps = NULL;
    unqueue_signals();

#ifdef ZSH_HEAP_DEBUG
    if (heap_debug_verbosity & HDV_NEW) {
	/* NOTE(review): assumes h is non-NULL here, i.e. that some heap
	 * already existed — confirm callers never run this before the
	 * first allocation when debugging is enabled. */
	fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		" saved, new heaps created.\n", h->heap_id);
    }
    /* Remember the saved list so old_heaps() can verify it is
     * restored in LIFO order. */
    if (!heaps_saved)
	heaps_saved = znewlinklist();
    zpushnode(heaps_saved, h);
#endif
    return h;
}
215
/* Re-install the old heaps again, freeing the new ones. */

/**/
mod_export void
old_heaps(Heap old)
{
    Heap h, n;

    queue_signals();
    /* Destroy every arena allocated since the matching new_heaps();
     * none of them may have pushed state left (that would mean an
     * unbalanced pushheap()). */
    for (h = heaps; h; h = n) {
	n = h->next;
	DPUTS(h->sp, "BUG: old_heaps() with pushed heaps");
#ifdef ZSH_HEAP_DEBUG
	if (heap_debug_verbosity & HDV_FREE) {
	    /* NOTE(review): missing space before "freed" — the message
	     * prints as "...<id>freed in old_heaps()". */
	    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		    "freed in old_heaps().\n", h->heap_id);
	}
#endif
#ifdef USE_MMAP
	munmap((void *) h, h->size);
#else
	/* NOTE(review): frees with the default HEAPSIZE even though
	 * oversize arenas exist (zhalloc allocates size + sizeof(*h)
	 * when a request exceeds HEAP_ARENA_SIZE) — verify zfree()'s
	 * size argument is advisory in this configuration. */
	zfree(h, HEAPSIZE);
#endif
#ifdef ZSH_VALGRIND
	VALGRIND_DESTROY_MEMPOOL((char *)h);
#endif
    }
    /* Reinstate the list saved by new_heaps(). */
    heaps = old;
#ifdef ZSH_HEAP_DEBUG
    if (heap_debug_verbosity & HDV_OLD) {
	/* NOTE(review): also missing a space before "restored", and
	 * dereferences heaps, which is NULL if old was NULL. */
	fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		"restored.\n", heaps->heap_id);
    }
    {
	/* Cross-check against the LIFO record kept by new_heaps().
	 * NOTE(review): if myold is NULL the error path below
	 * dereferences it — confirm intended for debug-only builds. */
	Heap myold = heaps_saved ? getlinknode(heaps_saved) : NULL;
	if (old != myold)
	{
	    fprintf(stderr, "HEAP DEBUG: invalid old heap " HEAPID_FMT
		    ", expecting " HEAPID_FMT ".\n", old->heap_id,
		    myold->heap_id);
	}
    }
#endif
    /* fheap pointed into the list we just destroyed. */
    fheap = NULL;
    unqueue_signals();
}
262
263 /* Temporarily switch to other heaps (or back again). */
264
265 /**/
266 mod_export Heap
switch_heaps(Heap new)267 switch_heaps(Heap new)
268 {
269 Heap h;
270
271 queue_signals();
272 h = heaps;
273
274 #ifdef ZSH_HEAP_DEBUG
275 if (heap_debug_verbosity & HDV_SWITCH) {
276 fprintf(stderr, "HEAP DEBUG: heap temporarily switched from "
277 HEAPID_FMT " to " HEAPID_FMT ".\n", h->heap_id, new->heap_id);
278 }
279 #endif
280 heaps = new;
281 fheap = NULL;
282 unqueue_signals();
283
284 return h;
285 }
286
/* save states of zsh heaps */

/**/
mod_export void
pushheap(void)
{
    Heap h;
    Heapstack hs;

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_push++;
#endif

    /* For every arena, push its current fill level (h->used) onto the
     * arena's own stack of saved states; popheap()/freeheap() roll
     * back to these marks later. */
    for (h = heaps; h; h = h->next) {
	DPUTS(!h->used && h->next, "BUG: empty heap");
	hs = (Heapstack) zalloc(sizeof(*hs));
	hs->next = h->sp;
	h->sp = hs;
	hs->used = h->used;
#ifdef ZSH_HEAP_DEBUG
	/* The heap gets a fresh identity; the old one is remembered in
	 * the saved state and restored by popheap(). */
	hs->heap_id = h->heap_id;
	h->heap_id = new_heap_id();
	if (heap_debug_verbosity & HDV_PUSH) {
	    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT " pushed, new id is "
		    HEAPID_FMT ".\n",
		    hs->heap_id, h->heap_id);
	}
#endif
    }
    unqueue_signals();
}
320
/* reset heaps to previous state */

/**/
mod_export void
freeheap(void)
{
    /* hl trails the loop as "last arena kept on the list", so unwanted
     * arenas can be unlinked behind it. */
    Heap h, hn, hl = NULL;

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_free++;
#endif

    /*
     * When pushheap() is called, it sweeps over the entire heaps list of
     * arenas and marks every one of them with the amount of free space in
     * that arena at that moment.  zhalloc() is then allowed to grab bits
     * out of any of those arenas that have free space.
     *
     * Whenever fheap is NULL here, the loop below sweeps back over the
     * entire heap list again, resetting the free space in every arena to
     * the amount stashed by pushheap() and finding the arena with the most
     * free space to optimize zhalloc()'s next search.  When there's a lot
     * of stuff already on the heap, this is an enormous amount of work,
     * and performance goes to hell.
     *
     * Therefore, we defer freeing the most recently allocated arena until
     * we reach popheap().
     *
     * However, if the arena to which fheap points is unused, we want to
     * reclaim space in earlier arenas, so we have no choice but to do the
     * sweep for a new fheap.
     */
    if (fheap && !fheap->sp)
	fheap = NULL;		/* We used to do this unconditionally */
    /*
     * In other cases, either fheap is already correct, or it has never
     * been set and this loop will do it, or it'll be reset from scratch
     * on the next popheap().  So all that's needed here is to pick up
     * the scan wherever the last pass [or the last popheap()] left off.
     */
    for (h = (fheap ? fheap : heaps); h; h = hn) {
	hn = h->next;
	if (h->sp) {
	    /* Arena has pushed state: roll back to the saved fill level
	     * but keep the saved record for popheap(). */
#ifdef ZSH_MEM_DEBUG
#ifdef ZSH_VALGRIND
	    VALGRIND_MAKE_MEM_UNDEFINED((char *)arena(h) + h->sp->used,
					h->used - h->sp->used);
#endif
	    /* Poison the reclaimed region to catch use-after-free. */
	    memset(arena(h) + h->sp->used, 0xff, h->used - h->sp->used);
#endif
	    h->used = h->sp->used;
	    /* Track the arena with the most free space for zhalloc(). */
	    if (!fheap) {
		if (h->used < ARENA_SIZEOF(h))
		    fheap = h;
	    } else if (ARENA_SIZEOF(h) - h->used >
		       ARENA_SIZEOF(fheap) - fheap->used)
		fheap = h;
	    hl = h;
#ifdef ZSH_HEAP_DEBUG
	    /*
	     * As the free makes the heap invalid, give it a new
	     * identifier.  We're not popping it, so don't use
	     * the one in the heap stack.
	     */
	    {
		Heapid new_id = new_heap_id();
		if (heap_debug_verbosity & HDV_FREE) {
		    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
			    " freed, new id is " HEAPID_FMT ".\n",
			    h->heap_id, new_id);
		}
		h->heap_id = new_id;
	    }
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_TRIM((char *)h, (char *)arena(h), h->used);
#endif
	} else {
	    /* Arena has no pushed state: it was created entirely since
	     * the last pushheap(), so it can be destroyed outright. */
	    if (fheap == h)
		fheap = NULL;
	    if (h->next) {
		/* We want to cut this out of the arena list if we can */
		if (h == heaps)
		    hl = heaps = h->next;
		else if (hl && hl->next == h)
		    hl->next = h->next;
		else {
		    DPUTS(hl, "hl->next != h when freeing");
		    hl = h;
		    continue;
		}
		h->next = NULL;
	    } else {
		/* Leave an empty arena at the end until popped */
		h->used = 0;
		fheap = hl = h;
		break;
	    }
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_DESTROY_MEMPOOL((char *)h);
#endif
	}
    }
    /* Terminate the kept list; if nothing survived, empty it. */
    if (hl)
	hl->next = NULL;
    else
	heaps = fheap = NULL;

    unqueue_signals();
}
438
/* reset heap to previous state and destroy state information */

/**/
mod_export void
popheap(void)
{
    /* hl trails as "last arena kept" so discarded ones can be unlinked. */
    Heap h, hn, hl = NULL;
    Heapstack hs;

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_pop++;
#endif

    /* Unlike freeheap(), we rebuild the free-space hint from scratch. */
    fheap = NULL;
    for (h = heaps; h; h = hn) {
	hn = h->next;
	if ((hs = h->sp)) {
	    /* Roll back to the state saved by the matching pushheap()
	     * and discard that saved record. */
	    h->sp = hs->next;
#ifdef ZSH_MEM_DEBUG
#ifdef ZSH_VALGRIND
	    VALGRIND_MAKE_MEM_UNDEFINED((char *)arena(h) + hs->used,
					h->used - hs->used);
#endif
	    /* Poison reclaimed memory to catch stale references. */
	    memset(arena(h) + hs->used, 0xff, h->used - hs->used);
#endif
	    h->used = hs->used;
#ifdef ZSH_HEAP_DEBUG
	    if (heap_debug_verbosity & HDV_POP) {
		fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
			" popped, old heap was " HEAPID_FMT ".\n",
			h->heap_id, hs->heap_id);
	    }
	    /* The identity saved at push time becomes valid again. */
	    h->heap_id = hs->heap_id;
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_TRIM((char *)h, (char *)arena(h), h->used);
#endif
	    /* Keep fheap pointing at the arena with the most free space. */
	    if (!fheap) {
		if (h->used < ARENA_SIZEOF(h))
		    fheap = h;
	    } else if (ARENA_SIZEOF(h) - h->used >
		       ARENA_SIZEOF(fheap) - fheap->used)
		fheap = h;
	    zfree(hs, sizeof(*hs));

	    hl = h;
	} else {
	    /* No saved state: the arena postdates the pushheap() being
	     * popped, so destroy it. */
	    if (h->next) {
		/* We want to cut this out of the arena list if we can */
		if (h == heaps)
		    hl = heaps = h->next;
		else if (hl && hl->next == h)
		    hl->next = h->next;
		else {
		    DPUTS(hl, "hl->next != h when popping");
		    hl = h;
		    continue;
		}
		h->next = NULL;
	    } else if (hl == h)	/* This is the last arena of all */
		hl = NULL;
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_DESTROY_MEMPOOL((char *)h);
#endif
	}
    }
    if (hl)
	hl->next = NULL;
    else
	heaps = NULL;

    unqueue_signals();
}
519
#ifdef USE_MMAP
/*
 * Utility function to allocate a heap area of at least *n bytes.
 * *n will be rounded up to the next page boundary.
 *
 * On failure to map memory this is fatal: it prints an error and
 * exits, so the return value is always a valid mapping.
 */
static Heap
mmap_heap_alloc(size_t *n)
{
    Heap h;
    /* Cached page-size mask (pagesize - 1); computed once. */
    static size_t pgsz = 0;

    if (!pgsz) {
	long sz;

#ifdef _SC_PAGESIZE
	sz = sysconf(_SC_PAGESIZE);     /* SVR4 */
#else
# ifdef _SC_PAGE_SIZE
	sz = sysconf(_SC_PAGE_SIZE);    /* HPUX */
# else
	sz = getpagesize();
# endif
#endif
	/*
	 * sysconf() returns -1 on failure; previously that value was
	 * used directly, producing a garbage rounding mask.  Fall back
	 * to a conservative common page size instead.
	 */
	if (sz <= 0)
	    sz = 4096;

	pgsz = (size_t)sz - 1;
    }
    /* Round the request up to a whole number of pages. */
    *n = (*n + pgsz) & ~pgsz;
    h = (Heap) mmap(NULL, *n, PROT_READ | PROT_WRITE,
		    MMAP_FLAGS, -1, 0);
    if (h == ((Heap) -1)) {	/* i.e. MAP_FAILED */
	zerr("fatal error: out of heap memory");
	exit(1);
    }

    return h;
}
#endif
556
557 /* check whether a pointer is within a memory pool */
558
559 /**/
560 mod_export void *
zheapptr(void * p)561 zheapptr(void *p)
562 {
563 Heap h;
564 queue_signals();
565 for (h = heaps; h; h = h->next)
566 if ((char *)p >= arena(h) &&
567 (char *)p + H_ISIZE < arena(h) + ARENA_SIZEOF(h))
568 break;
569 unqueue_signals();
570 return (h ? p : 0);
571 }
572
/* allocate memory from the current memory pool */

/**/
mod_export void *
zhalloc(size_t size)
{
    /* hp trails the scan so a newly created arena can be appended. */
    Heap h, hp = NULL;
    size_t n;
#ifdef ZSH_VALGRIND
    /* Valgrind is told the caller's unrounded request size. */
    size_t req_size = size;

    if (size == 0)
	return NULL;
#endif

    /* Round the request up to the alignment of union mem_align. */
    size = (size + H_ISIZE - 1) & ~(H_ISIZE - 1);

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    /* Histogram of request sizes for the `mem' builtin. */
    h_m[size < (1024 * H_ISIZE) ? (size / H_ISIZE) : 1024]++;
#endif

    /* find a heap with enough free space */

    /*
     * This previously assigned:
     *   h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
     *        ? fheap : heaps);
     * but we think that nothing upstream of fheap has more free space,
     * so why start over at heaps just because fheap has too little?
     */
    for (h = (fheap ? fheap : heaps); h; h = h->next) {
	hp = h;
	if (ARENA_SIZEOF(h) >= (n = size + h->used)) {
	    /* Fast path: bump-allocate from this arena. */
	    void *ret;

	    h->used = n;
	    ret = arena(h) + n - size;
	    unqueue_signals();
#ifdef ZSH_HEAP_DEBUG
	    last_heap_id = h->heap_id;
	    if (heap_debug_verbosity & HDV_ALLOC) {
		fprintf(stderr, "HEAP DEBUG: allocated memory from heap "
			HEAPID_FMT ".\n", h->heap_id);
	    }
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)ret, req_size);
#endif
	    return ret;
	}
    }
    {
	/* not found, allocate new heap */
#if defined(ZSH_MEM) && !defined(USE_MMAP)
	/* Pre-allocate a large block so the pool ends up at the top of
	 * the address space and can later be returned to the system;
	 * see the trick described in the file comment. */
	static int called = 0;
	void *foo = called ? (void *)malloc(HEAPFREE) : NULL;
	/* tricky, see above */
#endif

	/* Oversize requests get a dedicated arena of exactly the
	 * needed size; normal ones get the default HEAPSIZE. */
	n = HEAP_ARENA_SIZE > size ? HEAPSIZE : size + sizeof(*h);

#ifdef USE_MMAP
	h = mmap_heap_alloc(&n);
#else
	h = (Heap) zalloc(n);
#endif

#if defined(ZSH_MEM) && !defined(USE_MMAP)
	if (called)
	    zfree(foo, HEAPFREE);
	called = 1;
#endif

	h->size = n;
	h->used = size;
	h->next = NULL;
	h->sp = NULL;
#ifdef ZSH_HEAP_DEBUG
	h->heap_id = new_heap_id();
	if (heap_debug_verbosity & HDV_CREATE) {
	    fprintf(stderr, "HEAP DEBUG: create new heap " HEAPID_FMT ".\n",
		    h->heap_id);
	}
#endif
#ifdef ZSH_VALGRIND
	VALGRIND_CREATE_MEMPOOL((char *)h, 0, 0);
	VALGRIND_MAKE_MEM_NOACCESS((char *)arena(h),
				   n - ((char *)arena(h)-(char *)h));
	VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)arena(h), req_size);
#endif

	/* Append the new arena and remember it as the one with space. */
	DPUTS(hp && hp->next, "failed to find end of chain in zhalloc");
	if (hp)
	    hp->next = h;
	else
	    heaps = h;
	fheap = h;

	unqueue_signals();
#ifdef ZSH_HEAP_DEBUG
	last_heap_id = h->heap_id;
	if (heap_debug_verbosity & HDV_ALLOC) {
	    fprintf(stderr, "HEAP DEBUG: allocated memory from heap "
		    HEAPID_FMT ".\n", h->heap_id);
	}
#endif
	return arena(h);
    }
}
684
/**/
mod_export void *
hrealloc(char *p, size_t old, size_t new)
{
    /*
     * Resize a heap allocation; see the file comment for the contract.
     * old/new are the previous and desired sizes; new == 0 frees a
     * dedicated arena if one was created for this pointer.
     */
    Heap h, ph;

#ifdef ZSH_VALGRIND
    size_t new_req = new;
#endif

    /* Work in alignment-rounded sizes, like zhalloc(). */
    old = (old + H_ISIZE - 1) & ~(H_ISIZE - 1);
    new = (new + H_ISIZE - 1) & ~(H_ISIZE - 1);

    if (old == new)
	return p;
    if (!old && !p)
#ifdef ZSH_VALGRIND
	return zhalloc(new_req);
#else
	return zhalloc(new);
#endif

    /* find the heap with p */

    queue_signals();
    for (h = heaps, ph = NULL; h; ph = h, h = h->next)
	if (p >= arena(h) && p < arena(h) + ARENA_SIZEOF(h))
	    break;

    DPUTS(!h, "BUG: hrealloc() called for non-heap memory.");
    DPUTS(h->sp && arena(h) + h->sp->used > p,
	  "BUG: hrealloc() wants to realloc pushed memory");

    /*
     * If the end of the old chunk is before the used pointer,
     * more memory has been zhalloc'ed afterwards.
     * We can't tell if that's still in use, obviously, since
     * that's the whole point of heap memory.
     * We have no choice other than to grab some more memory
     * somewhere else and copy in the old stuff.
     */
    if (p + old < arena(h) + h->used) {
	if (new > old) {
#ifdef ZSH_VALGRIND
	    char *ptr = (char *) zhalloc(new_req);
#else
	    char *ptr = (char *) zhalloc(new);
#endif
	    memcpy(ptr, p, old);
#ifdef ZSH_MEM_DEBUG
	    memset(p, 0xff, old);
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_FREE((char *)h, (char *)p);
	    /*
	     * zhalloc() marked h,ptr,new as an allocation so we don't
	     * need to do that here.
	     */
#endif
	    unqueue_signals();
	    return ptr;
	} else {
	    /* Shrinking in the middle of a heap: nothing to move, the
	     * trailing space is simply forgotten (see file comment). */
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_FREE((char *)h, (char *)p);
	    if (p) {
		VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)p,
				       new_req);
		VALGRIND_MAKE_MEM_DEFINED((char *)h, (char *)p);
	    }
#endif
	    unqueue_signals();
	    return new ? p : NULL;
	}
    }

    DPUTS(p + old != arena(h) + h->used, "BUG: hrealloc more than allocated");

    /*
     * We now know there's nothing afterwards in the heap, now see if
     * there's nothing before.  Then we can reallocate the whole thing.
     * Otherwise, we need to keep the stuff at the start of the heap,
     * then allocate a new one too; this is handled below.  (This will
     * guarantee we occupy a full heap next time round, provided we
     * don't use the heap for anything else.)
     */
    if (p == arena(h)) {
#ifdef ZSH_HEAP_DEBUG
	Heapid heap_id = h->heap_id;
#endif
	/*
	 * Zero new seems to be a special case saying we've finished
	 * with the specially reallocated memory, see scanner() in glob.c.
	 */
	if (!new) {
	    /* Unlink and destroy the whole arena. */
	    if (ph)
		ph->next = h->next;
	    else
		heaps = h->next;
	    fheap = NULL;
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_DESTROY_MEMPOOL((char *)h);
#endif
	    unqueue_signals();
	    return NULL;
	}
	if (new > ARENA_SIZEOF(h)) {
	    Heap hnew;
	    /*
	     * Not enough memory in this heap.  Allocate a new
	     * one of sufficient size.
	     *
	     * To avoid this happening too often, allocate
	     * chunks in multiples of HEAPSIZE.
	     * (Historical note: there didn't used to be any
	     * point in this since we didn't consistently record
	     * the allocated size of the heap, but now we do.)
	     */
	    size_t n = (new + sizeof(*h) + HEAPSIZE);
	    n -= n % HEAPSIZE;
	    fheap = NULL;

#ifdef USE_MMAP
	    {
		/*
		 * I don't know any easy portable way of requesting
		 * a mmap'd segment be extended, so simply allocate
		 * a new one and copy.
		 */
		hnew = mmap_heap_alloc(&n);
		/* Copy the entire heap, header (with next pointer) included */
		memcpy(hnew, h, h->size);
		munmap((void *)h, h->size);
	    }
#else
	    hnew = (Heap) realloc(h, n);
#endif
#ifdef ZSH_VALGRIND
	    VALGRIND_MEMPOOL_FREE((char *)h, p);
	    VALGRIND_DESTROY_MEMPOOL((char *)h);
	    VALGRIND_CREATE_MEMPOOL((char *)hnew, 0, 0);
	    VALGRIND_MEMPOOL_ALLOC((char *)hnew, (char *)arena(hnew),
				   new_req);
	    VALGRIND_MAKE_MEM_DEFINED((char *)hnew, (char *)arena(hnew));
#endif
	    h = hnew;

	    /* Relink the (possibly moved) arena in place. */
	    h->size = n;
	    if (ph)
		ph->next = h;
	    else
		heaps = h;
	}
#ifdef ZSH_VALGRIND
	else {
	    VALGRIND_MEMPOOL_FREE((char *)h, (char *)p);
	    VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)p, new_req);
	    VALGRIND_MAKE_MEM_DEFINED((char *)h, (char *)p);
	}
#endif
	h->used = new;
#ifdef ZSH_HEAP_DEBUG
	/* Resizing doesn't invalidate the heap: keep its identity. */
	h->heap_id = heap_id;
#endif
	unqueue_signals();
	return arena(h);
    }
#ifndef USE_MMAP
    DPUTS(h->used > ARENA_SIZEOF(h), "BUG: hrealloc at invalid address");
#endif
    /* p is the last allocation in its arena: grow or shrink in place
     * if the arena has room, else move to a fresh zhalloc(). */
    if (h->used + (new - old) <= ARENA_SIZEOF(h)) {
	h->used += new - old;
	unqueue_signals();
#ifdef ZSH_VALGRIND
	VALGRIND_MEMPOOL_FREE((char *)h, (char *)p);
	VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)p, new_req);
	VALGRIND_MAKE_MEM_DEFINED((char *)h, (char *)p);
#endif
	return p;
    } else {
	char *t = zhalloc(new);
	memcpy(t, p, old > new ? new : old);
	/* Give the old space back to this arena's bump pointer. */
	h->used -= old;
#ifdef ZSH_MEM_DEBUG
	memset(p, 0xff, old);
#endif
#ifdef ZSH_VALGRIND
	VALGRIND_MEMPOOL_FREE((char *)h, (char *)p);
	/* t already marked as allocated by zhalloc() */
#endif
	unqueue_signals();
	return t;
    }
}
883
/**/
#ifdef ZSH_HEAP_DEBUG
/*
 * Check if heap_id is the identifier of a currently valid heap,
 * including any heap buried on the stack, or of permanent memory.
 * Return 0 if so, else 1.
 *
 * This gets confused by use of switch_heaps().  That's because so do I.
 */

/**/
mod_export int
memory_validate(Heapid heap_id)
{
    Heap h;
    Heapstack hs;
    LinkNode node;

    if (heap_id == HEAPID_PERMANENT)
	return 0;

    queue_signals();
    /* Check the active heap list, including each heap's pushed states. */
    for (h = heaps; h; h = h->next) {
	if (h->heap_id == heap_id) {
	    unqueue_signals();
	    return 0;
	}
	/*
	 * Scan this heap's own saved-state stack.  (This previously
	 * iterated over heaps->sp, i.e. only the first heap's stack,
	 * so IDs buried in later heaps were wrongly reported invalid.)
	 */
	for (hs = h->sp; hs; hs = hs->next) {
	    if (hs->heap_id == heap_id) {
		unqueue_signals();
		return 0;
	    }
	}
    }

    /* Also check heap lists saved (and swapped out) by new_heaps(). */
    if (heaps_saved) {
	for (node = firstnode(heaps_saved); node; incnode(node)) {
	    for (h = (Heap)getdata(node); h; h = h->next) {
		if (h->heap_id == heap_id) {
		    unqueue_signals();
		    return 0;
		}
		/* Same fix as above: walk h->sp, not heaps->sp. */
		for (hs = h->sp; hs; hs = hs->next) {
		    if (hs->heap_id == heap_id) {
			unqueue_signals();
			return 0;
		    }
		}
	    }
	}
    }

    unqueue_signals();
    return 1;
}
/**/
#endif
941
942 /* allocate memory from the current memory pool and clear it */
943
944 /**/
945 mod_export void *
hcalloc(size_t size)946 hcalloc(size_t size)
947 {
948 void *ptr;
949
950 ptr = zhalloc(size);
951 memset(ptr, 0, size);
952 return ptr;
953 }
954
955 /* allocate permanent memory */
956
957 /**/
958 mod_export void *
zalloc(size_t size)959 zalloc(size_t size)
960 {
961 void *ptr;
962
963 if (!size)
964 size = 1;
965 queue_signals();
966 if (!(ptr = (void *) malloc(size))) {
967 zerr("fatal error: out of memory");
968 exit(1);
969 }
970 unqueue_signals();
971
972 return ptr;
973 }
974
975 /**/
976 mod_export void *
zshcalloc(size_t size)977 zshcalloc(size_t size)
978 {
979 void *ptr = zalloc(size);
980 if (!size)
981 size = 1;
982 memset(ptr, 0, size);
983 return ptr;
984 }
985
986 /* This front-end to realloc is used to make sure we have a realloc *
987 * that conforms to POSIX realloc. Older realloc's can fail if *
988 * passed a NULL pointer, but POSIX realloc should handle this. A *
989 * better solution would be for configure to check if realloc is *
990 * POSIX compliant, but I'm not sure how to do that. */
991
992 /**/
993 mod_export void *
zrealloc(void * ptr,size_t size)994 zrealloc(void *ptr, size_t size)
995 {
996 queue_signals();
997 if (ptr) {
998 if (size) {
999 /* Do normal realloc */
1000 if (!(ptr = (void *) realloc(ptr, size))) {
1001 zerr("fatal error: out of memory");
1002 exit(1);
1003 }
1004 unqueue_signals();
1005 return ptr;
1006 }
1007 else
1008 /* If ptr is not NULL, but size is zero, *
1009 * then object pointed to is freed. */
1010 free(ptr);
1011
1012 ptr = NULL;
1013 } else {
1014 /* If ptr is NULL, then behave like malloc */
1015 if (!(ptr = (void *) malloc(size))) {
1016 zerr("fatal error: out of memory");
1017 exit(1);
1018 }
1019 }
1020 unqueue_signals();
1021
1022 return ptr;
1023 }
1024
/**/
#ifdef ZSH_MEM

/*
Below is a simple segment oriented memory allocator for systems on
which it is better than the system's one. Memory is given in blocks
aligned to an integer multiple of sizeof(union mem_align), which will
probably be 64-bit as it is the longer of zlong or double. Each block is
preceded by a header which contains the length of the data part (in
bytes). In allocated blocks only this field of the structure m_hdr is
senseful. In free blocks the second field (next) is a pointer to the next
free segment on the free list.

On top of this simple allocator there is a second allocator for small
chunks of data. It should be both faster and less space-consuming than
using the normal segment mechanism for such blocks.
For the first M_NSMALL-1 possible sizes memory is allocated in arrays
that can hold M_SNUM blocks. Each array is stored in one segment of the
main allocator. In these segments the third field of the header structure
(free) contains a pointer to the first free block in the array. The
last field (used) gives the number of already used blocks in the array.

If the macro name ZSH_MEM_DEBUG is defined, some information about the memory
usage is stored. This information can then be viewed by calling the
builtin `mem' (which is only available if ZSH_MEM_DEBUG is set).

If ZSH_MEM_WARNING is defined, error messages are printed in case of errors.

If ZSH_SECURE_FREE is defined, free() checks if the given address is really
one that was returned by malloc(), it ignores it if it wasn't (printing
an error message if ZSH_MEM_WARNING is also defined).
*/

/* Prototypes for brk()/sbrk() on platforms whose headers lack them. */
#if !defined(__hpux) && !defined(DGUX) && !defined(__osf__)
# if defined(_BSD)
#  ifndef HAVE_BRK_PROTO
extern int brk _((caddr_t));
#  endif
#  ifndef HAVE_SBRK_PROTO
extern caddr_t sbrk _((int));
#  endif
# else
#  ifndef HAVE_BRK_PROTO
extern int brk _((void *));
#  endif
#  ifndef HAVE_SBRK_PROTO
extern void *sbrk _((int));
#  endif
# endif
#endif

/* Signatures for the malloc()/free() replacements: old BSD without
 * ANSI headers used char* and free() returned int. */
#if defined(_BSD) && !defined(STDC_HEADERS)
# define FREE_RET_T int
# define FREE_ARG_T char *
# define FREE_DO_RET
# define MALLOC_RET_T char *
# define MALLOC_ARG_T size_t
#else
# define FREE_RET_T void
# define FREE_ARG_T void *
# define MALLOC_RET_T void *
# define MALLOC_ARG_T size_t
#endif
1087
/* structure for building free list in blocks holding small blocks */

struct m_shdr {
    struct m_shdr *next;	/* next one on free list */
#ifdef PAD_64_BIT
    /* dummy to make this 64-bit aligned */
    struct m_shdr *dummy;
#endif
};

/* Header preceding every block handed out by the segment allocator.
 * Only `len' is meaningful in an allocated block; the other fields
 * are used when the block is free or holds an array of small blocks. */
struct m_hdr {
    zlong len;			/* length of memory block */
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
    /* either 1 or 2 zlong's, whichever makes up 64 bits. */
    zlong dummy1;
#endif
    struct m_hdr *next;		/* if free: next on free list
				   if block of small blocks: next one with
				   small blocks of same size */
    struct m_shdr *free;	/* if block of small blocks: free list */
    zlong used;			/* if block of small blocks: number of used
				   blocks */
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
    zlong dummy2;
#endif
};
1114
1115
/* alignment for memory blocks */

#define M_ALIGN (sizeof(union mem_align))

/* length of memory header, length of first field of memory header and
   minimal size of a block left free (if we allocate memory and take a
   block from the free list that is larger than needed, it must have at
   least M_MIN extra bytes to be split; if it has, the rest is put on
   the free list) */

#define M_HSIZE (sizeof(struct m_hdr))
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
/* the length field is padded out to 64 bits */
# define M_ISIZE (2*sizeof(zlong))
#else
# define M_ISIZE (sizeof(zlong))
#endif
#define M_MIN (2 * M_ISIZE)

/* M_FREE is the number of bytes that have to be free before memory is
 * given back to the system
 * M_KEEP is the number of bytes that will be kept when memory is given
 * back; note that this has to be less than M_FREE
 * M_ALLOC is the number of extra bytes to request from the system */

#define M_FREE 32768
#define M_KEEP 16384
#define M_ALLOC M_KEEP
1143
/* a pointer to the last free block and a pointer to the head of the free
   list (the blocks on this list are kept in order - lowest address first) */

static struct m_hdr *m_lfree, *m_free;

/* system's pagesize, fetched once on the first malloc() call */

static long m_pgsz = 0;

/* the highest and the lowest valid memory addresses, kept for fast validity
   checks in free() and to find out if and when we can give memory back to
   the system */

static char *m_high, *m_low;
1158
/* Management of blocks for small blocks:
   Such blocks are kept in lists (one list for each of the sizes that are
   allocated in such blocks). The lists are stored in the m_small array.
   M_SIDX() calculates the index into this array for a given size. M_SNUM
   is the size (in small blocks) of such blocks. M_SLEN() calculates the
   size of the small blocks held in a memory block, given a pointer to the
   header of it. M_SBLEN() gives the size of a memory block that can hold
   an array of small blocks, given the size of these small blocks. M_BSLEN()
   calculates the size of the small blocks held in a memory block, given the
   length of that block (including the header of the memory block). M_NSMALL
   is the number of possible block sizes that small blocks should be used
   for. */


#define M_SIDX(S) ((S) / M_ISIZE)
#define M_SNUM 128
#define M_SLEN(M) ((M)->len / M_SNUM)
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
/* Include the dummy in the alignment */
#define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \
		    2*sizeof(zlong) + sizeof(struct m_hdr *))
#define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \
		     2*sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
#else
#define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \
		    sizeof(zlong) + sizeof(struct m_hdr *))
#define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \
		     sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
#endif
#define M_NSMALL 8

/* one list of holder blocks per small-block size (index = size / M_ISIZE) */
static struct m_hdr *m_small[M_NSMALL];
1191
#ifdef ZSH_MEM_DEBUG

/* debugging statistics: bytes obtained via sbrk() (m_s) and given back
   via brk() (m_b), plus per-size histograms of malloc (m_m) and free
   (m_f) calls; index 1024 collects all "big" sizes */
static int m_s = 0, m_b = 0;
static int m_m[1025], m_f[1025];

/* the lowest block ever obtained from sbrk(); starting point for the
   arena walk in bin_mem() */
static struct m_hdr *m_l;

#endif /* ZSH_MEM_DEBUG */
1200
1201 MALLOC_RET_T
malloc(MALLOC_ARG_T size)1202 malloc(MALLOC_ARG_T size)
1203 {
1204 struct m_hdr *m, *mp, *mt;
1205 long n, s, os = 0;
1206 #ifndef USE_MMAP
1207 struct heap *h, *hp, *hf = NULL, *hfp = NULL;
1208 #endif
1209
1210 /* some systems want malloc to return the highest valid address plus one
1211 if it is called with an argument of zero.
1212
1213 TODO: really? Suppose we allocate more memory, so
1214 that this is now in bounds, then a more rational application
1215 that thinks it can free() anything it malloc'ed, even
1216 of zero length, calls free for it? Aren't we in big
1217 trouble? Wouldn't it be safer just to allocate some
1218 memory anyway?
1219
1220 If the above comment is really correct, then at least
1221 we need to check in free() if we're freeing memory
1222 at m_high.
1223 */
1224
1225 if (!size)
1226 #if 1
1227 size = 1;
1228 #else
1229 return (MALLOC_RET_T) m_high;
1230 #endif
1231
1232 queue_signals(); /* just queue signals rather than handling them */
1233
1234 /* first call, get page size */
1235
1236 if (!m_pgsz) {
1237
1238 #ifdef _SC_PAGESIZE
1239 m_pgsz = sysconf(_SC_PAGESIZE); /* SVR4 */
1240 #else
1241 # ifdef _SC_PAGE_SIZE
1242 m_pgsz = sysconf(_SC_PAGE_SIZE); /* HPUX */
1243 # else
1244 m_pgsz = getpagesize();
1245 # endif
1246 #endif
1247
1248 m_free = m_lfree = NULL;
1249 }
1250 size = (size + M_ALIGN - 1) & ~(M_ALIGN - 1);
1251
1252 /* Do we need a small block? */
1253
1254 if ((s = M_SIDX(size)) && s < M_NSMALL) {
1255 /* yep, find a memory block with free small blocks of the
1256 appropriate size (if we find it in this list, this means that
1257 it has room for at least one more small block) */
1258 for (mp = NULL, m = m_small[s]; m && !m->free; mp = m, m = m->next);
1259
1260 if (m) {
1261 /* we found one */
1262 struct m_shdr *sh = m->free;
1263
1264 m->free = sh->next;
1265 m->used++;
1266
1267 /* if all small blocks in this block are allocated, the block is
1268 put at the end of the list blocks with small blocks of this
1269 size (i.e., we try to keep blocks with free blocks at the
1270 beginning of the list, to make the search faster) */
1271
1272 if (m->used == M_SNUM && m->next) {
1273 for (mt = m; mt->next; mt = mt->next);
1274
1275 mt->next = m;
1276 if (mp)
1277 mp->next = m->next;
1278 else
1279 m_small[s] = m->next;
1280 m->next = NULL;
1281 }
1282 #ifdef ZSH_MEM_DEBUG
1283 m_m[size / M_ISIZE]++;
1284 #endif
1285
1286 unqueue_signals();
1287 return (MALLOC_RET_T) sh;
1288 }
1289 /* we still want a small block but there were no block with a free
1290 small block of the requested size; so we use the real allocation
1291 routine to allocate a block for small blocks of this size */
1292 os = size;
1293 size = M_SBLEN(size);
1294 } else
1295 s = 0;
1296
1297 /* search the free list for an block of at least the requested size */
1298 for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
1299
1300 #ifndef USE_MMAP
1301
1302 /* if there is an empty zsh heap at a lower address we steal it and take
1303 the memory from it, putting the rest on the free list (remember
1304 that the blocks on the free list are ordered) */
1305
1306 for (hp = NULL, h = heaps; h; hp = h, h = h->next)
1307 if (!h->used &&
1308 (!hf || h < hf) &&
1309 (!m || ((char *)m) > ((char *)h)))
1310 hf = h, hfp = hp;
1311
1312 if (hf) {
1313 /* we found such a heap */
1314 Heapstack hso, hsn;
1315
1316 /* delete structures on the list holding the heap states */
1317 for (hso = hf->sp; hso; hso = hsn) {
1318 hsn = hso->next;
1319 zfree(hso, sizeof(*hso));
1320 }
1321 /* take it from the list of heaps */
1322 if (hfp)
1323 hfp->next = hf->next;
1324 else
1325 heaps = hf->next;
1326 /* now we simply free it and than search the free list again */
1327 zfree(hf, HEAPSIZE);
1328
1329 for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
1330 }
1331 #endif
1332 if (!m) {
1333 long nal;
1334 /* no matching free block was found, we have to request new
1335 memory from the system */
1336 n = (size + M_HSIZE + M_ALLOC + m_pgsz - 1) & ~(m_pgsz - 1);
1337
1338 if (((char *)(m = (struct m_hdr *)sbrk(n))) == ((char *)-1)) {
1339 DPUTS1(1, "MEM: allocation error at sbrk, size %L.", n);
1340 unqueue_signals();
1341 return NULL;
1342 }
1343 if ((nal = ((long)(char *)m) & (M_ALIGN-1))) {
1344 if ((char *)sbrk(M_ALIGN - nal) == (char *)-1) {
1345 DPUTS(1, "MEM: allocation error at sbrk.");
1346 unqueue_signals();
1347 return NULL;
1348 }
1349 m = (struct m_hdr *) ((char *)m + (M_ALIGN - nal));
1350 }
1351 /* set m_low, for the check in free() */
1352 if (!m_low)
1353 m_low = (char *)m;
1354
1355 #ifdef ZSH_MEM_DEBUG
1356 m_s += n;
1357
1358 if (!m_l)
1359 m_l = m;
1360 #endif
1361
1362 /* save new highest address */
1363 m_high = ((char *)m) + n;
1364
1365 /* initialize header */
1366 m->len = n - M_ISIZE;
1367 m->next = NULL;
1368
1369 /* put it on the free list and set m_lfree pointing to it */
1370 if ((mp = m_lfree))
1371 m_lfree->next = m;
1372 m_lfree = m;
1373 }
1374 if ((n = m->len - size) > M_MIN) {
1375 /* the block we want to use has more than M_MIN bytes plus the
1376 number of bytes that were requested; we split it in two and
1377 leave the rest on the free list */
1378 struct m_hdr *mtt = (struct m_hdr *)(((char *)m) + M_ISIZE + size);
1379
1380 mtt->len = n - M_ISIZE;
1381 mtt->next = m->next;
1382
1383 m->len = size;
1384
1385 /* put the rest on the list */
1386 if (m_lfree == m)
1387 m_lfree = mtt;
1388
1389 if (mp)
1390 mp->next = mtt;
1391 else
1392 m_free = mtt;
1393 } else if (mp) {
1394 /* the block we found wasn't the first one on the free list */
1395 if (m == m_lfree)
1396 m_lfree = mp;
1397 mp->next = m->next;
1398 } else {
1399 /* it was the first one */
1400 m_free = m->next;
1401 if (m == m_lfree)
1402 m_lfree = m_free;
1403 }
1404
1405 if (s) {
1406 /* we are allocating a block that should hold small blocks */
1407 struct m_shdr *sh, *shn;
1408
1409 /* build the free list in this block and set `used' filed */
1410 m->free = sh = (struct m_shdr *)(((char *)m) +
1411 sizeof(struct m_hdr) + os);
1412
1413 for (n = M_SNUM - 2; n--; sh = shn)
1414 shn = sh->next = sh + s;
1415 sh->next = NULL;
1416
1417 m->used = 1;
1418
1419 /* put the block on the list of blocks holding small blocks if
1420 this size */
1421 m->next = m_small[s];
1422 m_small[s] = m;
1423
1424 #ifdef ZSH_MEM_DEBUG
1425 m_m[os / M_ISIZE]++;
1426 #endif
1427
1428 unqueue_signals();
1429 return (MALLOC_RET_T) (((char *)m) + sizeof(struct m_hdr));
1430 }
1431 #ifdef ZSH_MEM_DEBUG
1432 m_m[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
1433 #endif
1434
1435 unqueue_signals();
1436 return (MALLOC_RET_T) & m->next;
1437 }
1438
/* this is an internal free(); the second argument may, but need not, hold
   the size of the block the first argument is pointing to; if it is the
   right size of this block, freeing it will be faster, though; the value
   0 for this parameter means: `don't know' */

/**/
mod_export void
zfree(void *p, int sz)
{
    /* m points at the would-be header of a big block; only meaningful if
       p really is a big block (NOTE(review): computed before the NULL
       check below, but never dereferenced in that case) */
    struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt = NULL;
    int i;
# ifdef DEBUG
    int osz = sz;
# endif

#ifdef ZSH_SECURE_FREE
    sz = 0;
#else
    /* round the size hint up the same way malloc() rounded the request */
    sz = (sz + M_ALIGN - 1) & ~(M_ALIGN - 1);
#endif

    if (!p)
	return;

    /* first a simple check if the given address is valid */
    if (((char *)p) < m_low || ((char *)p) > m_high ||
	((long)p) & (M_ALIGN - 1)) {
	DPUTS(1, "BUG: attempt to free storage at invalid address");
	return;
    }

    queue_signals();

  fr_rec:

    if ((i = sz / M_ISIZE) < M_NSMALL || !sz)
	/* if the given size says that it is a small block, find the
	   memory block holding it; we search all blocks with blocks
	   of at least the given size; if the size parameter is zero,
	   this means that all blocks are searched */
	for (; i < M_NSMALL; i++) {
	    for (mp = NULL, mt = m_small[i];
		 mt && (((char *)mt) > ((char *)p) ||
			(((char *)mt) + mt->len) < ((char *)p));
		 mp = mt, mt = mt->next);

	    if (mt) {
		/* we found the block holding the small block */
		struct m_shdr *sh = (struct m_shdr *)p;

#ifdef ZSH_SECURE_FREE
		struct m_shdr *sh2;

		/* check if the given address is equal to the address of
		   the first small block plus an integer multiple of the
		   block size */
		if ((((char *)p) - (((char *)mt) + sizeof(struct m_hdr))) %
		    M_BSLEN(mt->len)) {

		    DPUTS(1, "BUG: attempt to free storage at invalid address");
		    unqueue_signals();
		    return;
		}
		/* check if the address is on the (block-intern) free list */
		for (sh2 = mt->free; sh2; sh2 = sh2->next)
		    if (((char *)p) == ((char *)sh2)) {

			DPUTS(1, "BUG: attempt to free already free storage");
			unqueue_signals();
			return;
		    }
#endif
		DPUTS(M_BSLEN(mt->len) < osz,
		      "BUG: attempt to free more than allocated.");

#ifdef ZSH_MEM_DEBUG
		m_f[M_BSLEN(mt->len) / M_ISIZE]++;
		/* scribble over the freed memory to catch use-after-free */
		memset(sh, 0xff, M_BSLEN(mt->len));
#endif

		/* put the block onto the free list */
		sh->next = mt->free;
		mt->free = sh;

		if (--mt->used) {
		    /* if there are still used blocks in this block, we
		       put it at the beginning of the list of blocks
		       holding small blocks of the same size (since we
		       know that there is at least one free block in it,
		       this will make allocation of small blocks faster;
		       it also guarantees that long living memory blocks
		       are preferred over younger ones) */
		    if (mp) {
			mp->next = mt->next;
			mt->next = m_small[i];
			m_small[i] = mt;
		    }
		    unqueue_signals();
		    return;
		}
		/* if there are no more used small blocks in this
		   block, we free the whole block: unlink it here and
		   fall through to the big-block code below */
		if (mp)
		    mp->next = mt->next;
		else
		    m_small[i] = mt->next;

		m = mt;
		p = (void *) & m->next;

		break;
	    } else if (sz) {
		/* if we didn't find a block and a size was given, try it
		   again as if no size were given */
		sz = 0;
		goto fr_rec;
	    }
	}
#ifdef ZSH_MEM_DEBUG
    if (!mt)
	m_f[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
#endif

#ifdef ZSH_SECURE_FREE
    /* search all memory blocks to verify one of them starts at the given
       address */
    for (mt = (struct m_hdr *)m_low;
	 ((char *)mt) < m_high;
	 mt = (struct m_hdr *)(((char *)mt) + M_ISIZE + mt->len))
	if (((char *)p) == ((char *)&mt->next))
	    break;

    /* no block was found at the given address */
    if (((char *)mt) >= m_high) {
	DPUTS(1, "BUG: attempt to free storage at invalid address");
	unqueue_signals();
	return;
    }
#endif

    /* see if the block is on the free list (the list is address-ordered,
       so we can stop at the first entry not below m) */
    for (mp = NULL, mt = m_free; mt && mt < m; mp = mt, mt = mt->next);

    if (m == mt) {
	/* it is, ouch! */
	DPUTS(1, "BUG: attempt to free already free storage");
	unqueue_signals();
	return;
    }
    DPUTS(m->len < osz, "BUG: attempt to free more than allocated");
#ifdef ZSH_MEM_DEBUG
    memset(p, 0xff, m->len);
#endif
    if (mt && ((char *)mt) == (((char *)m) + M_ISIZE + m->len)) {
	/* the block after the one we are freeing is free, we put them
	   together */
	m->len += mt->len + M_ISIZE;
	m->next = mt->next;

	if (mt == m_lfree)
	    m_lfree = m;
    } else
	m->next = mt;

    if (mp && ((char *)m) == (((char *)mp) + M_ISIZE + mp->len)) {
	/* the block before the one we are freeing is free, we put them
	   together */
	mp->len += m->len + M_ISIZE;
	mp->next = m->next;

	if (m == m_lfree)
	    m_lfree = mp;
    } else if (mp)
	/* otherwise, we just put it on the free list */
	mp->next = m;
    else {
	m_free = m;
	if (!m_lfree)
	    m_lfree = m_free;
    }

    /* if the block we have just freed was at the end of the process heap
       and now there is more than one page size of memory, we can give
       it back to the system (and we do it ;-) */
    if ((((char *)m_lfree) + M_ISIZE + m_lfree->len) == m_high &&
	m_lfree->len >= m_pgsz + M_MIN + M_FREE) {
	long n = (m_lfree->len - M_MIN - M_KEEP) & ~(m_pgsz - 1);

	m_lfree->len -= n;
#ifdef HAVE_BRK
	if (brk(m_high -= n) == -1) {
#else
	m_high -= n;
	if (sbrk(-n) == (void *)-1) {
#endif /* HAVE_BRK */
	    DPUTS(1, "MEM: allocation error at brk.");
	}

#ifdef ZSH_MEM_DEBUG
	m_b += n;
#endif
    }
    unqueue_signals();
}
1642
/* the system's free(); everything handed out by the malloc()/calloc()/
   realloc() replacements above eventually comes back through here */

FREE_RET_T
free(FREE_ARG_T p)
{
    zfree(p, 0);		/* 0 means: size is unknown */

#ifdef FREE_DO_RET
    return 0;
#endif
}
1652
1653 /* this one is for strings (and only strings, real strings, real C strings,
1654 those that have a zero byte at the end) */
1655
1656 /**/
1657 mod_export void
1658 zsfree(char *p)
1659 {
1660 if (p)
1661 zfree(p, strlen(p) + 1);
1662 }
1663
1664 MALLOC_RET_T
1665 realloc(MALLOC_RET_T p, MALLOC_ARG_T size)
1666 {
1667 struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mt;
1668 char *r;
1669 int i, l = 0;
1670
1671 /* some system..., see above */
1672 if (!p && size) {
1673 queue_signals();
1674 r = malloc(size);
1675 unqueue_signals();
1676 return (MALLOC_RET_T) r;
1677 }
1678
1679 /* and some systems even do this... */
1680 if (!p || !size)
1681 return (MALLOC_RET_T) p;
1682
1683 queue_signals(); /* just queue signals caught rather than handling them */
1684
1685 /* check if we are reallocating a small block, if we do, we have
1686 to compute the size of the block from the sort of block it is in */
1687 for (i = 0; i < M_NSMALL; i++) {
1688 for (mt = m_small[i];
1689 mt && (((char *)mt) > ((char *)p) ||
1690 (((char *)mt) + mt->len) < ((char *)p));
1691 mt = mt->next);
1692
1693 if (mt) {
1694 l = M_BSLEN(mt->len);
1695 break;
1696 }
1697 }
1698 if (!l)
1699 /* otherwise the size of the block is in the memory just before
1700 the given address */
1701 l = m->len;
1702
1703 /* now allocate the new block, copy the old contents, and free the
1704 old block */
1705 r = malloc(size);
1706 memcpy(r, (char *)p, (size > l) ? l : size);
1707 free(p);
1708
1709 unqueue_signals();
1710 return (MALLOC_RET_T) r;
1711 }
1712
1713 MALLOC_RET_T
1714 calloc(MALLOC_ARG_T n, MALLOC_ARG_T size)
1715 {
1716 long l;
1717 char *r;
1718
1719 if (!(l = n * size))
1720 return (MALLOC_RET_T) m_high;
1721
1722 /*
1723 * use realloc() (with a NULL `p` argument it behaves exactly the same
1724 * as malloc() does) to prevent an infinite loop caused by sibling-call
1725 * optimizations (the malloc() call would otherwise be replaced by an
1726 * unconditional branch back to line 1719 ad infinitum).
1727 */
1728 r = realloc(NULL, l);
1729
1730 memset(r, 0, l);
1731
1732 return (MALLOC_RET_T) r;
1733 }
1734
1735 #ifdef ZSH_MEM_DEBUG
1736
/* the `mem' builtin (only with ZSH_MEM_DEBUG): print statistics about the
   allocator; with -v a verbose explanation precedes each table */

/**/
int
bin_mem(char *name, char **argv, Options ops, int func)
{
    int i, ii, fi, ui, j;
    struct m_hdr *m, *mf, *ms;
    char *b, *c, buf[40];
    long u = 0, f = 0, to, cu;

    queue_signals();
    /* overall arena boundaries */
    if (OPT_ISSET(ops,'v')) {
	printf("The lower and the upper addresses of the heap. Diff gives\n");
	printf("the difference between them, i.e. the size of the heap.\n\n");
    }
    printf("low mem %ld\t high mem %ld\t diff %ld\n",
	   (long)m_l, (long)m_high, (long)(m_high - ((char *)m_l)));

    if (OPT_ISSET(ops,'v')) {
	printf("\nThe number of bytes that were allocated using sbrk() and\n");
	printf("the number of bytes that were given back to the system\n");
	printf("via brk().\n");
    }
    printf("\nsbrk %d\tbrk %d\n", m_s, m_b);

    /* per-size malloc/free histogram */
    if (OPT_ISSET(ops,'v')) {
	printf("\nInformation about the sizes that were allocated or freed.\n");
	printf("For each size that were used the number of mallocs and\n");
	printf("frees is shown. Diff gives the difference between these\n");
	printf("values, i.e. the number of blocks of that size that is\n");
	printf("currently allocated. Total is the product of size and diff,\n");
	printf("i.e. the number of bytes that are allocated for blocks of\n");
	printf("this size. The last field gives the accumulated number of\n");
	printf("bytes for all sizes.\n");
    }
    printf("\nsize\tmalloc\tfree\tdiff\ttotal\tcum\n");
    for (i = 0, cu = 0; i < 1024; i++)
	if (m_m[i] || m_f[i]) {
	    to = (long) i * M_ISIZE * (m_m[i] - m_f[i]);
	    printf("%ld\t%d\t%d\t%d\t%ld\t%ld\n",
		   (long)i * M_ISIZE, m_m[i], m_f[i], m_m[i] - m_f[i],
		   to, (cu += to));
	}

    /* i == 1024 here: the overflow bucket for all "big" block sizes */
    if (m_m[i] || m_f[i])
	printf("big\t%d\t%d\t%d\n", m_m[i], m_f[i], m_m[i] - m_f[i]);

    if (OPT_ISSET(ops,'v')) {
	printf("\nThe list of memory blocks. For each block the following\n");
	printf("information is shown:\n\n");
	printf("num\tthe number of this block\n");
	printf("tnum\tlike num but counted separately for used and free\n");
	printf("\tblocks\n");
	printf("addr\tthe address of this block\n");
	printf("len\tthe length of the block\n");
	printf("state\tthe state of this block, this can be:\n");
	printf("\t used\tthis block is used for one big block\n");
	printf("\t free\tthis block is free\n");
	printf("\t small\tthis block is used for an array of small blocks\n");
	printf("cum\tthe accumulated sizes of the blocks, counted\n");
	printf("\tseparately for used and free blocks\n");
	printf("\nFor blocks holding small blocks the number of free\n");
	printf("blocks, the number of used blocks and the size of the\n");
	printf("blocks is shown. For otherwise used blocks the first few\n");
	printf("bytes are shown as an ASCII dump.\n");
    }
    /* walk every block from the bottom of the arena to the top */
    printf("\nblock list:\nnum\ttnum\taddr\t\tlen\tstate\tcum\n");
    for (m = m_l, mf = m_free, ii = fi = ui = 1; ((char *)m) < m_high;
	 m = (struct m_hdr *)(((char *)m) + M_ISIZE + m->len), ii++) {
	/* check whether this block is a small-block holder */
	for (j = 0, ms = NULL; j < M_NSMALL && !ms; j++)
	    for (ms = m_small[j]; ms; ms = ms->next)
		if (ms == m)
		    break;

	if (m == mf)
	    buf[0] = '\0';
	else if (m == ms)
	    sprintf(buf, "%ld %ld %ld", (long)(M_SNUM - ms->used),
		    (long)ms->used,
		    (long)(m->len - sizeof(struct m_hdr)) / M_SNUM + 1);

	else {
	    /* ASCII dump of the first few payload bytes */
	    for (i = 0, b = buf, c = (char *)&m->next; i < 20 && i < m->len;
		 i++, c++)
		*b++ = (*c >= ' ' && *c < 127) ? *c : '.';
	    *b = '\0';
	}

	printf("%d\t%d\t%ld\t%ld\t%s\t%ld\t%s\n", ii,
	       (m == mf) ? fi++ : ui++,
	       (long)m, (long)m->len,
	       (m == mf) ? "free" : ((m == ms) ? "small" : "used"),
	       (m == mf) ? (f += m->len) : (u += m->len),
	       buf);

	if (m == mf)
	    mf = mf->next;
    }

    if (OPT_ISSET(ops,'v')) {
	printf("\nHere is some information about the small blocks used.\n");
	printf("For each size the arrays with the number of free and the\n");
	printf("number of used blocks are shown.\n");
    }
    printf("\nsmall blocks:\nsize\tblocks (free/used)\n");

    for (i = 0; i < M_NSMALL; i++)
	if (m_small[i]) {
	    printf("%ld\t", (long)i * M_ISIZE);

	    for (ii = 0, m = m_small[i]; m; m = m->next) {
		printf("(%ld/%ld) ", (long)(M_SNUM - m->used),
		       (long)m->used);
		if (!((++ii) & 7))
		    printf("\n\t");
	    }
	    putchar('\n');
	}
    if (OPT_ISSET(ops,'v')) {
	printf("\n\nBelow is some information about the allocation\n");
	printf("behaviour of the zsh heaps. First the number of times\n");
	printf("pushheap(), popheap(), and freeheap() were called.\n");
    }
    printf("\nzsh heaps:\n\n");

    printf("push %d\tpop %d\tfree %d\n\n", h_push, h_pop, h_free);

    if (OPT_ISSET(ops,'v')) {
	printf("\nThe next list shows for several sizes the number of times\n");
	printf("memory of this size were taken from heaps.\n\n");
    }
    printf("size\tmalloc\ttotal\n");
    for (i = 0; i < 1024; i++)
	if (h_m[i])
	    printf("%ld\t%d\t%ld\n", (long)i * H_ISIZE, h_m[i],
		   (long)i * H_ISIZE * h_m[i]);
    if (h_m[1024])
	printf("big\t%d\n", h_m[1024]);

    unqueue_signals();
    return 0;
}
1878
1879 #endif
1880
1881 /**/
1882 #else /* not ZSH_MEM */
1883
/**/
mod_export void
zfree(void *p, UNUSED(int sz))
{
    /* without ZSH_MEM the size hint is irrelevant; just hand the pointer
       to the C library (free(NULL) is a no-op, so no check is needed) */
    free(p);
}
1890
/**/
mod_export void
zsfree(char *p)
{
    /* without ZSH_MEM the string's length is of no use; plain free() */
    free(p);
}
1897
1898 /**/
1899 #endif
1900