1 /***********************************************************************
2 * *
3 * This software is part of the ast package *
4 * Copyright (c) 1985-2013 AT&T Intellectual Property *
5 * and is licensed under the *
6 * Eclipse Public License, Version 1.0 *
7 * by AT&T Intellectual Property *
8 * *
9 * A copy of the License is available at *
10 * http://www.eclipse.org/org/documents/epl-v10.html *
11 * (with md5 checksum b35adb5213ca9657e911e9befb180842) *
12 * *
13 * Information and Software Systems Research *
14 * AT&T Research *
15 * Florham Park NJ *
16 * *
17 * Glenn Fowler <glenn.s.fowler@gmail.com> *
18 * David Korn <dgkorn@gmail.com> *
19 * Phong Vo <phongvo@gmail.com> *
20 * *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23
/* Empty stub emitted when malloc is provided elsewhere (_UWIN + _BLD_ast).
** Use an explicit (void) prototype: an empty () parameter list is an
** obsolescent K&R-style declaration in modern C.
*/
void _STUB_malloc(void){}
25
26 #else
27
28 #if _UWIN
29
30 #define calloc ______calloc
31 #define _ast_free ______free
32 #define malloc ______malloc
33 #define mallinfo ______mallinfo
34 #define mallopt ______mallopt
35 #define mstats ______mstats
36 #define realloc ______realloc
37
38 #define _STDLIB_H_ 1
39
40 extern int atexit(void(*)(void));
41 extern char* getenv(const char*);
42
43 #endif /*_UWIN*/
44
45 #include "vmhdr.h"
46 #include <errno.h>
47
48 #if _UWIN
49
50 #include <malloc.h>
51
52 #define _map_malloc 1
53 #define _mal_alloca 1
54
55 #undef calloc
56 #define calloc _ast_calloc
57 #undef _ast_free
58 #define free _ast_free
59 #undef malloc
60 #define malloc _ast_malloc
61 #undef mallinfo
62 typedef struct ______mallinfo Mallinfo_t;
63 #undef mallopt
64 #undef mstats
65 typedef struct ______mstats Mstats_t;
66 #undef realloc
67 #define realloc _ast_realloc
68
69 #endif /*_UWIN*/
70
71 /* If this code is to be used as native malloc then we won't have to worry about
72 ** freeing/resizing data allocated by some other malloc. As such, vmregion() can
73 ** be redefined to be Vmregion to bypass a superfluous computation.
74 */
75 #if VM_NATIVE
76 #define vmregion(d) Vmregion
77 #endif
78
79 #if __STD_C
80 #define F0(f,t0) f(t0)
81 #define F1(f,t1,a1) f(t1 a1)
82 #define F2(f,t1,a1,t2,a2) f(t1 a1, t2 a2)
83 #else
84 #define F0(f,t0) f()
85 #define F1(f,t1,a1) f(a1) t1 a1;
86 #define F2(f,t1,a1,t2,a2) f(a1, a2) t1 a1; t2 a2;
87 #endif /*__STD_C*/
88
89 /*
90 * define _AST_std_malloc=1 to force the standard malloc
91 * if _map_malloc is also defined then _ast_malloc etc.
92 * will simply call malloc etc.
93 */
94
95 #if !defined(_AST_std_malloc) && __CYGWIN__
96 #define _AST_std_malloc 1
97 #endif
98
99 /* Malloc compatibility functions
100 **
101 ** These can be used for debugging and are driven by the environment variable
102 ** VMALLOC_OPTIONS, a space-separated list of [no]name[=value] options:
103 **
104 ** abort if Vmregion==Vmdebug then VM_DBABORT is set,
105 ** otherwise _BLD_debug enabled assertions abort()
106 ** on failure
107 ** check=c enable check c[:d...], prefix c with "no" to disable
108 ** region: vmbest-integrity (on by default _BLD_DEBUG)
109 ** segment: _vmchkmem() anon memory availability checks
110 ** debug verbose debug trace to stderr
111 ** getmemory=f enable f[:g..] getmemory() functions if supported, all by default
112 ** anon: mmap(MAP_ANON)
113 ** break|sbrk: sbrk()
114 ** native: native malloc()
115 ** safe: safe sbrk() emulation via mmap(MAP_ANON)
116 ** zero: mmap(/dev/zero)
117 ** keep disable free -- if code works with this enabled then it
118 ** probably accesses free'd data
119 ** method=m sets Vmregion=m if not defined, m (Vm prefix optional)
120 ** best: best fit
121 ** debug: detailed verification checks
122 ** last: only last malloc() value can be freed
123 ** pagesize=n sets memory allocation page size to n
124 ** period=n sets Vmregion=Vmdebug if not defined, if
125 ** Vmregion==Vmdebug the region is checked every n ops
126 ** segsize=n sets memory allocation segment size to n
127 ** start=n sets Vmregion=Vmdebug if not defined, if
128 ** Vmregion==Vmdebug region checking starts after n ops
129 ** test=x enable tests du jour in the range 0x0001..0x8000
130 ** trace=f enable tracing to file f
131 ** usage print region usage stats diagnotics on each system allocation
132 ** verbose enable method and discipline initialization messages to stderr
133 ** warn=f sets Vmregion=Vmdebug if not defined, if
134 ** Vmregion==Vmdebug then warnings printed to file f
135 ** watch=a sets Vmregion=Vmdebug if not defined, if
136 ** Vmregion==Vmdebug then address a is watched
137 **
138 ** Output files are created if they don't exist. &n and /dev/fd/n name
139 ** the file descriptor n which must be open for writing. The pattern %p
140 ** in a file name is replaced by the process ID.
141 **
142 ** Written by Kiem-Phong Vo, phongvo@gmail.com, 01/16/94.
143 */
144
145 #if _sys_stat
146 #include <sys/stat.h>
147 #endif
148 #include <fcntl.h>
149
150 #ifdef S_IRUSR
151 #define CREAT_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)
152 #else
153 #define CREAT_MODE 0644
154 #endif
155
156 static Vmulong_t _Vmdbtime = 0; /* clock counting malloc/free/realloc */
157 static Vmulong_t _Vmdbstart = 0; /* start checking when time passes this */
158 static Vmulong_t _Vmdbcheck = 0; /* check region periodically with this */
159
160 #if __STD_C
atou(char ** sp)161 static Vmulong_t atou(char** sp)
162 #else
163 static Vmulong_t atou(sp)
164 char** sp;
165 #endif
166 {
167 char* s = *sp;
168 Vmulong_t v = 0;
169 int b;
170
171 if(s[0] == '0' && (s[1] == 'x' || s[1] == 'X') )
172 { for(s += 2; *s; ++s)
173 { if(*s >= '0' && *s <= '9')
174 v = (v << 4) + (*s - '0');
175 else if(*s >= 'a' && *s <= 'f')
176 v = (v << 4) + (*s - 'a') + 10;
177 else if(*s >= 'A' && *s <= 'F')
178 v = (v << 4) + (*s - 'A') + 10;
179 else break;
180 }
181 }
182 else
183 { for(; *s; ++s)
184 { if(*s >= '0' && *s <= '9')
185 v = v*10 + (*s - '0');
186 else break;
187 }
188 }
189 switch (*s)
190 {
191 case 'k':
192 case 'K':
193 b = 10;
194 break;
195 case 'm':
196 case 'M':
197 b = 20;
198 break;
199 case 'g':
200 case 'G':
201 b = 30;
202 break;
203 case 't':
204 case 'T':
205 b = 40;
206 break;
207 case 'p':
208 case 'P':
209 b = 50;
210 break;
211 case 'e':
212 case 'E':
213 b = 60;
214 break;
215 default:
216 b = 0;
217 break;
218 }
219 if (b)
220 { if (*++s == 'i' || *s == 'I')
221 { if (*++s == 'b' || *s == 'B')
222 s++;
223 v <<= b;
224 }
225 else
226 for (b /= 10; b; b--)
227 v *= 1000;
228 }
229 *sp = s;
230 return v;
231 }
232
233 #if __STD_C
insertpid(char * begs,char * ends)234 static char* insertpid(char* begs, char* ends)
235 #else
236 static char* insertpid(begs,ends)
237 char* begs;
238 char* ends;
239 #endif
240 { int pid;
241 char* s;
242
243 if((pid = getpid()) < 0)
244 return NIL(char*);
245
246 s = ends;
247 do
248 { if(s == begs)
249 return NIL(char*);
250 *--s = '0' + pid%10;
251 } while((pid /= 10) > 0);
252 while(s < ends)
253 *begs++ = *s++;
254
255 return begs;
256 }
257
#if __STD_C
static int createfile(char* file)
#else
static int createfile(file)
char*	file;
#endif
{
	/* Open the output file named by the pattern in file and return a
	** close-on-exec file descriptor, or -1 on error.  "%p" in the name
	** is replaced by the process id.  "&n" or "/dev/fd/n" dups the
	** already-open descriptor n instead of opening a path.
	*/
	char	buf[1024];
	char	*next, *endb;
	int	fd;

	/* expand the pattern into buf */
	next = buf;
	endb = buf + sizeof(buf);
	while(*file)
	{	if(*file == '%')
		{	switch(file[1])
			{
			case 'p' :	/* %p -> process id */
				if(!(next = insertpid(next,endb)) )
					return -1;
				file += 2;
				break;
			default :	/* any other %x is copied literally */
				goto copy;
			}
		}
		else
		{ copy:
			*next++ = *file++;
		}

		if(next >= endb)	/* expansion overflowed buf */
			return -1;
	}

	*next = '\0';
	file = buf;
	/* note: relies on && binding tighter than ||, and on the short-circuit
	** side effects (file += 1 / file += 8) to skip past the prefix
	*/
	if (*file == '&' && *(file += 1) || strncmp(file, "/dev/fd/", 8) == 0 && *(file += 8))
	{
		fd = fcntl((int)atou(&file), F_DUPFD_CLOEXEC, 0);
#if F_DUPFD_CLOEXEC == F_DUPFD
		/* no F_DUPFD_CLOEXEC support: set close-on-exec by hand */
		if (fd >= 0)
			SETCLOEXEC(fd);
#endif
	}
	else if (*file)
	{
		fd = open(file, O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, CREAT_MODE);
#if O_CLOEXEC == 0
		/* no O_CLOEXEC support: set close-on-exec by hand */
		if (fd >= 0)
			SETCLOEXEC(fd);
#endif
	}
	else
		return -1;
	return fd;
}
315
316 /* Initialize runtime options from the VMALLOC_OPTIONS env var.
317 ** This function is idempotent. Call at least once with boot==1
318 ** to initialize getmemory preferences (called by _vmstart() and
319 ** _vmheapinit()) and at least once with boot==2 to initialize heap
320 ** options (called by _vmstart()), or call with boot==3 to do both.
321 */
void _vmoptions(int boot)
{
	/* Parse the VMALLOC_OPTIONS environment variable (see the option
	** summary in the comment block above).  boot&1 handles phase-1
	** (getmemory preferences), boot&2 handles phase-2 (heap/debug
	** options); each phase is idempotent via the VM_GETMEMORY flag.
	*/
	char		*s, *t, *v;
	Vmulong_t	n;
	int		fd;
	int		b;
	int		c;
	char		buf[1024];
	char		*trace = NIL(char*);
	Vmalloc_t	*vm = NIL(Vmalloc_t*);

	/* static so the string fetched in phase 1 is still available when
	** phase 2 runs in a later call
	*/
	static char	*options;

	if (boot & 1)
	{	if (_Vmassert & VM_GETMEMORY)
			return;	/* phase 1 already done */
		options = getenv("VMALLOC_OPTIONS");
	}
	else if (!(_Vmassert & VM_GETMEMORY))
		return;	/* phase 2 requires phase 1 to have run first */
	if (options && options[0])
	{	/* copy option string to a writable buffer */
		for(s = &buf[0], t = options, v = &buf[sizeof(buf)-1]; s < v; ++s)
			if((*s = *t++) == 0 )
				break;
		*s = 0;

		for(s = buf;; )
		{	/* skip blanks to option name */
			while (*s == ' ' || *s == '\t' || *s == '\r' || *s == '\n' || *s == ',')
				s++;
			if (*(t = s) == 0)
				break;

			/* split "name=value": t -> name, v -> value (or NIL) */
			v = NIL(char*);
			while (*s)
			{	if (*s == ' ' || *s == '\t' || *s == '\r' || *s == '\n' || *s == ',')
				{	*s++ = 0;	/* end of name */
					break;
				}
				else if (!v && *s == '=')
				{	*s++ = 0;	/* end of name */
					if (*(v = s) == 0)
						v = NIL(char*);
				}
				else s++;
			}
			if (t[0] == 'n' && t[1] == 'o')	/* "noXXX" disables, i.e. ignore */
				continue;
			/* dispatch on the first letter(s) of the option name */
			switch (t[0])
			{
			case 'a':	/* abort */
				if (boot & 2)
				{	if (!vm)
						vm = vmopen(Vmdcsystem, Vmdebug, 0);
					if (vm && vm->meth.meth == VM_MTDEBUG)
						vmset(vm, VM_DBABORT, 1);
					else	_Vmassert |= VM_abort;
				}
				break;
			case 'c':	/* check=c[:d...] address/integrity checks */
				if ((boot & 2) && v)
					do
					{
						if (v[0] == 'n' && v[1] == 'o')
						{
							v += 2;
							if (v[0] == '-')
								v++;
							c = 0;	/* "no" prefix: disable */
						}
						else
							c = 1;
						switch (v[0])
						{
						case 'r':	/* region */
							b = VM_check_reg;
							break;
						case 's':	/* segment */
							b = VM_check_seg;
							break;
						default:
							b = 0;
							break;
						}
						if (c)
							_Vmassert |= b;
						else
							_Vmassert &= ~b;
					} while ((v = strchr(v, ':')) && ++v);
				break;
			case 'd':	/* debug */
				if (boot & 2)
					_Vmassert |= VM_debug;
				break;
			case 'g':	/* getmemory=f[:g...] preference */
				if ((boot & 1) && v)
					do
					{
						if (v[0] == 'n' && v[1] == 'o')
						{
							v += 2;
							if (v[0] == '-')
								v++;
							c = 0;	/* "no" prefix: disable */
						}
						else
							c = 1;
						/* strip optional "mm"/"map" prefix (e.g. "mmap...") */
						if (v[0] == 'm' && v[1] == 'm')
							v++;
						if (v[0] == 'm' && v[1] == 'a' && v[2] == 'p')
							v += 3;
						switch (v[0])
						{
						case 'a':	/* anon */
							b = VM_anon;
							break;
						case 'b':	/* break/sbrk */
							b = VM_break;
							break;
						case 'm':	/* malloc (native) */
						case 'n':	/* native */
							b = VM_native;
							break;
						case 's':
							switch (v[1])
							{
							case 'b':	/* sbrk */
								b = VM_break;
								break;
							default:	/* safe */
								b = VM_safe;
								break;
							}
							break;
						case 'z':	/* zero: /dev/zero */
							b = VM_zero;
							break;
						default:
							b = 0;
							break;
						}
						if (c)
							_Vmassert |= b;
						else
							_Vmassert &= ~b;
					} while ((v = strchr(v, ':')) && ++v);
				break;
			case 'k':	/* keep -- disable free() */
				if (boot & 2)
					_Vmassert |= VM_keep;
				break;
			case 'm':	/* method=<method> */
				if ((boot & 2) && v && !vm)
				{
					/* accept an optional "Vm"/"vm" prefix */
					if ((v[0] == 'V' || v[0] == 'v') && (v[1] == 'M' || v[1] == 'm'))
						v += 2;
					switch (v[0])
					{
					case 'b':	/* best */
						vm = Vmheap;
						break;
					case 'd':	/* debug */
						vm = vmopen(Vmdcsystem, Vmdebug, 0);
						break;
					case 'l':	/* last */
						vm = vmopen(Vmdcsystem, Vmlast, 0);
						break;
					case 'p':	/* pool */
						vm = vmopen(Vmdcsystem, Vmpool, 0);
						break;
					}
				}
				break;
			case 'p':	/* pagesize=<size> period=<count> */
				switch (t[1])
				{
				case 'a':	/* pagesize */
					if (boot & 1)
						_Vmpagesize = atou(&v);
					break;
				case 'e':	/* period: check debug region every n ops */
					if (boot & 2)
					{
						if (!vm)
							vm = vmopen(Vmdcsystem, Vmdebug, 0);
						if (v && vm && vm->meth.meth == VM_MTDEBUG)
							_Vmdbcheck = atou(&v);
					}
					break;
				}
				break;
			case 's':	/* segsize=<size> start=<count> */
				switch (t[1])
				{
				case 'e':	/* segsize */
					if (boot & 1)
						_Vmsegsize = atou(&v);
					break;
				case 't':	/* start: begin checking after n ops */
					if (boot & 2)
					{
						if (!vm)
							vm = vmopen(Vmdcsystem, Vmdebug, 0);
						if (v && vm && vm->meth.meth == VM_MTDEBUG)
							_Vmdbstart = atou(&v);
					}
					break;
				}
				break;
			case 't':	/* test || trace=<path> */
				if (v)
				{
					if ((boot & 1) && t[1] == 'e')	/* test */
						_Vmassert |= atou(&v) & VM_test;
					if ((boot & 2) && t[1] == 'r')	/* trace=<path> */
						trace = v;	/* opened after the parse loop */
				}
				break;
			case 'u':	/* usage */
				if (boot & 1)
					_Vmassert |= VM_usage;
				break;
			case 'v':	/* verbose */
				if (boot & 1)
					_Vmassert |= VM_verbose;
				break;
			case 'w':
				if ((boot & 2) && t[1] == 'a')
				{	switch (t[2])
					{
					case 'r':	/* warn=<path> */
						if (!vm)
							vm = vmopen(Vmdcsystem, Vmdebug, 0);
						if (vm && vm->meth.meth == VM_MTDEBUG &&
						    v && (fd = createfile(v)) >= 0 )
							vmdebug(fd);
						break;
					case 't':	/* watch=<addr> */
						if (!vm)
							vm = vmopen(Vmdcsystem, Vmdebug, 0);
						if (vm && vm->meth.meth == VM_MTDEBUG &&
						    v && (n = atou(&v)) > 0 )
							vmdbwatch((Void_t*)n);
						break;
					}
				}
				break;
			}
		}

		if (vm) /* slip the new region in to drive malloc/free/realloc */
		{	if (vm->meth.meth == VM_MTDEBUG && _Vmdbcheck == 0 )
				_Vmdbcheck = 1;
			Vmregion = vm;
		}

		/* enable tracing */
		if (trace && (fd = createfile(trace)) >= 0)
			vmtrace(fd);
	}
	/* mark phase 1 complete so later calls are no-ops */
	if ((boot & 1) && !(_Vmassert & VM_GETMEMORY))
		_Vmassert |= VM_GETMEMORY;
}
586
587 #if ( !_std_malloc || !_BLD_ast ) && !_AST_std_malloc
588
589 #if !_map_malloc
590
591 #undef calloc
592 #undef cfree
593 #undef free
594 #undef mallinfo
595 #undef malloc
596 #undef mallopt
597 #undef memalign
598 #undef posix_memalign
599 #undef mstats
600 #undef realloc
601 #undef valloc
602
603 #if _malloc_hook
604
605 #include <malloc.h>
606
607 #undef calloc
608 #undef cfree
609 #undef free
610 #undef malloc
611 #undef memalign
612 #undef posix_memalign
613 #undef realloc
614
615 #define calloc _ast_calloc
616 #define cfree _ast_cfree
617 #define free _ast_free
618 #define malloc _ast_malloc
619 #define memalign _ast_memalign
620 #define posix_memalign _ast_posix_memalign
621 #define realloc _ast_realloc
622
623 #endif
624
625 #endif
626
627 #if _WINIX
628
629 #include <ast_windows.h>
630
631 #if _UWIN
632
633 #define VMRECORD(p) _vmrecord(p)
634 #define VMBLOCK { int _vmblock = _sigblock();
635 #define VMUNBLOCK _sigunblock(_vmblock); }
636
637 extern int _sigblock(void);
638 extern void _sigunblock(int);
639 extern unsigned long _record[2048];
640
_vmrecord(Void_t * p)641 __inline Void_t* _vmrecord(Void_t* p)
642 {
643 register unsigned long v = ((unsigned long)p)>>16;
644
645 _record[v>>5] |= 1<<((v&0x1f));
646 return p;
647 }
648
649 #else
650
651 #define getenv(s) lcl_getenv(s)
652
/* Minimal getenv() replacement for native Windows builds, implemented
** via GetEnvironmentVariable.  Returns a pointer to a static buffer
** (not reentrant) or 0 if the variable is unset or too long.
*/
static char*
lcl_getenv(const char* s)
{
	int		n;
	static char	buf[512];

	/* NOTE(review): GetEnvironmentVariable returns 0 when the variable
	** is unset and the required size when buf is too small -- the n >
	** sizeof(buf) test presumably rejects the truncated case; confirm
	** against the Win32 documentation for the exact boundary behavior
	*/
	if (!(n = GetEnvironmentVariable(s, buf, sizeof(buf))) || n > sizeof(buf))
		return 0;
	return buf;
}
663
664 #endif /* _UWIN */
665
666 #endif /* _WINIX */
667
668 #ifndef VMRECORD
669 #define VMRECORD(p) (p)
670 #define VMBLOCK
671 #define VMUNBLOCK
672 #endif
673
674 #if defined(__EXPORT__)
675 #define extern extern __EXPORT__
676 #endif
677
678 /* not sure of all the implications -- 0 is conservative for now */
679 #define USE_NATIVE 0 /* native free/realloc on non-vmalloc ptrs */
680
681 #if USE_NATIVE
682 static void* native_realloc _ARG_((void*, size_t));
683 static void native_free _ARG_((void*));
684 #endif
685
686 #define VM_STARTING 1
687 #define VM_STARTED 2
688 static unsigned int _Vmstart = 0; /* calling _vmstart() just once */
689 #define VMPROLOGUE(f) \
690 { if(_Vmstart != VM_STARTED) _vmstart(f); \
691 if(_Vmdbcheck && Vmregion->meth.meth == VM_MTDEBUG) \
692 { _Vmdbtime += 1; \
693 if(_Vmdbtime >= _Vmdbstart && (_Vmdbtime % _Vmdbcheck) == 0 ) \
694 vmset(Vmregion, VM_DBCHECK, 1); \
695 } \
696 }
697 #define VMEPILOGUE(f) \
698 { if(_Vmdbcheck && Vmregion->meth.meth == VM_MTDEBUG) \
699 vmset(Vmregion, VM_DBCHECK, 0); \
700 }
701
/* One-time bootstrap of the malloc interposition layer: parse options,
** initialize the heap, and install the chosen region.  Thread-safe via
** an atomic three-state flag (_Vmstart).  freeing!=0 means the caller
** is free(), which must not deadlock if invoked from a signal handler
** during initialization.  Returns 0 on success, -1 on heap failure.
*/
static int _vmstart(int freeing)
{
	unsigned int	start;
	char		*file;
	int		line;
	Void_t		*func;

	/* phase 1: getmemory() preferences (idempotent) */
	_vmoptions(1);

	if (_Vmassert & VM_debug) debug_printf(2, "%s:%d: _Vmstart=%s\n", _Vmstart == 0 ? "UNINITIALIZED" : _Vmstart == VM_STARTING ? "STARTING" : _Vmstart == VM_STARTED ? "STARTED" : "ERROR" );

	/* compete for the right to do initialization */
	if((start = asocasint(&_Vmstart, 0, VM_STARTING)) == VM_STARTED )
		return 0;
	else if(start == VM_STARTING) /* wait until initialization is done */
	{	asospindecl();
		int	i = 0;

		/*
		 * we allow free() to be called by signal handlers and not deadlock
		 * not so for *alloc()
		 */

		for(asospininit();; asospinnext())
		{	if((start = asogetint(&_Vmstart)) == VM_STARTED)
				return 0;
			if(freeing && ++i >= 10)
				return 0;	/* give up spinning; free() callers bail out */
		}
	}

	/* initialize the heap if not done yet */
	if(_vmheapinit(NIL(Vmalloc_t*)) != Vmheap )
	{	write(9, "vmalloc: panic: heap initialization error\n", 42);
		return -1;
	}
	/**/DEBUG_ASSERT(Vmheap->data != NIL(Vmdata_t*));

	/* setting options. note that Vmregion may change */
	VMFLF(Vmregion, file, line, func);
	_vmoptions(2);
	Vmregion->file = file; /* reset values for the real call */
	Vmregion->line = line;
	Vmregion->func = func;

	/* publish: initialization complete */
	asocasint(&_Vmstart, VM_STARTING, VM_STARTED);

	if (_Vmassert & VM_verbose) debug_printf(2, "vmalloc: method=%s\n", Vmregion->meth.meth == VM_MTBEST ? "best" : Vmregion->meth.meth == VM_MTDEBUG ? "debug" : Vmregion->meth.meth == VM_MTLAST ? "last" : Vmregion->meth.meth == VM_MTPOOL ? "pool" : "unknown");

	return 0;
}
753
calloc(size_t n_obj,size_t s_obj)754 extern Void_t* calloc(size_t n_obj, size_t s_obj)
755 {
756 Void_t *addr;
757
758 VMPROLOGUE(0);
759 addr = (*Vmregion->meth.resizef)(Vmregion, NIL(Void_t*), n_obj*s_obj, VM_RSZERO, 0);
760 VMEPILOGUE(0);
761
762 return VMRECORD(addr);
763 }
764
malloc(size_t size)765 extern Void_t* malloc(size_t size)
766 {
767 Void_t *addr;
768
769 VMPROLOGUE(0);
770 addr = (*Vmregion->meth.allocf)(Vmregion, size, 0);
771 VMEPILOGUE(0);
772
773 return VMRECORD(addr);
774 }
775
/* realloc(3): resize data to size bytes, copying/moving as needed.
** A NULL data delegates to malloc().  Data not belonging to any
** vmalloc region is either passed to the native realloc (USE_NATIVE)
** or rejected with NULL.
*/
extern Void_t* realloc(Void_t* data, size_t size)
{
	Void_t	*addr;
	Vmalloc_t *vm;

	VMPROLOGUE(0);

	if(!data)
		/* note: returns without this call's VMEPILOGUE -- harmless,
		** since malloc() runs its own prologue/epilogue pair
		*/
		return malloc(size);
	else if((vm = vmregion(data)) )
		addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 0);
	else /* not our data */
#if USE_NATIVE
		addr = native_realloc(data, size);
#else
		addr = NIL(Void_t*);
#endif

	VMEPILOGUE(0);
	return VMRECORD(addr);
}
797
/* free(3): release data back to its owning region.  A no-op for NULL,
** and for all pointers when VM_keep is set (the "keep" debug option).
** Pointers not owned by any vmalloc region are passed to the native
** free only when USE_NATIVE is enabled; otherwise silently ignored.
*/
extern void free(Void_t* data)
{
	Vmalloc_t	*vm;

	VMPROLOGUE(1);

	if(data && !(_Vmassert & VM_keep))
	{	if((vm = vmregion(data)) )
			(void)(*vm->meth.freef)(vm, data, 0);
#if USE_NATIVE
		else /* not our data */
			native_free(data);
#endif
	}

	VMEPILOGUE(1);
}
815
/* cfree(3): historical alias for free(), kept for old callers. */
extern void cfree(Void_t* data)
{
	free(data);
}
820
/* memalign(3): allocate size bytes aligned to align.
** VMBLOCK/VMUNBLOCK bracket the call with signal blocking on UWIN
** (each macro contains one half of a brace pair -- do not reindent).
*/
extern Void_t* memalign(size_t align, size_t size)
{
	Void_t	*addr;

	VMPROLOGUE(0);

	VMBLOCK
	addr = (*Vmregion->meth.alignf)(Vmregion, size, align, 0);
	VMUNBLOCK

	VMEPILOGUE(0);

	return VMRECORD(addr);
}
835
aligned_alloc(size_t align,size_t size)836 extern Void_t* aligned_alloc(size_t align, size_t size)
837 {
838 return memalign(align, ROUND(size,align));
839 }
840
/* posix_memalign(3): store an align-aligned block of size bytes in
** *memptr.  Returns 0 on success, EINVAL for a bad alignment (zero,
** not a multiple of sizeof(Void_t*), or not a power of two), ENOMEM
** on allocation failure.  *memptr is untouched on error.
*/
extern int posix_memalign(Void_t **memptr, size_t align, size_t size)
{
	Void_t	*aligned;

	if(align == 0)
		return EINVAL;
	if((align % sizeof(Void_t*)) != 0)
		return EINVAL;		/* must be a multiple of the pointer size */
	if((align & (align-1)) != 0)
		return EINVAL;		/* must be a power of two */

	aligned = memalign(align, size);
	if(!aligned)
		return ENOMEM;

	*memptr = aligned;
	return 0;
}
854
valloc(size_t size)855 extern Void_t* valloc(size_t size)
856 {
857 Void_t *addr;
858
859 VMPROLOGUE(0);
860
861 VMPAGESIZE();
862 addr = memalign(_Vmpagesize, size);
863
864 VMEPILOGUE(0);
865
866 return VMRECORD(addr);
867 }
868
pvalloc(size_t size)869 extern Void_t* pvalloc(size_t size)
870 {
871 Void_t *addr;
872
873 VMPROLOGUE(0);
874
875 VMPAGESIZE();
876 addr = memalign(_Vmpagesize, ROUND(size,_Vmpagesize));
877
878 VMEPILOGUE(0);
879 return VMRECORD(addr);
880 }
881
882 #if !_PACKAGE_ast
/* strdup(3): duplicate s into malloc-ed storage; NULL yields NULL.
** Provided only outside _PACKAGE_ast builds.
*/
char* strdup(const char* s)
{
	char	*copy;
	size_t	len;

	if(!s)
		return (char*)0;

	len = strlen(s) + 1;	/* include the terminating NUL */
	if((copy = malloc(len)) )
		memcpy(copy, s, len);
	return copy;
}
897 #endif /* _PACKAGE_ast */
898
899 #if !_lib_alloca || _mal_alloca
900 #ifndef _stk_down
901 #define _stk_down 0
902 #endif
/* Frame bookkeeping for the heap-based alloca() emulation: each
** allocation remembers the caller's stack position (addr) and links
** into a list so stale frames can be reclaimed on later calls.  The
** union pads the header to MEM_ALIGN bytes.
*/
typedef struct Alloca_s	Alloca_t;
union Alloca_u
{	struct
	{	char*		addr;	/* stack position at allocation time */
		Alloca_t*	next;	/* previously allocated frame */
	} head;
	char	array[MEM_ALIGN];
};
struct Alloca_s
{	union Alloca_u	head;
	Vmuchar_t	data[1];	/* caller's data starts here */
};

/* alloca() emulation: allocate from the heap and free automatically
** once the owning stack frame has been popped (detected by comparing
** the current stack position against each recorded frame, with
** _stk_down giving the stack growth direction).
*/
extern Void_t* alloca(size_t size)
{	char		array[MEM_ALIGN];	/* marks the current stack position */
	char*		file;
	int		line;
	Void_t*		func;
	Alloca_t*	f;
	Vmalloc_t	*vm;
	static Alloca_t*	Frame;	/* list of live frames, newest first */

	VMPROLOGUE(0);

	VMFLF(Vmregion,file,line,func); /* save info before freeing frames */

	while(Frame) /* free unused frames */
	{	if(( _stk_down && &array[0] > Frame->head.head.addr) ||
		   (!_stk_down && &array[0] < Frame->head.head.addr) )
		{	f = Frame; Frame = f->head.head.next;
			if((vm = vmregion(f)) )
				(void)(*vm->meth.freef)(vm, f, 0);
			/* else: something bad happened. just keep going */
		}
		else break;
	}

	Vmregion->file = file; /* restore file/line info before allocation */
	Vmregion->line = line;
	Vmregion->func = func;

	f = (Alloca_t*)(*Vmregion->meth.allocf)(Vmregion, size+sizeof(Alloca_t)-1, 0);

	/* if f is NULL, this mimics a stack overflow with a memory error! */
	/* NOTE(review): the stores below dereference f unconditionally --
	** an allocation failure crashes here by design per the comment above
	*/
	f->head.head.addr = &array[0];
	f->head.head.next = Frame;
	Frame = f;

	VMEPILOGUE(0);

	return (Void_t*)f->data;
}
955 #endif /*!_lib_alloca || _mal_alloca*/
956
957 #if _map_malloc
958
959 /* _ast_* versions of malloc & friends */
960 #else
961
962 #if _malloc_hook
963
/* glibc __free_hook shim: route libc free() through this allocator.
** caller is required by the hook signature and unused here.
*/
static void vm_free_hook(void* ptr, const void* caller)
{
	free(ptr);
}
968
/* glibc __malloc_hook shim: route libc malloc() through this allocator. */
static void* vm_malloc_hook(size_t size, const void* caller)
{
	return malloc(size);
}
976
/* glibc __memalign_hook shim: route libc memalign() through this allocator. */
static void* vm_memalign_hook(size_t align, size_t size, const void* caller)
{
	return memalign(align, size);
}
984
/* glibc __realloc_hook shim: route libc realloc() through this allocator. */
static void* vm_realloc_hook(void* ptr, size_t size, const void* caller)
{
	return realloc(ptr, size);
}
992
/* Install the vmalloc shims into glibc's malloc hook pointers so every
** allocation routed through libc lands in this allocator.
** NOTE(review): __malloc_hook and friends were removed from modern
** glibc -- this path presumably only compiles where _malloc_hook was
** detected by the build probes.
*/
static void vm_initialize_hook(void)
{
	__free_hook = vm_free_hook;
	__malloc_hook = vm_malloc_hook;
	__memalign_hook = vm_memalign_hook;
	__realloc_hook = vm_realloc_hook;
}

/* glibc calls this hook once at malloc initialization time */
typeof (__malloc_initialize_hook) __malloc_initialize_hook = vm_initialize_hook;
1002
1003 #if 0 /* 2012-02-29 this may be needed to cover shared libs */
1004
1005 void __attribute__ ((constructor)) vm_initialize_initialize_hook(void)
1006 {
1007 vm_initialize_hook();
1008 __malloc_initialize_hook = vm_initialize_hook;
1009 }
1010
1011 #endif
1012
1013 #else
1014
1015 /* intercept _* __* __libc_* variants */
1016
/* Forward the leading-underscore aliases (_malloc, __malloc,
** __libc_malloc, ...) some libcs export to the overriding functions
** above, so every entry point lands in vmalloc.  F1/F2 expand to
** ANSI or K&R definitions depending on __STD_C.
*/
#if __lib__malloc
extern Void_t*	F2(_calloc, size_t,n, size_t,m) { return calloc(n, m); }
extern Void_t	F1(_cfree, Void_t*,p) { free(p); }
extern Void_t	F1(_free, Void_t*,p) { free(p); }
extern Void_t*	F1(_malloc, size_t,n) { return malloc(n); }
#if _lib_memalign
extern Void_t*	F2(_memalign, size_t,a, size_t,n) { return memalign(a, n); }
#endif
#if _lib_pvalloc
extern Void_t*	F1(_pvalloc, size_t,n) { return pvalloc(n); }
#endif
extern Void_t*	F2(_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
#if _lib_valloc
extern Void_t*	F1(_valloc, size_t,n) { return valloc(n); }
#endif
#endif

#if _lib___malloc
extern Void_t*	F2(__calloc, size_t,n, size_t,m) { return calloc(n, m); }
extern Void_t	F1(__cfree, Void_t*,p) { free(p); }
extern Void_t	F1(__free, Void_t*,p) { free(p); }
extern Void_t*	F1(__malloc, size_t,n) { return malloc(n); }
#if _lib_memalign
extern Void_t*	F2(__memalign, size_t,a, size_t,n) { return memalign(a, n); }
#endif
#if _lib_pvalloc
extern Void_t*	F1(__pvalloc, size_t,n) { return pvalloc(n); }
#endif
extern Void_t*	F2(__realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
#if _lib_valloc
extern Void_t*	F1(__valloc, size_t,n) { return valloc(n); }
#endif
#endif

#if _lib___libc_malloc
extern Void_t*	F2(__libc_calloc, size_t,n, size_t,m) { return calloc(n, m); }
extern Void_t	F1(__libc_cfree, Void_t*,p) { free(p); }
extern Void_t	F1(__libc_free, Void_t*,p) { free(p); }
extern Void_t*	F1(__libc_malloc, size_t,n) { return malloc(n); }
#if _lib_memalign
extern Void_t*	F2(__libc_memalign, size_t,a, size_t,n) { return memalign(a, n); }
#endif
#if _lib_pvalloc
extern Void_t*	F1(__libc_pvalloc, size_t,n) { return pvalloc(n); }
#endif
extern Void_t*	F2(__libc_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
#if _lib_valloc
extern Void_t*	F1(__libc_valloc, size_t,n) { return valloc(n); }
#endif
#endif
1067
1068 #endif /* _malloc_hook */
1069
1070 #endif /* _map_malloc */
1071
1072 #undef extern
1073
1074 #if _hdr_malloc /* need the mallint interface for statistics, etc. */
1075
1076 #undef calloc
1077 #define calloc ______calloc
1078 #undef cfree
1079 #define cfree ______cfree
1080 #undef free
1081 #define free ______free
1082 #undef malloc
1083 #define malloc ______malloc
1084 #undef pvalloc
1085 #define pvalloc ______pvalloc
1086 #undef realloc
1087 #define realloc ______realloc
1088 #undef valloc
1089 #define valloc ______valloc
1090
1091 #if !_UWIN
1092
1093 #include <malloc.h>
1094
1095 typedef struct mallinfo Mallinfo_t;
1096 typedef struct mstats Mstats_t;
1097
1098 #endif
1099
1100 #if defined(__EXPORT__)
1101 #define extern __EXPORT__
1102 #endif
1103
#if _lib_mallopt
/* mallopt(3): accepted for compatibility but ignored -- vmalloc takes
** its tuning from VMALLOC_OPTIONS instead.  Always returns 0.
*/
#if __STD_C
extern int mallopt(int cmd, int value)
#else
extern int mallopt(cmd, value)
int	cmd;
int	value;
#endif
{
	/* still run the prologue so initialization/debug clocking happen */
	VMPROLOGUE(0);
	VMEPILOGUE(0);
	return 0;
}
#endif /*_lib_mallopt*/
1118
#if _lib_mallinfo && _mem_arena_mallinfo
/* mallinfo(3): report region statistics in the traditional struct.
** Only the arena/ordblks/uordblks/fordblks fields are filled in; the
** rest stay zero.
*/
#if __STD_C
extern Mallinfo_t mallinfo(void)
#else
extern Mallinfo_t mallinfo()
#endif
{
	Vmstat_t	sb;
	Mallinfo_t	mi;

	VMPROLOGUE(0);
	VMEPILOGUE(0);

	memset(&mi,0,sizeof(mi));
	if(vmstat(Vmregion,&sb) >= 0)
	{	mi.arena = sb.extent;		/* total region size */
		mi.ordblks = sb.n_busy+sb.n_free;	/* total block count */
		mi.uordblks = sb.s_busy;	/* bytes in busy blocks */
		mi.fordblks = sb.s_free;	/* bytes in free blocks */
	}
	return mi;
}
#endif /* _lib_mallinfo */
1142
#if _lib_mstats && _mem_bytes_total_mstats
/* mstats(3) (BSD): report region statistics in struct mstats form. */
#if __STD_C
extern Mstats_t mstats(void)
#else
extern Mstats_t mstats()
#endif
{
	Vmstat_t	sb;
	Mstats_t	ms;

	VMPROLOGUE(0);
	VMEPILOGUE(0);

	memset(&ms,0,sizeof(ms));
	if(vmstat(Vmregion,&sb) >= 0)
	{	ms.bytes_total = sb.extent;	/* total region size */
		ms.chunks_used = sb.n_busy;
		ms.bytes_used = sb.s_busy;
		ms.chunks_free = sb.n_free;
		ms.bytes_free = sb.s_free;
	}
	return ms;
}
#endif /*_lib_mstats*/
1167
1168 #undef extern
1169
1170 #endif/*_hdr_malloc*/
1171
1172 #else
1173
1174 /*
1175 * even though there is no malloc override, still provide
1176 * _ast_* counterparts for object compatibility
1177 */
1178
1179 #undef calloc
1180 extern Void_t* calloc _ARG_((size_t, size_t));
1181
1182 #undef cfree
1183 extern void cfree _ARG_((Void_t*));
1184
1185 #undef free
1186 extern void free _ARG_((Void_t*));
1187
1188 #undef malloc
1189 extern Void_t* malloc _ARG_((size_t));
1190
1191 #if _lib_memalign
1192 #undef memalign
1193 extern Void_t* memalign _ARG_((size_t, size_t));
1194 #endif
1195
1196 #if _lib_pvalloc
1197 #undef pvalloc
1198 extern Void_t* pvalloc _ARG_((size_t));
1199 #endif
1200
1201 #undef realloc
1202 extern Void_t* realloc _ARG_((Void_t*, size_t));
1203
1204 #if _lib_valloc
1205 #undef valloc
1206 extern Void_t* valloc _ARG_((size_t));
1207 #endif
1208
1209 #if defined(__EXPORT__)
1210 #define extern __EXPORT__
1211 #endif
1212
/* No malloc override in this configuration: provide the _ast_* names
** as thin wrappers over the native functions so objects compiled
** against the override layer still link and behave identically.
*/
extern Void_t*	F2(_ast_calloc, size_t,n, size_t,m) { return calloc(n, m); }
extern Void_t	F1(_ast_cfree, Void_t*,p) { free(p); }
extern Void_t	F1(_ast_free, Void_t*,p) { free(p); }
extern Void_t*	F1(_ast_malloc, size_t,n) { return malloc(n); }
#if _lib_memalign
extern Void_t*	F2(_ast_memalign, size_t,a, size_t,n) { return memalign(a, n); }
#endif
#if _lib_pvalloc
extern Void_t*	F1(_ast_pvalloc, size_t,n) { return pvalloc(n); }
#endif
extern Void_t*	F2(_ast_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
#if _lib_valloc
extern Void_t*	F1(_ast_valloc, size_t,n) { return valloc(n); }
#endif
1227
1228 #undef extern
1229
1230 #if _hdr_malloc
1231
1232 #undef mallinfo
1233 #undef mallopt
1234 #undef mstats
1235
1236 #define calloc ______calloc
1237 #define cfree ______cfree
1238 #define free ______free
1239 #define malloc ______malloc
1240 #define pvalloc ______pvalloc
1241 #define realloc ______realloc
1242 #define valloc ______valloc
1243
1244 #if !_UWIN
1245
1246 #include <malloc.h>
1247
1248 typedef struct mallinfo Mallinfo_t;
1249 typedef struct mstats Mstats_t;
1250
1251 #endif
1252
1253 #if defined(__EXPORT__)
1254 #define extern __EXPORT__
1255 #endif
1256
/* _ast_* pass-throughs for the mallint statistics interface when the
** native malloc is in use.
*/
#if _lib_mallopt
extern int	F2(_ast_mallopt, int,cmd, int,value) { return mallopt(cmd, value); }
#endif

#if _lib_mallinfo && _mem_arena_mallinfo
extern Mallinfo_t	F0(_ast_mallinfo, void) { return mallinfo(); }
#endif

#if _lib_mstats && _mem_bytes_total_mstats
extern Mstats_t	F0(_ast_mstats, void) { return mstats(); }
#endif
1268
1269 #undef extern
1270
1271 #endif /*_hdr_malloc*/
1272
1273 #endif /*!_std_malloc*/
1274
1275 /*
1276 * ast semi-private workaround for system functions
1277 * that misbehave by passing bogus addresses to free()
1278 *
1279 * not prototyped in any header to keep it ast semi-private
1280 *
1281 * to keep malloc() data by disabling free()
1282 * extern _vmkeep(int);
1283 * int r = _vmkeep(1);
1284 * and to restore to the previous state
1285 * (void)_vmkeep(r);
1286 */
1287
1288 #if defined(__EXPORT__)
1289 #define extern __EXPORT__
1290 #endif
1291
/* Toggle the VM_keep flag (free() becomes a no-op while set) and
** return the previous state, so callers can save/restore it around
** system functions known to free bogus addresses (see comment above).
*/
extern int
#if __STD_C
_vmkeep(int v)
#else
_vmkeep(v)
int	v;
#endif
{
	int	r;

	r = !!(_Vmassert & VM_keep);	/* previous state as 0/1 */
	if (v)
		_Vmassert |= VM_keep;
	else
		_Vmassert &= ~VM_keep;
	return r;
}
1309
1310 #undef extern
1311
1312 #if USE_NATIVE
1313
1314 #undef realloc
1315
1316 extern void* realloc(void*, size_t);
1317
/* Forward to the native realloc() (the #undef above restored the real
** symbol) for pointers not owned by any vmalloc region.
*/
static void* native_realloc(void* p, size_t n)
{
	return realloc(p, n);
}
1322
1323 #undef free
1324
1325 extern void free(void*);
1326
/* Forward to the native free() for pointers not owned by any vmalloc
** region.
*/
static void native_free(void* p)
{
	free(p);
}
1331
1332 #endif
1333
1334 #endif /*_UWIN*/
1335