1 /*
2 * mpatrol
3 * A library for controlling and tracing dynamic memory allocations.
4 * Copyright (C) 1997-2002 Graeme S. Roy <graeme.roy@analog.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
15 *
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
19 * MA 02111-1307, USA.
20 */
21
22
23 /*
24 * Allocation information. The functions in this module deal primarily with
25 * the secondary information associated with memory allocations.
26 */
27
28
29 #include "info.h"
30 #include "diag.h"
31 #if MP_THREADS_SUPPORT
32 #include "mutex.h"
33 #endif /* MP_THREADS_SUPPORT */
34 #include "utils.h"
35 #include <stdlib.h>
36 #include <errno.h>
37
38
39 #if MP_IDENT_SUPPORT
40 #ident "$Id: info.c,v 1.101 2002/01/08 20:13:59 graeme Exp $"
41 #else /* MP_IDENT_SUPPORT */
42 static MP_CONST MP_VOLATILE char *info_id = "$Id: info.c,v 1.101 2002/01/08 20:13:59 graeme Exp $";
43 #endif /* MP_IDENT_SUPPORT */
44
45
46 #ifdef __cplusplus
47 extern "C"
48 {
49 #endif /* __cplusplus */
50
51
52 MP_API void __mp_trap(void);
53
54
55 #if MP_INUSE_SUPPORT
56 void _Inuse_malloc(void *, unsigned long);
57 void _Inuse_realloc(void *, void *, unsigned long);
58 void _Inuse_free(void *);
59 #endif /* MP_INUSE_SUPPORT */
60
61
/* Initialise the fields of an infohead so that the mpatrol library
 * is ready to perform dynamic memory allocations.
 */

MP_GLOBAL
void
__mp_newinfo(infohead *h)
{
    /* These dummy structures are only used to measure alignment: the
     * distance between the x and y fields is the minimum alignment the
     * compiler requires for an allocanode and an infonode respectively.
     */
    struct { char x; allocanode y; } w;
    struct { char x; infonode y; } z;
    long n;

    /* The signal table is initialised before this function is called
     * because we have already entered the library at this point.  The
     * same goes for the recur field.
     */
    __mp_newallocs(&h->alloc, 0, MP_OVERFLOW, MP_OVERBYTE, MP_ALLOCBYTE,
                   MP_FREEBYTE, 0);
    __mp_newaddrs(&h->addr, &h->alloc.heap);
    __mp_newsymbols(&h->syms, &h->alloc.heap, h);
    __mp_newleaktab(&h->ltable, &h->alloc.heap);
    __mp_newprofile(&h->prof, &h->alloc.heap, &h->syms);
    __mp_newtrace(&h->trace, &h->alloc.heap.memory);
    /* Determine the minimum alignment for an allocation information node
     * on this system and force the alignment to be a power of two.  This
     * information is used when initialising the slot table.  Likewise for
     * the slot table of allocanodes.
     */
    n = (char *) &z.y - &z.x;
    __mp_newslots(&h->table, sizeof(infonode), __mp_poweroftwo(n));
    n = (char *) &w.y - &w.x;
    __mp_newslots(&h->atable, sizeof(allocanode), __mp_poweroftwo(n));
    __mp_newlist(&h->list);
    __mp_newlist(&h->alist);
    __mp_newlist(&h->astack);
    /* Initialise the settings to their default values.
     */
    h->size = h->event = h->count = h->cpeak = h->peak = h->limit = 0;
    h->astop = h->rstop = h->fstop = h->uabort = 0;
    h->lrange = h->urange = 0;
    h->check = 1;
    h->mcount = h->mtotal = 0;
    h->dtotal = h->ltotal = h->ctotal = h->stotal = 0;
    h->ffreq = h->fseed = 0;
    h->prologue = NULL;
    h->epilogue = NULL;
    h->nomemory = NULL;
    h->initcount = h->finicount = 0;
    h->log = __mp_logfile(&h->alloc.heap.memory, NULL);
    h->delpos = 0;
#if MP_PROTECT_SUPPORT
    h->flags = 0;
#else /* MP_PROTECT_SUPPORT */
    /* If the system does not support memory protection then we just set the
     * NOPROTECT flag here, which saves us calling a function which does nothing
     * each time we want to protect the library's internal structures.
     */
    h->flags = FLG_NOPROTECT;
#endif /* MP_PROTECT_SUPPORT */
    h->pid = __mp_processid();
    h->prot = MA_READWRITE;
    /* Now that the infohead has valid fields we can now set the initialised
     * flag.  This means that the library can now recursively call malloc()
     * or another memory allocation function without any problems.  It just
     * means that there will not be a log entry at that point, but generally
     * we don't need one as the user will only want to see their memory
     * allocations.
     */
    h->init = 1;
    h->fini = 0;
}
133
134
/* Free up all memory used by the infohead.  This is the inverse of
 * __mp_newinfo(): the subsidiary modules are deleted in reverse order
 * of creation and all bookkeeping fields are reset to zero.
 */

MP_GLOBAL
void
__mp_deleteinfo(infohead *h)
{
    /* NOTE(review): the log reference is simply dropped here — presumably
     * any closing of the log is handled elsewhere; confirm against the
     * diag module.
     */
    h->log = NULL;
    __mp_deleteprofile(&h->prof);
    __mp_deleteleaktab(&h->ltable);
    __mp_deletesymbols(&h->syms);
    __mp_deleteaddrs(&h->addr);
    __mp_deleteallocs(&h->alloc);
    /* The slot tables do not need to be explicitly freed since their
     * backing pages were obtained from the heap deleted above; just
     * clear the free lists so they cannot be reused.
     */
    h->table.free = NULL;
    h->table.size = 0;
    h->atable.free = NULL;
    h->atable.size = 0;
    __mp_newlist(&h->list);
    __mp_newlist(&h->alist);
    __mp_newlist(&h->astack);
    /* Reset the statistics fields.
     */
    h->size = h->event = h->count = h->cpeak = h->peak = 0;
    h->mcount = h->mtotal = 0;
    h->dtotal = h->ltotal = h->ctotal = h->stotal = 0;
    h->initcount = h->finicount = 0;
    h->delpos = 0;
}
161
162
163 /* Register an initialisation function to be called when the library is
164 * initialised.
165 */
166
167 MP_GLOBAL
168 int
__mp_atinit(infohead * h,void (* f)(void))169 __mp_atinit(infohead *h, void (*f)(void))
170 {
171 int r;
172
173 if (h->initcount == MP_MAXINITS)
174 r = 0;
175 else
176 {
177 h->inits[h->initcount++] = f;
178 r = 1;
179 }
180 return r;
181 }
182
183
184 /* Register a finalisation function to be called when the library is
185 * terminated.
186 */
187
188 MP_GLOBAL
189 int
__mp_atfini(infohead * h,void (* f)(void))190 __mp_atfini(infohead *h, void (*f)(void))
191 {
192 int r;
193
194 if (h->finicount == MP_MAXFINIS)
195 r = 0;
196 else
197 {
198 h->finis[h->finicount++] = f;
199 r = 1;
200 }
201 return r;
202 }
203
204
/* Allocate a new allocation information node, returning NULL if no
 * memory could be obtained for it.
 */

static
infonode *
getinfonode(infohead *h)
{
    infonode *n;
    heapnode *p;

    /* If we have no more allocation information node slots left then we
     * must allocate some more memory for them.  An extra MP_ALLOCFACTOR
     * pages of memory should suffice.
     */
    if ((n = (infonode *) __mp_getslot(&h->table)) == NULL)
    {
        if ((p = __mp_heapalloc(&h->alloc.heap, h->alloc.heap.memory.page *
              MP_ALLOCFACTOR, h->table.entalign, 1)) == NULL)
            return NULL;
        __mp_initslots(&h->table, p->block, p->size);
        /* The first slot carved from the new pages is deliberately not
         * returned to the caller: it is repurposed as an index record,
         * appended to h->list, which remembers the block address and size
         * of this heap allocation so that __mp_protectinfo() can later
         * change the page protection of the whole table.
         */
        n = (infonode *) __mp_getslot(&h->table);
        __mp_addtail(&h->list, &n->index.node);
        n->index.block = p->block;
        n->index.size = p->size;
        h->size += p->size;
        /* The second slot obtained is the one actually handed back.
         */
        n = (infonode *) __mp_getslot(&h->table);
    }
    return n;
}
234
235
/* Allocate a new allocanode, returning NULL if no memory could be
 * obtained for it.
 */

static
allocanode *
getallocanode(infohead *h)
{
    allocanode *n;
    heapnode *p;

    /* If we have no more allocanode slots left then we must allocate some more
     * memory for them.  An extra MP_ALLOCFACTOR pages of memory should suffice.
     */
    if ((n = (allocanode *) __mp_getslot(&h->atable)) == NULL)
    {
        if ((p = __mp_heapalloc(&h->alloc.heap, h->alloc.heap.memory.page *
              MP_ALLOCFACTOR, h->atable.entalign, 1)) == NULL)
            return NULL;
        __mp_initslots(&h->atable, p->block, p->size);
        /* As in getinfonode(), the first slot from the new pages becomes
         * an index record on h->alist that remembers the block address and
         * size of this heap allocation for later page protection.
         */
        n = (allocanode *) __mp_getslot(&h->atable);
        __mp_addtail(&h->alist, &n->node);
        n->block = p->block;
        n->data.size = p->size;
        h->size += p->size;
        /* The second slot obtained is the one actually handed back.
         */
        n = (allocanode *) __mp_getslot(&h->atable);
    }
    return n;
}
264
265
266 /* Add an entry to the leak table.
267 */
268
269 static
270 void
leaktabentry(infohead * h,infonode * m,size_t l,int f)271 leaktabentry(infohead *h, infonode *m, size_t l, int f)
272 {
273 addrnode *a;
274 symnode *s;
275 char *t;
276 unsigned long u;
277
278 t = NULL;
279 u = 0;
280 if ((m->data.file != NULL) && (m->data.line != 0))
281 {
282 t = m->data.file;
283 u = m->data.line;
284 }
285 else if (m->data.func != NULL)
286 t = m->data.func;
287 else if (a = m->data.stack)
288 {
289 if ((a->data.name == NULL) &&
290 (s = __mp_findsymbol(&h->syms, a->data.addr)))
291 a->data.name = s->data.name;
292 if (a->data.name != NULL)
293 t = a->data.name;
294 else
295 u = (unsigned long) a->data.addr;
296 }
297 if (f == 0)
298 __mp_allocentry(&h->ltable, t, u, l);
299 else
300 __mp_freeentry(&h->ltable, t, u, l);
301 }
302
303
/* Allocate a new block of memory of a specified size and alignment.
 * Returns a pointer to the usable memory, or NULL if the allocation
 * failed or a simulated failure was triggered.  The loginfo structure
 * describes the calling function for logging and diagnostics.
 */

MP_GLOBAL
void *
__mp_getmemory(infohead *h, size_t l, size_t a, loginfo *v)
{
    allocnode *n;
    allocanode *g;
    infonode *m;
    void *p;
    unsigned long c, t;

    p = NULL;
    /* Assign this request the next allocation index.  The counter is
     * advanced even if the allocation subsequently fails.
     */
    h->count++;
    c = h->count;
    v->ltype = LT_ALLOC;
    v->variant.logalloc.size = l;
    v->variant.logalloc.align = a;
    if (h->flags & FLG_LOGALLOCS)
        __mp_log(h, v);
    if ((c == h->astop) && (h->rstop == 0))
    {
        /* Abort at the specified allocation index.
         */
        __mp_printsummary(h);
        __mp_diag("\n");
        __mp_diag("stopping at allocation %lu\n", h->astop);
        __mp_trap();
    }
    /* Warn about zero-sized allocations when allocation checking is on.
     */
    if ((h->flags & FLG_CHECKALLOCS) && (l == 0))
    {
        __mp_log(h, v);
        __mp_warn(ET_ALLZER, v->type, v->file, v->line, NULL);
        __mp_diag("\n");
    }
    if (v->type == AT_MEMALIGN)
    {
        /* Check that the specified alignment is valid.  This is only
         * performed for memalign() so that we can report any problems
         * in the log file.  All other cases are checked silently.
         */
        if (a == 0)
        {
            if (h->flags & FLG_CHECKALLOCS)
            {
                __mp_log(h, v);
                __mp_warn(ET_ZERALN, v->type, v->file, v->line, NULL);
                __mp_diag("\n");
            }
            a = h->alloc.heap.memory.align;
        }
        else if (!__mp_ispoweroftwo(a))
        {
            if (h->flags & FLG_CHECKALLOCS)
            {
                __mp_log(h, v);
                __mp_warn(ET_BADALN, v->type, v->file, v->line, NULL, a);
                __mp_diag("\n");
            }
            a = __mp_poweroftwo(a);
        }
        else if (a > h->alloc.heap.memory.page)
        {
            if (h->flags & FLG_CHECKALLOCS)
            {
                __mp_log(h, v);
                __mp_warn(ET_MAXALN, v->type, v->file, v->line, NULL, a);
                __mp_diag("\n");
            }
            a = h->alloc.heap.memory.page;
        }
    }
    else if ((v->type == AT_VALLOC) || (v->type == AT_PVALLOC))
    {
        /* Check that the specified size and alignment for valloc() and
         * pvalloc() are valid.
         */
        if (v->type == AT_PVALLOC)
        {
            if (l == 0)
                l = 1;
            l = __mp_roundup(l, h->alloc.heap.memory.page);
        }
        a = h->alloc.heap.memory.page;
    }
    /* Simulate an allocation failure, either because the total allocation
     * limit would be exceeded or because the random failure frequency
     * fired, but only for direct calls from user code (h->recur == 1).
     */
    if ((h->recur == 1) && (((h->limit > 0) &&
         (h->alloc.asize + l > h->limit)) ||
        ((h->ffreq > 0) && ((rand() % h->ffreq) == 0))))
        errno = ENOMEM;
    else
    {
        if (!(h->flags & FLG_NOPROTECT))
            __mp_protectinfo(h, MA_READWRITE);
        /* The alloca() family additionally needs an allocanode; note that
         * g is only ever assigned on that path, and is only examined
         * later under the same type test, so it is never read
         * uninitialised.
         */
        if ((((v->type != AT_ALLOCA) && (v->type != AT_STRDUPA) &&
              (v->type != AT_STRNDUPA)) || (g = getallocanode(h))) &&
            (m = getinfonode(h)))
            if (n = __mp_getalloc(&h->alloc, l, a, m))
            {
#if MP_THREADS_SUPPORT
                t = __mp_threadid();
#else /* MP_THREADS_SUPPORT */
                t = 0;
#endif /* MP_THREADS_SUPPORT */
                /* Fill in the details of the allocation information node.
                 */
                m->data.type = v->type;
                m->data.alloc = c;
                m->data.realloc = 0;
#if MP_THREADS_SUPPORT
                m->data.thread = t;
#endif /* MP_THREADS_SUPPORT */
                m->data.event = h->event;
                m->data.func = v->func;
                m->data.file = v->file;
                m->data.line = v->line;
                m->data.stack = __mp_getaddrs(&h->addr, v->stack);
                m->data.typestr = v->typestr;
                m->data.typesize = v->typesize;
                m->data.userdata = NULL;
                if (h->recur > 1)
                    m->data.flags = FLG_INTERNAL;
                else
                    m->data.flags = 0;
                p = n->block;
                /* calloc()-style functions hand back zero-filled memory;
                 * everything else is pre-filled with the allocation byte.
                 */
                if ((v->type == AT_CALLOC) || (v->type == AT_XCALLOC) ||
                    (v->type == AT_RECALLOC))
                    __mp_memset(p, 0, l);
                else
                    __mp_memset(p, h->alloc.abyte, l);
                if (h->recur == 1)
                {
                    if (h->ltable.tracing)
                        leaktabentry(h, m, l, 0);
                    if (h->prof.profiling &&
                        __mp_profilealloc(&h->prof, n->size, m,
                                          !(h->flags & FLG_NOPROTECT)))
                        m->data.flags |= FLG_PROFILED;
                    if (h->trace.tracing)
                    {
                        __mp_tracealloc(&h->trace, c, p, l, t, v->func, v->file,
                                        v->line);
                        m->data.flags |= FLG_TRACED;
                    }
                }
#if MP_INUSE_SUPPORT
                _Inuse_malloc(p, l);
#endif /* MP_INUSE_SUPPORT */
            }
            else
                __mp_freeslot(&h->table, m);
        /* For the alloca() family, push the new block onto the alloca
         * stack so it can be reclaimed when it goes out of scope, or
         * release the allocanode if the allocation itself failed.
         */
        if (((v->type == AT_ALLOCA) || (v->type == AT_STRDUPA) ||
             (v->type == AT_STRNDUPA)) && (g != NULL))
            if (p != NULL)
            {
                __mp_addhead(&h->astack, &g->node);
                g->block = p;
#if MP_FULLSTACK
                /* If we support full stack tracebacks then we can more
                 * accurately determine when we can free up any allocations
                 * made by alloca(), strdupa() or strndupa() that are now out
                 * of scope.
                 */
                g->data.frame = (void *) m->data.stack;
#else /* MP_FULLSTACK */
                /* Otherwise, we take the address of a local variable in the
                 * calling function in order to determine if subsequent calls
                 * are closer to or further away from the program's entry point.
                 * This information can later be used to free up any
                 * allocations made by alloca(), strdupa() or strndupa() that
                 * are now out of scope.
                 */
                g->data.frame = (void *) &v->stack->frame;
#endif /* MP_FULLSTACK */
            }
            else
                __mp_freeslot(&h->atable, g);
        if ((h->recur == 1) && !(h->flags & FLG_NOPROTECT))
            __mp_protectinfo(h, MA_READONLY);
        /* Update the peak figures for bytes in use and heap size.
         */
        if (h->cpeak < h->alloc.atree.size)
            h->cpeak = h->alloc.atree.size;
        if (h->peak < h->alloc.asize)
            h->peak = h->alloc.asize;
    }
    if ((h->flags & FLG_LOGALLOCS) && (h->recur == 1))
        __mp_diag("returns " MP_POINTER "\n\n", p);
    return p;
}
492
493
/* Resize an existing block of memory to a new size and alignment.
 * Returns a pointer to the (possibly relocated) block, or NULL if the
 * resize failed or a problem with the request was diagnosed.  A NULL
 * p behaves like a new allocation and a zero l behaves like a free.
 */

MP_GLOBAL
void *
__mp_resizememory(infohead *h, void *p, size_t l, size_t a, loginfo *v)
{
    allocnode *n, *r;
    infonode *i, *m;
    size_t d;
    unsigned long t;

    v->ltype = LT_REALLOC;
    v->variant.logrealloc.block = p;
    v->variant.logrealloc.size = l;
    v->variant.logrealloc.align = a;
    if (h->flags & FLG_LOGREALLOCS)
        __mp_log(h, v);
    if (p == NULL)
    {
        /* Resizing a NULL pointer is treated as a fresh allocation.
         */
        if (h->flags & FLG_CHECKREALLOCS)
        {
            __mp_log(h, v);
            __mp_warn(ET_RSZNUL, v->type, v->file, v->line, NULL);
            __mp_diag("\n");
        }
        p = __mp_getmemory(h, l, a, v);
    }
    else if (n = __mp_findfreed(&h->alloc, p))
    {
        /* This block of memory has already been freed but has not been
         * returned to the free tree.
         */
        m = (infonode *) n->info;
        __mp_log(h, v);
        __mp_error(ET_PRVFRD, v->type, v->file, v->line, NULL, p,
                   __mp_functionnames[m->data.type]);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
        p = NULL;
    }
    else if (((n = __mp_findalloc(&h->alloc, p)) == NULL) ||
             ((m = (infonode *) n->info) == NULL))
    {
        /* We know nothing about this block of memory.
         */
        __mp_log(h, v);
        __mp_error(ET_NOTALL, v->type, v->file, v->line, NULL, p);
        __mp_diag("\n");
        p = NULL;
    }
    else if (p != n->block)
    {
        /* The address of the block passed in does not match the start
         * address of the block we know about.
         */
        __mp_log(h, v);
        __mp_error(ET_MISMAT, v->type, v->file, v->line, NULL, p, n->block);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
        p = NULL;
    }
    else if ((m->data.type == AT_ALLOCA) || (m->data.type == AT_STRDUPA) ||
             (m->data.type == AT_STRNDUPA) || (m->data.type == AT_NEW) ||
             (m->data.type == AT_NEWVEC))
    {
        /* The function used to allocate the block is incompatible with
         * alloca(), strdupa(), strndupa(), operator new or operator new[].
         */
        __mp_log(h, v);
        __mp_error(ET_INCOMP, v->type, v->file, v->line, NULL, p,
                   __mp_functionnames[m->data.type]);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
        p = NULL;
    }
    else if (l == 0)
    {
        /* Resizing to zero bytes is treated as a free.
         */
        if (h->flags & FLG_CHECKREALLOCS)
        {
            __mp_log(h, v);
            __mp_warn(ET_RSZZER, v->type, v->file, v->line, NULL);
            __mp_diag("\n");
        }
        __mp_freememory(h, p, v);
        p = NULL;
    }
    else
    {
        if ((h->flags & FLG_LOGREALLOCS) && (h->recur == 1))
        {
            __mp_printalloc(&h->syms, n);
            __mp_diag("\n");
        }
        if ((m->data.realloc + 1 == h->rstop) && ((h->astop == 0) ||
             (m->data.alloc == h->astop)))
        {
            /* Abort at the specified reallocation index.
             */
            __mp_printsummary(h);
            __mp_diag("\n");
            if (h->astop == 0)
                __mp_diag("stopping at reallocation %lu\n", h->rstop);
            else
                __mp_diag("stopping at reallocation %lu of allocation %lu\n",
                          h->rstop, h->astop);
            __mp_trap();
        }
        /* Simulate a failure when growing the block would exceed the
         * allocation limit or when the random failure frequency fires,
         * for direct user calls only.
         */
        if ((h->recur == 1) && (((h->limit > 0) && (l > n->size) &&
             (h->alloc.asize + l - n->size > h->limit)) ||
            ((h->ffreq > 0) && ((rand() % h->ffreq) == 0))))
        {
            errno = ENOMEM;
            p = NULL;
        }
        else
        {
#if MP_THREADS_SUPPORT
            t = __mp_threadid();
#else /* MP_THREADS_SUPPORT */
            t = 0;
#endif /* MP_THREADS_SUPPORT */
            /* Remember the old size before any resizing takes place.
             */
            d = n->size;
            if (!(h->flags & FLG_NOPROTECT))
                __mp_protectinfo(h, MA_READWRITE);
            m->data.realloc++;
            if ((v->type != AT_EXPAND) && (h->alloc.flags & FLG_NOFREE))
                /* We are not going to even attempt to resize the memory if
                 * we are preserving free blocks, and instead we will just
                 * create a new block all the time and preserve the old block.
                 */
                if ((i = getinfonode(h)) &&
                    (r = __mp_getalloc(&h->alloc, l, a, m)))
                {
                    /* Fill in the details of the allocation information node
                     * that will describe the preserved (freed) old block.
                     */
                    i->data.type = v->type;
                    i->data.alloc = m->data.alloc;
                    i->data.realloc = m->data.realloc - 1;
#if MP_THREADS_SUPPORT
                    i->data.thread = t;
#endif /* MP_THREADS_SUPPORT */
                    i->data.event = h->event;
                    i->data.func = v->func;
                    i->data.file = v->file;
                    i->data.line = v->line;
                    i->data.stack = __mp_getaddrs(&h->addr, v->stack);
                    i->data.typestr = m->data.typestr;
                    i->data.typesize = m->data.typesize;
                    i->data.userdata = m->data.userdata;
                    i->data.flags = m->data.flags | FLG_FREED;
                    __mp_memcopy(r->block, n->block, (l > d) ? d : l);
                    if (m->data.flags & FLG_TRACED)
                        __mp_tracerealloc(&h->trace, m->data.alloc, r->block,
                                          l, t, v->func, v->file, v->line);
#if MP_INUSE_SUPPORT
                    _Inuse_realloc(n->block, r->block, l);
#endif /* MP_INUSE_SUPPORT */
                    __mp_freealloc(&h->alloc, n, i);
                    p = r->block;
                }
                else
                {
                    /* i was assigned in the condition above, so it is only
                     * non-NULL here when __mp_getalloc() was what failed.
                     */
                    if (i != NULL)
                        __mp_freeslot(&h->table, i);
                    p = NULL;
                }
            else if (l == d)
                /* The old size is the same as the new size, so we just
                 * return an address to the start of the existing block.
                 */
                p = n->block;
            else if (!__mp_resizealloc(&h->alloc, n, l))
                /* If __mp_resizealloc() failed and all allocations are to
                 * be aligned to the end of pages or the size requested is
                 * greater than the existing size then we must allocate a
                 * new block, copy the contents and free the old block.
                 */
                if ((v->type != AT_EXPAND) &&
                    (((h->alloc.flags & FLG_PAGEALLOC) &&
                      (h->alloc.flags & FLG_ALLOCUPPER)) || (l > d)) &&
                    (r = __mp_getalloc(&h->alloc, l, a, m)))
                {
                    __mp_memcopy(r->block, n->block, (l > d) ? d : l);
                    if (m->data.flags & FLG_TRACED)
                        __mp_tracerealloc(&h->trace, m->data.alloc, r->block,
                                          l, t, v->func, v->file, v->line);
#if MP_INUSE_SUPPORT
                    _Inuse_realloc(n->block, r->block, l);
#endif /* MP_INUSE_SUPPORT */
                    __mp_freealloc(&h->alloc, n, NULL);
                    p = r->block;
                }
                else
                    p = NULL;
            else
            {
                /* We have been able to increase or decrease the size of the
                 * block without having to relocate it.
                 */
                if (m->data.flags & FLG_TRACED)
                    __mp_tracerealloc(&h->trace, m->data.alloc, n->block, l, t,
                                      v->func, v->file, v->line);
#if MP_INUSE_SUPPORT
                _Inuse_realloc(n->block, n->block, l);
#endif /* MP_INUSE_SUPPORT */
            }
            if (p != NULL)
            {
                /* The resize succeeded, so update the marked-byte total,
                 * leak table, profiling data and the allocation
                 * information node to reflect the new details.
                 */
                if (m->data.flags & FLG_MARKED)
                {
                    h->mtotal -= d;
                    h->mtotal += l;
                }
                if (h->ltable.tracing)
                    leaktabentry(h, m, d, 1);
                if (m->data.flags & FLG_PROFILED)
                    __mp_profilefree(&h->prof, d, m,
                                     !(h->flags & FLG_NOPROTECT));
                m->data.type = v->type;
#if MP_THREADS_SUPPORT
                m->data.thread = t;
#endif /* MP_THREADS_SUPPORT */
                m->data.event = h->event;
                m->data.func = v->func;
                m->data.file = v->file;
                m->data.line = v->line;
                __mp_freeaddrs(&h->addr, m->data.stack);
                m->data.stack = __mp_getaddrs(&h->addr, v->stack);
                m->data.typestr = v->typestr;
                m->data.typesize = v->typesize;
                if (h->ltable.tracing)
                    leaktabentry(h, m, l, 0);
                if (m->data.flags & FLG_PROFILED)
                    __mp_profilealloc(&h->prof, l, m,
                                      !(h->flags & FLG_NOPROTECT));
            }
            if ((h->recur == 1) && !(h->flags & FLG_NOPROTECT))
                __mp_protectinfo(h, MA_READONLY);
            /* Any extra space gained by growing the block is filled with
             * zeroes for recalloc() and the allocation byte otherwise.
             */
            if ((p != NULL) && (l > d))
                if (v->type == AT_RECALLOC)
                    __mp_memset((char *) p + d, 0, l - d);
                else
                    __mp_memset((char *) p + d, h->alloc.abyte, l - d);
            if (h->cpeak < h->alloc.atree.size)
                h->cpeak = h->alloc.atree.size;
            if (h->peak < h->alloc.asize)
                h->peak = h->alloc.asize;
        }
        /* If we are returning NULL from a call to reallocf() then we must
         * also free the original allocation.
         */
        if ((p == NULL) && (v->type == AT_REALLOCF))
            __mp_freememory(h, n->block, v);
    }
    if ((h->flags & FLG_LOGREALLOCS) && (h->recur == 1))
        __mp_diag("returns " MP_POINTER "\n\n", p);
    return p;
}
753
754
/* Free an existing block of memory.  A NULL pointer is accepted (and
 * optionally warned about); all other diagnosable misuse — double
 * frees, unknown pointers, misaligned pointers, allocator/deallocator
 * mismatches and freeing of marked allocations — is reported without
 * freeing anything.
 */

MP_GLOBAL
void
__mp_freememory(infohead *h, void *p, loginfo *v)
{
    allocnode *n;
    allocanode *g;
    infonode *m;
    unsigned long t;
    int o;

    v->ltype = LT_FREE;
    v->variant.logfree.block = p;
    if (h->flags & FLG_LOGFREES)
        __mp_log(h, v);
    if (p == NULL)
    {
        /* Freeing a NULL pointer is a no-op, but may be warned about when
         * free checking is enabled.
         */
        if (h->flags & FLG_CHECKFREES)
        {
            __mp_log(h, v);
            __mp_warn(ET_FRENUL, v->type, v->file, v->line, NULL);
            __mp_diag("\n");
        }
        return;
    }
    if (n = __mp_findfreed(&h->alloc, p))
    {
        /* This block of memory has already been freed but has not been
         * returned to the free tree.
         */
        m = (infonode *) n->info;
        __mp_log(h, v);
        __mp_error(ET_PRVFRD, v->type, v->file, v->line, NULL, p,
                   __mp_functionnames[m->data.type]);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
    }
    else if (((n = __mp_findalloc(&h->alloc, p)) == NULL) ||
             ((m = (infonode *) n->info) == NULL))
    {
        /* We know nothing about this block of memory.
         */
        __mp_log(h, v);
        __mp_error(ET_NOTALL, v->type, v->file, v->line, NULL, p);
        __mp_diag("\n");
    }
    else if (p != n->block)
    {
        /* The address of the block passed in does not match the start
         * address of the block we know about.
         */
        __mp_log(h, v);
        __mp_error(ET_MISMAT, v->type, v->file, v->line, NULL, p, n->block);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
    }
    else if ((((m->data.type == AT_ALLOCA) || (m->data.type == AT_STRDUPA) ||
               (m->data.type == AT_STRNDUPA)) && (v->type != AT_ALLOCA) &&
              (v->type != AT_DEALLOCA)) ||
             ((m->data.type != AT_ALLOCA) && (m->data.type != AT_STRDUPA) &&
              (m->data.type != AT_STRNDUPA) && ((v->type == AT_ALLOCA) ||
               (v->type == AT_DEALLOCA))) ||
             ((m->data.type == AT_NEW) && (v->type != AT_DELETE)) ||
             ((m->data.type != AT_NEW) && (v->type == AT_DELETE)) ||
             ((m->data.type == AT_NEWVEC) && (v->type != AT_DELETEVEC)) ||
             ((m->data.type != AT_NEWVEC) && (v->type == AT_DELETEVEC)))
    {
        /* The function used to allocate the block is incompatible with
         * the function used to free the block.
         */
        __mp_log(h, v);
        __mp_error(ET_INCOMP, v->type, v->file, v->line, NULL, p,
                   __mp_functionnames[m->data.type]);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
    }
    else if (m->data.flags & FLG_MARKED)
    {
        /* An attempt was made to free a marked memory allocation.
         */
        __mp_log(h, v);
        __mp_error(ET_FREMRK, v->type, v->file, v->line, NULL, p);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
    }
    else
    {
#if MP_THREADS_SUPPORT
        t = __mp_threadid();
#else /* MP_THREADS_SUPPORT */
        t = 0;
#endif /* MP_THREADS_SUPPORT */
        if ((h->flags & FLG_LOGFREES) && (h->recur == 1))
        {
            __mp_printalloc(&h->syms, n);
            __mp_diag("\n");
        }
        if (m->data.alloc == h->fstop)
        {
            /* Abort at the specified allocation index.
             */
            __mp_printsummary(h);
            __mp_diag("\n");
            __mp_diag("stopping at freeing of allocation %lu\n", h->fstop);
            __mp_trap();
        }
        if (!(h->flags & FLG_NOPROTECT))
            __mp_protectinfo(h, MA_READWRITE);
        /* Record the free in the leak table, profiling and tracing data
         * before the details of the allocation are discarded.
         */
        if (h->ltable.tracing)
            leaktabentry(h, m, n->size, 1);
        if (m->data.flags & FLG_PROFILED)
            __mp_profilefree(&h->prof, n->size, m, !(h->flags & FLG_NOPROTECT));
        if (m->data.flags & FLG_TRACED)
            __mp_tracefree(&h->trace, m->data.alloc, t, v->func, v->file,
                           v->line);
        __mp_freeaddrs(&h->addr, m->data.stack);
        if (h->alloc.flags & FLG_NOFREE)
        {
            /* Fill in the details of the allocation information node but only
             * if we are keeping the freed block.
             */
            m->data.type = v->type;
#if MP_THREADS_SUPPORT
            m->data.thread = t;
#endif /* MP_THREADS_SUPPORT */
            m->data.event = h->event;
            m->data.func = v->func;
            m->data.file = v->file;
            m->data.line = v->line;
            m->data.stack = __mp_getaddrs(&h->addr, v->stack);
            m->data.flags |= FLG_FREED;
        }
        else
        {
            __mp_freeslot(&h->table, m);
            m = NULL;
        }
        if ((v->type == AT_ALLOCA) || (v->type == AT_DEALLOCA))
        {
            /* Search the alloca allocation stack for the allocanode to free.
             * We need to do this instead of just blindly removing the top of
             * the stack since it is possible for the user to manually free an
             * allocation that was created by one of the alloca() family of
             * functions through the use of the dealloca() function.
             */
            o = 0;
            for (g = (allocanode *) h->astack.head; g->node.next != NULL;
                 g = (allocanode *) g->node.next)
                if (g->block == p)
                {
                    o = 1;
                    break;
                }
            if (o == 1)
            {
                __mp_remove(&h->astack, &g->node);
                __mp_freeslot(&h->atable, g);
            }
        }
#if MP_INUSE_SUPPORT
        _Inuse_free(p);
#endif /* MP_INUSE_SUPPORT */
        __mp_freealloc(&h->alloc, n, m);
        if ((h->recur == 1) && !(h->flags & FLG_NOPROTECT))
            __mp_protectinfo(h, MA_READONLY);
    }
}
924
925
926 /* Set a block of memory to contain a specific byte.
927 */
928
929 MP_GLOBAL
930 void
__mp_setmemory(infohead * h,void * p,size_t l,unsigned char c,loginfo * v)931 __mp_setmemory(infohead *h, void *p, size_t l, unsigned char c, loginfo *v)
932 {
933 v->ltype = LT_SET;
934 v->variant.logmemset.block = p;
935 v->variant.logmemset.size = l;
936 v->variant.logmemset.byte = c;
937 if (h->flags & FLG_LOGMEMORY)
938 __mp_log(h, v);
939 /* If the pointer is not NULL and does not overflow any memory blocks then
940 * proceed to set the memory.
941 */
942 if (__mp_checkrange(h, p, l, v))
943 {
944 __mp_memset(p, c, l);
945 h->stotal += l;
946 }
947 }
948
949
/* Copy a block of memory from one address to another.  Returns the
 * destination pointer; for memccpy()-style calls the return value is
 * instead just past the last byte copied, or NULL if the terminating
 * byte c was not found within l bytes.
 */

MP_GLOBAL
void *
__mp_copymemory(infohead *h, void *p, void *q, size_t l, unsigned char c,
                loginfo *v)
{
    void *r;

    v->ltype = LT_COPY;
    v->variant.logmemcopy.srcblock = p;
    v->variant.logmemcopy.dstblock = q;
    v->variant.logmemcopy.size = l;
    v->variant.logmemcopy.byte = c;
    if (h->flags & FLG_LOGMEMORY)
        __mp_log(h, v);
    /* We must ensure that the memory to be copied does not overlap when
     * memcpy() or memccpy() are called.  This does not matter when calling
     * __mp_memcopy() but it will matter when calling the normal system
     * functions, in which case memmove() should be used instead.
     */
    if (((v->type == AT_MEMCPY) || (v->type == AT_MEMCCPY)) && (l > 0) &&
        (((p < q) && ((char *) p + l > (char *) q)) ||
         ((q < p) && ((char *) q + l > (char *) p))))
    {
        __mp_log(h, v);
        __mp_warn(ET_RNGOVL, v->type, v->file, v->line, NULL, p,
                  (char *) p + l - 1, q, (char *) q + l - 1);
        __mp_diag("\n");
    }
    /* If the pointers are not NULL and do not overflow any memory blocks then
     * proceed to copy the memory.
     */
    if (__mp_checkrange(h, p, l, v) && __mp_checkrange(h, q, l, v))
    {
        if (v->type == AT_MEMCCPY)
        {
            /* memccpy() stops copying just after the first occurrence of
             * byte c, so locate it first in order to bound the copy.
             */
            if (r = __mp_memfind(p, l, &c, 1))
                l = (size_t) ((char *) r - (char *) p) + 1;
            __mp_memcopy(q, p, l);
            if (r != NULL)
                q = (char *) q + l;
            else
                q = NULL;
        }
        else
            __mp_memcopy(q, p, l);
        h->ctotal += l;
    }
    if ((h->flags & FLG_LOGMEMORY) && (h->recur == 1))
        __mp_diag("returns " MP_POINTER "\n\n", q);
    return q;
}
1004
1005
1006 /* Attempt to locate the position of one block of memory in another block.
1007 */
1008
1009 MP_GLOBAL
1010 void *
__mp_locatememory(infohead * h,void * p,size_t l,void * q,size_t m,loginfo * v)1011 __mp_locatememory(infohead *h, void *p, size_t l, void *q, size_t m, loginfo *v)
1012 {
1013 void *r;
1014
1015 r = NULL;
1016 v->ltype = LT_LOCATE;
1017 v->variant.logmemlocate.block = p;
1018 v->variant.logmemlocate.size = l;
1019 v->variant.logmemlocate.patblock = q;
1020 v->variant.logmemlocate.patsize = m;
1021 if (h->flags & FLG_LOGMEMORY)
1022 __mp_log(h, v);
1023 /* If the pointers are not NULL and do not overflow any memory blocks then
1024 * proceed to start the search.
1025 */
1026 if (__mp_checkrange(h, p, l, v) && __mp_checkrange(h, q, m, v))
1027 {
1028 r = __mp_memfind(p, l, q, m);
1029 h->ltotal += m;
1030 }
1031 if ((h->flags & FLG_LOGMEMORY) && (h->recur == 1))
1032 __mp_diag("returns " MP_POINTER "\n\n", r);
1033 return r;
1034 }
1035
1036
1037 /* Compare two blocks of memory.
1038 */
1039
1040 MP_GLOBAL
1041 int
__mp_comparememory(infohead * h,void * p,void * q,size_t l,loginfo * v)1042 __mp_comparememory(infohead *h, void *p, void *q, size_t l, loginfo *v)
1043 {
1044 void *r;
1045 int c;
1046
1047 c = 0;
1048 v->ltype = LT_COMPARE;
1049 v->variant.logmemcompare.block1 = p;
1050 v->variant.logmemcompare.block2 = q;
1051 v->variant.logmemcompare.size = l;
1052 if (h->flags & FLG_LOGMEMORY)
1053 __mp_log(h, v);
1054 /* If the pointers are not NULL and do not overflow any memory blocks then
1055 * proceed to compare the memory.
1056 */
1057 if (__mp_checkrange(h, p, l, v) && __mp_checkrange(h, q, l, v))
1058 {
1059 h->dtotal += l;
1060 if (r = __mp_memcompare(p, q, l))
1061 {
1062 l = (char *) r - (char *) p;
1063 c = (int) ((unsigned char *) p)[l] - (int) ((unsigned char *) q)[l];
1064 }
1065 }
1066 if ((h->flags & FLG_LOGMEMORY) && (h->recur == 1))
1067 __mp_diag("returns %d\n\n", c);
1068 return c;
1069 }
1070
1071
/* Protect the internal memory blocks used by the mpatrol library
 * with the supplied access permission.  Returns 1 on success and 0 if
 * any page protection request failed.
 */

MP_GLOBAL
int
__mp_protectinfo(infohead *h, memaccess a)
{
    allocanode *m;
    infonode *n;

    /* The library already knows what its protection status is so we don't
     * need to do anything if the request has already been done.
     */
    if (a == h->prot)
        return 1;
    h->prot = a;
    /* Walk the index records kept by getinfonode() and getallocanode():
     * each one holds the block address and size of a heap allocation that
     * backs the slot tables, so protecting them covers every node.
     */
    for (n = (infonode *) h->list.head; n->index.node.next != NULL;
         n = (infonode *) n->index.node.next)
        if (!__mp_memprotect(&h->alloc.heap.memory, n->index.block,
                             n->index.size, a))
            return 0;
    for (m = (allocanode *) h->alist.head; m->node.next != NULL;
         m = (allocanode *) m->node.next)
        if (!__mp_memprotect(&h->alloc.heap.memory, m->block, m->data.size, a))
            return 0;
    /* Finally propagate the new protection to the address nodes, the
     * leak table, the profiling data and the allocation structures.
     */
    if (!__mp_protectaddrs(&h->addr, a) ||
        !__mp_protectleaktab(&h->ltable, a) ||
        !__mp_protectprofile(&h->prof, a))
        return 0;
    return __mp_protectalloc(&h->alloc, a);
}
1104
1105
1106 /* Check the validity of all memory blocks that have been filled with
1107 * a predefined pattern.
1108 */
1109
MP_GLOBAL
void
__mp_checkinfo(infohead *h, loginfo *v)
{
    allocnode *n;
    infonode *m;
    void *b, *p;
    size_t l, s;

    /* Walk the entire allocation list and verify that every fill pattern
     * the library wrote (free bytes and overflow bytes) is still intact.
     * Any corruption found is fatal: diagnostics are printed and the
     * library aborts via __mp_abort().
     */
    for (n = (allocnode *) h->alloc.list.head; n->lnode.next != NULL;
         n = (allocnode *) n->lnode.next)
    {
        if ((m = (infonode *) n->info) == NULL)
            /* Check that all free blocks are filled with the free byte, but
             * only if all allocations are not pages since they will be read
             * and write protected in that case.
             */
            if (!(h->alloc.flags & FLG_PAGEALLOC) &&
                (p = __mp_memcheck(n->block, h->alloc.fbyte, n->size)))
            {
                __mp_log(h, v);
                __mp_printsummary(h);
                __mp_diag("\n");
                __mp_error(ET_FRECOR, AT_MAX, v->file, v->line, NULL, p);
                /* Display no more than 256 bytes of the corrupted area. */
                if ((l = (char *) n->block + n->size - (char *) p) > 256)
                    __mp_printmemory(p, 256);
                else
                    __mp_printmemory(p, l);
                h->fini = 1;
                __mp_abort();
            }
            else
                /* This else pairs with the inner if above: the node has no
                 * info node (it is a free block), so the remaining checks,
                 * all of which dereference m, must be skipped.
                 */
                continue;
        if ((m->data.flags & FLG_FREED) && !(h->alloc.flags & FLG_PAGEALLOC) &&
            !(h->alloc.flags & FLG_PRESERVE))
            /* Check that all freed blocks are filled with the free byte, but
             * only if all allocations are not pages and the original contents
             * were not preserved.
             */
            if (p = __mp_memcheck(n->block, h->alloc.fbyte, n->size))
            {
                __mp_log(h, v);
                __mp_printsummary(h);
                __mp_diag("\n");
                __mp_error(ET_FRDCOR, AT_MAX, v->file, v->line, NULL, n->block,
                           p);
                /* Display no more than 256 bytes of the corrupted area. */
                if ((l = (char *) n->block + n->size - (char *) p) > 256)
                    __mp_printmemory(p, 256);
                else
                    __mp_printmemory(p, l);
                __mp_diag("\n");
                __mp_printalloc(&h->syms, n);
                h->fini = 1;
                __mp_abort();
            }
        if (h->alloc.flags & FLG_OFLOWWATCH)
            /* If we have watch areas on every overflow buffer then we don't
             * need to perform the following checks.
             */
            continue;
        if ((h->alloc.flags & FLG_PAGEALLOC) && !(m->data.flags & FLG_FREED))
        {
            /* Check that all allocated blocks have overflow buffers filled with
             * the overflow byte, but only if all allocations are pages as this
             * check examines the overflow buffers within the page boundaries.
             * This does not have to be done for freed allocations as their
             * overflow buffers will be at least read-only.
             */
            b = (void *) __mp_rounddown((unsigned long) n->block,
                                        h->alloc.heap.memory.page);
            /* s is the size of the buffer below the block and l the size of
             * the whole page-aligned region containing it.
             */
            s = (char *) n->block - (char *) b;
            l = __mp_roundup(n->size + s, h->alloc.heap.memory.page);
            if ((p = __mp_memcheck(b, h->alloc.obyte, s)) ||
                (p = __mp_memcheck((char *) n->block + n->size, h->alloc.obyte,
                  l - n->size - s)))
            {
                __mp_log(h, v);
                __mp_printsummary(h);
                __mp_diag("\n");
                /* NOTE(review): FLG_FREED is always clear here because of
                 * the enclosing condition, so only the ET_ALLOVF branch can
                 * ever be taken.
                 */
                if (m->data.flags & FLG_FREED)
                    __mp_error(ET_FRDOVF, AT_MAX, v->file, v->line, NULL,
                               n->block, p);
                else
                    __mp_error(ET_ALLOVF, AT_MAX, v->file, v->line, NULL,
                               n->block, p);
                /* Print whichever overflow buffer holds the corruption. */
                if (p < n->block)
                    __mp_printmemory(b, s);
                else
                    __mp_printmemory((char *) n->block + n->size,
                                     l - n->size - s);
                __mp_diag("\n");
                __mp_printalloc(&h->syms, n);
                h->fini = 1;
                __mp_abort();
            }
        }
        if (!(h->alloc.flags & FLG_PAGEALLOC) && ((l = h->alloc.oflow) > 0))
            /* Check that all allocated and freed blocks have overflow buffers
             * filled with the overflow byte, but only if all allocations are
             * not pages and the overflow buffer size is greater than zero.
             */
            if ((p = __mp_memcheck((char *) n->block - l, h->alloc.obyte, l)) ||
                (p = __mp_memcheck((char *) n->block + n->size, h->alloc.obyte,
                  l)))
            {
                __mp_log(h, v);
                __mp_printsummary(h);
                __mp_diag("\n");
                if (m->data.flags & FLG_FREED)
                    __mp_error(ET_FRDOVF, AT_MAX, v->file, v->line, NULL,
                               n->block, p);
                else
                    __mp_error(ET_ALLOVF, AT_MAX, v->file, v->line, NULL,
                               n->block, p);
                /* Print whichever overflow buffer holds the corruption. */
                if (p < n->block)
                    __mp_printmemory((char *) n->block - l, l);
                else
                    __mp_printmemory((char *) n->block + n->size, l);
                __mp_diag("\n");
                __mp_printalloc(&h->syms, n);
                h->fini = 1;
                __mp_abort();
            }
    }
}
1235
1236
1237 /* Check that a memory operation does not overflow the boundaries of a
1238 * memory block.
1239 */
1240
1241 MP_GLOBAL
1242 int
__mp_checkrange(infohead * h,void * p,size_t s,loginfo * v)1243 __mp_checkrange(infohead *h, void *p, size_t s, loginfo *v)
1244 {
1245 allocnode *n;
1246 infonode *m;
1247 void *b;
1248 size_t l;
1249 int e;
1250
1251 if (p == NULL)
1252 {
1253 if ((s > 0) || (h->flags & FLG_CHECKMEMORY))
1254 {
1255 __mp_log(h, v);
1256 __mp_error(ET_NULOPN, v->type, v->file, v->line, NULL);
1257 }
1258 return 0;
1259 }
1260 e = 1;
1261 if (s == 0)
1262 s = 1;
1263 if (n = __mp_findnode(&h->alloc, p, s))
1264 if ((m = (infonode *) n->info) == NULL)
1265 {
1266 __mp_log(h, v);
1267 __mp_error(ET_FREOPN, v->type, v->file, v->line, NULL);
1268 e = 0;
1269 }
1270 else if (m->data.flags & FLG_FREED)
1271 {
1272 __mp_log(h, v);
1273 __mp_error(ET_FRDOPN, v->type, v->file, v->line, NULL);
1274 __mp_printalloc(&h->syms, n);
1275 __mp_diag("\n");
1276 e = 0;
1277 }
1278 else if ((p < n->block) ||
1279 ((char *) p + s > (char *) n->block + n->size))
1280 {
1281 if (h->alloc.flags & FLG_PAGEALLOC)
1282 {
1283 b = (void *) __mp_rounddown((unsigned long) n->block,
1284 h->alloc.heap.memory.page);
1285 l = __mp_roundup(n->size + ((char *) n->block - (char *) b),
1286 h->alloc.heap.memory.page);
1287 }
1288 else
1289 {
1290 b = n->block;
1291 l = n->size;
1292 }
1293 b = (char *) b - h->alloc.oflow;
1294 l += h->alloc.oflow << 1;
1295 __mp_log(h, v);
1296 if (h->flags & FLG_ALLOWOFLOW)
1297 __mp_warn(ET_RNGOVF, v->type, v->file, v->line, NULL, p,
1298 (char *) p + s - 1, b, (char *) b + l - 1);
1299 else
1300 __mp_error(ET_RNGOVF, v->type, v->file, v->line, NULL, p,
1301 (char *) p + s - 1, b, (char *) b + l - 1);
1302 __mp_printalloc(&h->syms, n);
1303 __mp_diag("\n");
1304 e = ((h->flags & FLG_ALLOWOFLOW) != 0);
1305 }
1306 return e;
1307 }
1308
1309
1310 /* Check that a string does not overflow the boundaries of a memory block and
1311 * then return the length of the string.
1312 */
1313
MP_GLOBAL
int
__mp_checkstring(infohead *h, char *p, size_t *s, loginfo *v, int g)
{
    allocnode *n;
    infonode *m;
    treenode *t;
    void *b;
    char *c, *u;
    size_t l;
    int e;

    /* When g is 1 the scan is bounded: on entry *s holds the maximum number
     * of characters to examine and u marks one past the last of them.  When
     * g is not 1 the scan is unbounded.  On success *s receives the measured
     * string length.
     */
    if (g == 1)
        u = p + *s;
    else
        u = NULL;
    *s = 0;
    if (p == NULL)
    {
        /* A NULL string is only reported when the scan is unbounded, the
         * bound is non-zero, or full memory checking is enabled.
         */
        if ((g == 0) || (u > p) || (h->flags & FLG_CHECKMEMORY))
        {
            __mp_log(h, v);
            __mp_error(ET_NULOPN, v->type, v->file, v->line, NULL);
        }
        return 0;
    }
    /* e records the problem found: 1 means the string ran off the end of
     * its memory region (string overflow), 2 means a bounded scan crossed
     * the region boundary while FLG_ALLOWOFLOW is not set.
     */
    e = 0;
    if ((n = __mp_findnode(&h->alloc, p, 1)) == NULL)
    {
        /* The string does not start inside a known allocation.  Look for
         * the next allocation above it so that the scan can be bounded by
         * the start of that block's lower overflow buffer.
         */
        if ((t = __mp_searchhigher(h->alloc.atree.root, (unsigned long) p)) ||
            (t = __mp_searchhigher(h->alloc.gtree.root, (unsigned long) p)))
        {
            /* Recover the allocnode from its embedded tree node. */
            n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
            if (h->alloc.flags & FLG_PAGEALLOC)
                b = (void *) __mp_rounddown((unsigned long) n->block,
                                            h->alloc.heap.memory.page);
            else
                b = n->block;
            b = (char *) b - h->alloc.oflow;
            if (g == 1)
            {
                for (c = p; (c < u) && (c < (char *) b) && (*c != '\0'); c++);
                /* The else if below binds to the inner if: reaching b with
                 * no terminator is a string overflow, while a bound lying
                 * beyond b is only a range overflow when overflows are not
                 * allowed.
                 */
                if (u > (char *) b)
                    if (c == b)
                        e = 1;
                    else if (!(h->flags & FLG_ALLOWOFLOW))
                        e = 2;
            }
            else
            {
                for (c = p; (c < (char *) b) && (*c != '\0'); c++);
                if (c == b)
                    e = 1;
            }
        }
        else if (g == 1)
            for (c = p; (c < u) && (*c != '\0'); c++);
        else
            for (c = p; *c != '\0'; c++);
        *s = (size_t) (c - p);
    }
    else if ((m = (infonode *) n->info) == NULL)
    {
        /* The string starts inside free memory. */
        __mp_log(h, v);
        __mp_error(ET_FREOPN, v->type, v->file, v->line, NULL);
        return 0;
    }
    else if (m->data.flags & FLG_FREED)
    {
        /* The string starts inside a freed allocation. */
        __mp_log(h, v);
        __mp_error(ET_FRDOPN, v->type, v->file, v->line, NULL);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
        return 0;
    }
    else if ((p >= (char *) n->block) && (p < (char *) n->block + n->size))
    {
        /* The string starts inside an allocated block: scan no further
         * than the end of that block.
         */
        b = (char *) n->block + n->size;
        if (g == 1)
        {
            for (c = p; (c < u) && (c < (char *) b) && (*c != '\0'); c++);
            if (u > (char *) b)
                if (c == b)
                    e = 1;
                else if (!(h->flags & FLG_ALLOWOFLOW))
                    e = 2;
        }
        else
        {
            for (c = p; (c < (char *) b) && (*c != '\0'); c++);
            if (c == b)
                e = 1;
        }
        *s = (size_t) (c - p);
    }
    else
        /* The string starts within the found node but outside its usable
         * contents - presumably in an overflow buffer; treat this as a
         * string overflow.
         */
        e = 1;
    if (e != 0)
    {
        /* Work out the limits of the block, including its overflow buffers,
         * for the diagnostic message.
         */
        if (h->alloc.flags & FLG_PAGEALLOC)
        {
            b = (void *) __mp_rounddown((unsigned long) n->block,
                                        h->alloc.heap.memory.page);
            l = __mp_roundup(n->size + ((char *) n->block - (char *) b),
                             h->alloc.heap.memory.page);
        }
        else
        {
            b = n->block;
            l = n->size;
        }
        b = (char *) b - h->alloc.oflow;
        l += h->alloc.oflow << 1;
        __mp_log(h, v);
        if (e == 1)
            __mp_error(ET_STROVF, v->type, v->file, v->line, NULL, p, b,
                       (char *) b + l - 1);
        else
            __mp_warn(ET_RNGOVF, v->type, v->file, v->line, NULL, p, u - 1, b,
                      (char *) b + l - 1);
        __mp_printalloc(&h->syms, n);
        __mp_diag("\n");
        /* Only the warning case (e == 2) is treated as a success. */
        return (e == 2);
    }
    return 1;
}
1440
1441
1442 /* Fix the alignment required by a specified allocation function.
1443 */
1444
1445 MP_GLOBAL
1446 size_t
__mp_fixalign(infohead * h,alloctype f,size_t a)1447 __mp_fixalign(infohead *h, alloctype f, size_t a)
1448 {
1449 size_t r;
1450
1451 if ((f == AT_VALLOC) || (f == AT_PVALLOC))
1452 r = h->alloc.heap.memory.page;
1453 else
1454 {
1455 r = a;
1456 if (f == AT_MEMALIGN)
1457 {
1458 if (r > h->alloc.heap.memory.page)
1459 r = h->alloc.heap.memory.page;
1460 else if (!__mp_ispoweroftwo(r))
1461 r = __mp_poweroftwo(r);
1462 }
1463 if (r == 0)
1464 r = h->alloc.heap.memory.align;
1465 }
1466 return r;
1467 }
1468
1469
1470 #ifdef __cplusplus
1471 }
1472 #endif /* __cplusplus */
1473