/*
 * mpatrol
 * A library for controlling and tracing dynamic memory allocations.
 * Copyright (C) 1997-2002 Graeme S. Roy <graeme.roy@analog.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307, USA.
 */


/*
 * Memory allocations.  The associated information that will be stored
 * with each memory allocation is dealt with by the allocation information
 * module, and a void pointer is passed in from there so as to hide the
 * details from this module.  Only the efficient implementation of memory
 * allocation, deallocation and reuse is performed here.
 */
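

/* A purely illustrative sketch of how this module might be driven, based on
 * the functions defined below.  The fill bytes, sizes and flags are
 * placeholder values, and "info" stands for the opaque pointer supplied by
 * the allocation information module:
 *
 *     allochead h;
 *     allocnode *n;
 *
 *     __mp_newallocs(&h, 0, 0, 0xAA, 0xFF, 0x55, 0);
 *     if ((n = __mp_getalloc(&h, 16, 0, info)) != NULL)
 *     {
 *         ... use n->block ...
 *         __mp_freealloc(&h, n, NULL);
 *     }
 *     __mp_deleteallocs(&h);
 */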


#include "alloc.h"
#include "utils.h"


#if MP_IDENT_SUPPORT
#ident "$Id: alloc.c,v 1.18 2002/01/08 20:13:59 graeme Exp $"
#else /* MP_IDENT_SUPPORT */
static MP_CONST MP_VOLATILE char *alloc_id = "$Id: alloc.c,v 1.18 2002/01/08 20:13:59 graeme Exp $";
#endif /* MP_IDENT_SUPPORT */


#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */


/* Initialise the fields of an allochead so that there are no allocated,
 * freed or free blocks.
 */

MP_GLOBAL
void
__mp_newallocs(allochead *h, size_t m, size_t s, unsigned char o,
               unsigned char a, unsigned char f, unsigned long u)
{
    struct { char x; allocnode y; } z;
    long n;

    __mp_newheap(&h->heap);
    /* Determine the minimum alignment for an allocation node on this
     * system and force the alignment to be a power of two.  This
     * information is used when initialising the slot table.
     */
    n = (char *) &z.y - &z.x;
    __mp_newslots(&h->table, sizeof(allocnode), __mp_poweroftwo(n));
    __mp_newlist(&h->list);
    __mp_newlist(&h->flist);
    __mp_newtree(&h->itree);
    __mp_newtree(&h->atree);
    __mp_newtree(&h->gtree);
    __mp_newtree(&h->ftree);
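    /* Record the user-supplied settings: m is the maximum number of freed
     * allocations to keep on the freed list, s is the requested overflow
     * buffer size, o and f are the bytes used to fill overflow buffers and
     * free memory respectively, and u holds the option flags.  The a byte
     * is stored here but not used within this module; presumably it is the
     * fill byte for newly allocated memory.
     */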
    h->isize = h->asize = h->gsize = h->fsize = 0;
    h->fmax = m;
    h->oflow = __mp_poweroftwo(s);
    h->obyte = o;
    h->abyte = a;
    h->fbyte = f;
    h->flags = u;
    if (h->flags & FLG_PAGEALLOC)
    {
        if (h->oflow == 0)
            h->oflow = 1;
        h->oflow = __mp_roundup(h->oflow, h->heap.memory.page);
    }
    h->prot = MA_NOACCESS;
    h->protrecur = 0;
}


/* Forget all allocated, freed or free blocks.
 */

MP_GLOBAL
void
__mp_deleteallocs(allochead *h)
{
    /* We don't need to explicitly free any memory as this is dealt with
     * at a lower level by the heap manager.
     */
    __mp_deleteheap(&h->heap);
    h->table.free = NULL;
    h->table.size = 0;
    __mp_newlist(&h->list);
    __mp_newlist(&h->flist);
    __mp_newtree(&h->itree);
    __mp_newtree(&h->atree);
    __mp_newtree(&h->gtree);
    __mp_newtree(&h->ftree);
    h->isize = h->asize = h->gsize = h->fsize = 0;
    h->prot = MA_NOACCESS;
    h->protrecur = 0;
}


/* Allocate a new allocation node.
 */

static
allocnode *
getnode(allochead *h)
{
    allocnode *n;
    heapnode *p;

    /* If we have no more allocation node slots left then we must allocate
     * some more memory for them.  An extra MP_ALLOCFACTOR pages of memory
     * should suffice.
     */
    if ((n = (allocnode *) __mp_getslot(&h->table)) == NULL)
    {
        if ((p = __mp_heapalloc(&h->heap, h->heap.memory.page * MP_ALLOCFACTOR,
              h->table.entalign, 1)) == NULL)
            return NULL;
        __mp_initslots(&h->table, p->block, p->size);
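        /* The first slot taken from the new block of memory is used to
         * record the heap block itself as internal memory in the itree,
         * and a second slot is then taken for the caller.
         */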
        n = (allocnode *) __mp_getslot(&h->table);
        n->lnode.next = n->lnode.prev = NULL;
        __mp_treeinsert(&h->itree, &n->tnode, (unsigned long) p->block);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->isize += p->size;
        n = (allocnode *) __mp_getslot(&h->table);
    }
    return n;
}


/* Split a free node into an allocated node of a certain size and alignment
 * and up to two new free nodes.
 */

static
allocnode *
splitnode(allochead *h, allocnode *n, size_t l, size_t a, void *i)
{
    allocnode *p, *q;
    size_t m, s;

    /* We choose the worst case scenario here and allocate new nodes for
     * both the left and right nodes.  This is so that we can easily recover
     * from lack of system memory at this point rather than rebuild the
     * original free node if we discover that we are out of memory later.
     */
    if (((p = getnode(h)) == NULL) || ((q = getnode(h)) == NULL))
    {
        if (p != NULL)
            __mp_freeslot(&h->table, p);
        return NULL;
    }
    /* Remove the free node from the free tree.
     */
    __mp_treeremove(&h->ftree, &n->tnode);
    h->fsize -= n->size;
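    /* Reserve space for the overflow buffers on either side of the block.
     */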
    n->block = (char *) n->block + h->oflow;
    n->size -= h->oflow << 1;
    /* Check to see if we have space left over to create a free node to the
     * left of the new node.  This is never done if all allocations are pages.
     */
    if (!(h->flags & FLG_PAGEALLOC) &&
        ((m = __mp_roundup((unsigned long) n->block, a) -
          (unsigned long) n->block) > 0))
    {
        __mp_prepend(&h->list, &n->lnode, &p->lnode);
        __mp_treeinsert(&h->ftree, &p->tnode, m);
        p->block = (char *) n->block - h->oflow;
        p->size = m;
        p->info = NULL;
        n->block = (char *) n->block + m;
        n->size -= m;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, p);
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        s = __mp_roundup(l, h->heap.memory.page);
    else
        s = l;
    /* Check to see if we have space left over to create a free node to the
     * right of the new node.  This free node will always have a size which is
     * a multiple of the system page size if all allocations are pages.
     */
    if ((m = n->size - s) > 0)
    {
        __mp_insert(&h->list, &n->lnode, &q->lnode);
        __mp_treeinsert(&h->ftree, &q->tnode, m);
        q->block = (char *) n->block + s + h->oflow;
        q->size = m;
        q->info = NULL;
        n->size = s;
        h->fsize += m;
    }
    else
        __mp_freeslot(&h->table, q);
    /* Initialise the details of the newly allocated node and insert it in
     * the allocation tree.
     */
    n->info = i;
    if (h->flags & FLG_PAGEALLOC)
    {
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_READWRITE);
        /* If we are aligning the end of allocations to the upper end of pages
         * then we may have to shift the start of the block up by a certain
         * number of bytes.  This will then also lead to us having to prefill
         * the unused space with the overflow byte or place a watch point area
         * there.
         */
        if ((h->flags & FLG_ALLOCUPPER) &&
            ((m = __mp_rounddown(n->size - l, a)) > 0))
        {
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, n->block, m, MA_NOACCESS);
            else
                __mp_memset(n->block, h->obyte, m);
            n->block = (char *) n->block + m;
            n->size -= m;
        }
        /* We may need to prefill any unused space at the end of the block with
         * the overflow byte, or place a watch point area there.
         */
        if ((m = n->size - l) > 0)
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + l, m,
                              MA_NOACCESS);
            else
                __mp_memset((char *) n->block + l, h->obyte, m);
        n->size = l;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_NOACCESS);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_NOACCESS);
    }
    else
    {
        __mp_memset((char *) n->block - h->oflow, h->obyte, h->oflow);
        __mp_memset((char *) n->block + n->size, h->obyte, h->oflow);
    }
    __mp_treeinsert(&h->atree, &n->tnode, (unsigned long) n->block);
    h->asize += n->size;
    return n;
}


/* Attempt to merge a free node with any bordering free nodes.
 */

static
allocnode *
mergenode(allochead *h, allocnode *n)
{
    allocnode *l, *r;

    /* See if the left node is free and borders on this node.
     */
    l = (allocnode *) n->lnode.prev;
    if ((l->lnode.prev == NULL) || (l->info != NULL) ||
        ((char *) l->block + l->size < (char *) n->block))
        l = NULL;
    /* See if the right node is free and borders on this node.
     */
    r = (allocnode *) n->lnode.next;
    if ((r->lnode.next == NULL) || (r->info != NULL) ||
        ((char *) n->block + n->size < (char *) r->block))
        r = NULL;
    /* If either or both of the left and right nodes are suitable for
     * merging then perform the merge.
     */
    if ((l != NULL) || (r != NULL))
    {
        __mp_treeremove(&h->ftree, &n->tnode);
        if (l != NULL)
        {
            __mp_remove(&h->list, &l->lnode);
            __mp_treeremove(&h->ftree, &l->tnode);
            n->block = l->block;
            n->size += l->size;
            __mp_freeslot(&h->table, l);
        }
        if (r != NULL)
        {
            __mp_remove(&h->list, &r->lnode);
            __mp_treeremove(&h->ftree, &r->tnode);
            n->size += r->size;
            __mp_freeslot(&h->table, r);
        }
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    }
    return n;
}


/* Create a new allocation node of a specified size and alignment.
 */

MP_GLOBAL
allocnode *
__mp_getalloc(allochead *h, size_t l, size_t a, void *i)
{
    allocnode *n, *r, *s;
    heapnode *p;
    treenode *t;
    size_t b, m;

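    /* The total overhead needed for the overflow buffers on either side
     * of the block.
     */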
    b = h->oflow << 1;
    if (l == 0)
        l = 1;
    if (a == 0)
        a = h->heap.memory.align;
    else if (a > h->heap.memory.page)
        a = h->heap.memory.page;
    else
        a = __mp_poweroftwo(a);
    /* If allocations are not all pages then we must add more bytes to the
     * allocation request to account for alignment.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = 0;
    else
        m = a - 1;
    /* If we have no suitable space for this allocation then we must allocate
     * memory via the heap manager.
     */
    if ((t = __mp_searchhigher(h->ftree.root, l + b + m)) == NULL)
    {
        if ((n = getnode(h)) == NULL)
            return NULL;
        /* If all allocations are pages then we must specify that we want our
         * heap allocation to be page-aligned.
         */
        if (h->flags & FLG_PAGEALLOC)
            m = h->heap.memory.page;
        else
            m = a;
        if ((p = __mp_heapalloc(&h->heap,
              __mp_roundup(l + b, h->heap.memory.page), m, 0)) == NULL)
        {
            __mp_freeslot(&h->table, n);
            return NULL;
        }
        /* Initialise the free memory.  If all allocations are pages then we
         * prevent any free memory from being both read from and written to.
         */
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, p->size, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, p->size);
        /* Insert the new memory block into the correct position in the
         * memory block list.  This is vital for merging free nodes.
         */
        if ((t = __mp_searchlower(h->atree.root, (unsigned long) p->block)) ||
            (t = __mp_searchlower(h->gtree.root, (unsigned long) p->block)))
            r = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        else
            r = (allocnode *) &h->list;
        while (((s = (allocnode *) r->lnode.next)->lnode.next != NULL) &&
               (s->block < p->block))
            r = s;
        __mp_insert(&h->list, &r->lnode, &n->lnode);
        __mp_treeinsert(&h->ftree, &n->tnode, p->size);
        n->block = p->block;
        n->size = p->size;
        n->info = NULL;
        h->fsize += p->size;
        /* Merge the memory block with any bordering free nodes.  This
         * is also vital to maintain the property that the memory block
         * list does not ever contain two bordering free nodes.
         */
        n = mergenode(h, n);
    }
    else
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    /* Split the free node as requested.
     */
    return splitnode(h, n, l, a, i);
}


/* Attempt to resize an existing allocation node.
 */

MP_GLOBAL
int
__mp_resizealloc(allochead *h, allocnode *n, size_t l)
{
    allocnode *p;
    size_t m, s;
    long d;

    /* If all allocations are pages and the allocations are to be aligned
     * to the end of a page then the easiest solution is to fail here since
     * the majority of cases would require relocation of the original memory
     * allocation.
     */
    if ((h->flags & FLG_PAGEALLOC) && (h->flags & FLG_ALLOCUPPER))
        return 0;
    if (l == 0)
        l = 1;
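    /* The signed difference between the requested size and the current
     * size of the allocation.
     */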
    d = l - n->size;
    /* If we are allocating pages then the effective block size is the
     * original size rounded up to a multiple of the system page size.
     */
    if (h->flags & FLG_PAGEALLOC)
        m = __mp_roundup(n->size, h->heap.memory.page);
    else
        m = n->size;
    /* Obtain the bordering free node to the right of this node, if one
     * exists.  There is no need to look any further right as it is
     * guaranteed that it will not be another bordering free node.
     */
    p = (allocnode *) n->lnode.next;
    if ((p->lnode.next == NULL) || (p->info != NULL) ||
        ((char *) n->block + m + h->oflow < (char *) p->block))
        p = NULL;
    if ((h->flags & FLG_PAGEALLOC) && (l <= m) && (l > m - h->heap.memory.page))
    {
        /* There is space in the existing allocated pages to perform the
         * resize without requiring the modification or creation of a
         * neighbouring free node so we remove the watch point area if it
         * exists.
         */
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          m - n->size, MA_READWRITE);
    }
    else if (d > 0)
    {
        /* If the request was to increase the size of the node and we have no
         * suitable node to merge with or the total size of both nodes is still
         * too small then we just fail.  The relocation to a larger memory
         * allocation is done by the calling function.
         */
        if ((p == NULL) || (m + p->size < l))
            return 0;
        __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = __mp_roundup(l, h->heap.memory.page) - m;
            /* Remove any memory protection and the watch point area if it
             * exists.
             */
            __mp_memprotect(&h->heap.memory, (char *) p->block - h->oflow, s,
                            MA_READWRITE);
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        p->block = (char *) p->block + s;
        p->size -= s;
        /* If the resulting size of the free block we merged with is zero then
         * we can just delete it, otherwise we must insert it back into the
         * free tree.
         */
        if (p->size == 0)
        {
            __mp_remove(&h->list, &p->lnode);
            __mp_freeslot(&h->table, p);
        }
        else
            __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize -= s;
    }
    else if (d < 0)
    {
        /* If the request was to decrease the size of the node then we
         * must either increase the size of the bordering node, or create
         * a new free node.
         */
        if (p == NULL)
        {
            if ((p = getnode(h)) == NULL)
                return 0;
            __mp_insert(&h->list, &n->lnode, &p->lnode);
            p->block = (char *) n->block + m + h->oflow;
            p->size = 0;
            p->info = NULL;
        }
        else
            __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = m - __mp_roundup(l, h->heap.memory.page);
            /* Remove the watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = -d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        p->block = (char *) p->block - s;
        p->size += s;
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, s, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, s);
        __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize += s;
    }
    if (h->flags & FLG_PAGEALLOC)
    {
        s = __mp_roundup(l, h->heap.memory.page) - l;
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + l, s,
                          MA_NOACCESS);
        else
            __mp_memset((char *) n->block + l, h->obyte, s);
    }
    else if (h->flags & FLG_OFLOWWATCH)
        __mp_memwatch(&h->heap.memory, (char *) n->block + l, h->oflow,
                      MA_NOACCESS);
    else
        __mp_memset((char *) n->block + l, h->obyte, h->oflow);
    n->size = l;
    h->asize += d;
    return 1;
}


/* Free an existing allocation node.
 */

MP_GLOBAL
void
__mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p;
    size_t l, s;
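    /* Note that p and s are only set and used below when all allocations
     * are pages, in which case they record the page-aligned base address
     * and the rounded-up size of the allocation.
     */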

    /* If we are keeping the details (and possibly the contents) of a specified
     * number of recently freed memory allocations then we may have to recycle
     * the oldest freed allocation if the length of the queue would extend past
     * the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree.
     */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being read from and written to, or just prevent
         * them from being written to.  If not then we may optionally
         * preserve the contents, otherwise they will be filled with the
         * free byte.
         */
        n->info = i;
        if (h->flags & FLG_PAGEALLOC)
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on some
                     * systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory, (char *) n->block +
                                      n->size, l, MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        mergenode(h, n);
    }
}


/* Recycle a freed allocation node.
 */

MP_GLOBAL
void
__mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p;
    size_t l, s;

    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree.
     */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_READWRITE);
    }
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    mergenode(h, n);
}


/* Protect the internal memory blocks used by the allocation manager with the
 * supplied access permission.
 */

MP_GLOBAL
int
__mp_protectalloc(allochead *h, memaccess a)
{
    allocnode *n;
    treenode *t;

    if (!__mp_heapprotect(&h->heap, a))
        return 0;
    /* The library already knows what its protection status is, so we don't
     * need to do anything if the requested protection is already in effect.
     */
    if (h->prot == a)
    {
        h->protrecur++;
        return 1;
    }
    else if (h->protrecur > 0)
    {
        h->protrecur--;
        return 1;
    }
    h->prot = a;
    for (t = __mp_minimum(h->itree.root); t != NULL; t = __mp_successor(t))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if (!__mp_memprotect(&h->heap.memory, n->block, n->size, a))
            return 0;
    }
    return 1;
}


/* Search for an allocated node which contains a given address.
 */

MP_GLOBAL
allocnode *
__mp_findalloc(allochead *h, void *p)
{
    allocnode *n;
    treenode *t;

    if (t = __mp_searchlower(h->atree.root, (unsigned long) p))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if ((char *) n->block + n->size > (char *) p)
            return n;
    }
    return NULL;
}


/* Search for a freed node which contains a given address.
 */

MP_GLOBAL
allocnode *
__mp_findfreed(allochead *h, void *p)
{
    allocnode *n;
    treenode *t;

    if (t = __mp_searchlower(h->gtree.root, (unsigned long) p))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if ((char *) n->block + n->size > (char *) p)
            return n;
    }
    return NULL;
}


/* Search for a node which contains a given address, either within
 * its allocation or as part of an overflow buffer.
 */

MP_GLOBAL
allocnode *
__mp_findnode(allochead *h, void *p, size_t s)
{
    allocnode *n;
    treenode *t;
    void *b;
    size_t l;

    /* Search for the lowest node that is closest to the given address.
     */
    if ((t = __mp_searchlower(h->atree.root, (unsigned long) p)) ||
        (t = __mp_searchlower(h->gtree.root, (unsigned long) p)))
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    else
        n = (allocnode *) h->list.head;
    /* Loop through the list of suitable nodes looking for a likely
     * candidate.
     */
    while (n->lnode.next != NULL)
    {
        if ((h->flags & FLG_PAGEALLOC) && (n->info != NULL))
        {
            b = (void *) __mp_rounddown((unsigned long) n->block,
                                        h->heap.memory.page);
            l = __mp_roundup(n->size + ((char *) n->block - (char *) b),
                             h->heap.memory.page);
        }
        else
        {
            b = n->block;
            l = n->size;
        }
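        /* For allocated or freed nodes, widen the range to include the
         * overflow buffers on either side of the block.
         */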
        if (n->info != NULL)
        {
            b = (char *) b - h->oflow;
            l += h->oflow << 1;
        }
        if (p < b)
            if ((char *) p + s > (char *) b)
                return n;
            else
                break;
        else if ((char *) b + l > (char *) p)
            return n;
        n = (allocnode *) n->lnode.next;
    }
    return NULL;
}


#ifdef __cplusplus
}
#endif /* __cplusplus */