1 /* vmem.h
2  *
3  * (c) 1999 Microsoft Corporation. All rights reserved.
4  * Portions (c) 1999 ActiveState Tool Corp, http://www.ActiveState.com/
5  *
6  *    You may distribute under the terms of either the GNU General Public
7  *    License or the Artistic License, as specified in the README file.
8  *
9  * Options:
10  *
11  * Defining _USE_MSVCRT_MEM_ALLOC will cause all memory allocations
12  * to be forwarded to the compiler's MSVCR*.DLL. Defining _USE_LINKED_LIST as
13  * well will track all allocations in a doubly linked list, so that the host can
14  * free all memory allocated when it goes away.
15  * If _USE_MSVCRT_MEM_ALLOC is not defined then Knuth's boundary tag algorithm
16  * is used; defining _USE_BUDDY_BLOCKS will use Knuth's algorithm R
17  * (Buddy system reservation)
18  *
19  */
20 
21 #ifndef ___VMEM_H_INC___
22 #define ___VMEM_H_INC___
23 
24 #ifndef UNDER_CE
25 #define _USE_MSVCRT_MEM_ALLOC
26 #endif
27 #define _USE_LINKED_LIST
28 
29 // #define _USE_BUDDY_BLOCKS
30 
31 // #define _DEBUG_MEM
32 #ifdef _DEBUG_MEM
33 #define ASSERT(f) if(!(f)) DebugBreak();
34 
/* Debug helper: write str followed by a newline to the debugger output. */
inline void MEMODS(char *str)
{
    OutputDebugString(str);
    OutputDebugString("\n");
}
40 
MEMODSlx(char * str,long x)41 inline void MEMODSlx(char *str, long x)
42 {
43     char szBuffer[512];
44     sprintf(szBuffer, "%s %lx\n", str, x);
45     OutputDebugString(szBuffer);
46 }
47 
48 #define WALKHEAP() WalkHeap(0)
49 #define WALKHEAPTRACE() WalkHeap(1)
50 
51 #else
52 
53 #define ASSERT(f)
54 #define MEMODS(x)
55 #define MEMODSlx(x, y)
56 #define WALKHEAP()
57 #define WALKHEAPTRACE()
58 
59 #endif
60 
61 #ifdef _USE_MSVCRT_MEM_ALLOC
62 
63 #ifndef _USE_LINKED_LIST
64 // #define _USE_LINKED_LIST
65 #endif
66 
/*
 * Pass all memory requests through to the compiler's msvcr*.dll.
 * Optionally track by using a doubly linked header.
 */
71 
72 #ifdef _USE_LINKED_LIST
class VMem;
/* Header prepended to every tracked allocation when _USE_LINKED_LIST is
 * in effect.  Headers form a doubly linked list rooted at the owning
 * VMem's m_Dummy sentinel, so ~VMem() can release blocks the host never
 * freed.  `owner` records which pool the block came from so Free() can
 * detect cross-pool frees. */
typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER;
typedef struct _MemoryBlockHeader {
    PMEMORY_BLOCK_HEADER    pNext;
    PMEMORY_BLOCK_HEADER    pPrev;
    VMem *owner;	/* pool this block was allocated from */
} MEMORY_BLOCK_HEADER, *PMEMORY_BLOCK_HEADER;
80 #endif
81 
/*
 * VMem - reference-counted allocator that forwards to the CRT
 * malloc/realloc/free.  With _USE_LINKED_LIST each block carries a
 * MEMORY_BLOCK_HEADER so every live allocation can be released when
 * the pool itself is destroyed.
 */
class VMem
{
public:
    VMem();
    ~VMem();
    void* Malloc(size_t size);
    void* Realloc(void* pMem, size_t size);
    void Free(void* pMem);
    void GetLock(void);
    void FreeLock(void);
    int IsLocked(void);
    long Release(void);
    long AddRef(void);

    /* This flavour has no Win32 heap to create, so construction
     * always succeeds. */
    inline BOOL CreateOk(void)
    {
	return TRUE;
    };

protected:
#ifdef _USE_LINKED_LIST
    /* Insert ptr at the head of the tracking list (right after the
     * m_Dummy sentinel) and stamp it with the owning pool.
     * Caller must hold the lock. */
    void LinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
	PMEMORY_BLOCK_HEADER next = m_Dummy.pNext;
	m_Dummy.pNext = ptr;
	ptr->pPrev = &m_Dummy;
	ptr->pNext = next;
	ptr->owner = this;
	next->pPrev = ptr;
    }
    /* Remove ptr from the tracking list by rewiring its neighbours
     * (ptr's own links are left untouched).  Caller must hold the lock. */
    void UnlinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
	PMEMORY_BLOCK_HEADER next = ptr->pNext;
	PMEMORY_BLOCK_HEADER prev = ptr->pPrev;
	prev->pNext = next;
	next->pPrev = prev;
    }

    MEMORY_BLOCK_HEADER	m_Dummy;	// sentinel node of the circular block list
    CRITICAL_SECTION	m_cs;		// access lock
#endif

    long		m_lRefCount;	// number of current users
};
126 
VMem()127 VMem::VMem()
128 {
129     m_lRefCount = 1;
130 #ifdef _USE_LINKED_LIST
131     InitializeCriticalSection(&m_cs);
132     m_Dummy.pNext = m_Dummy.pPrev =  &m_Dummy;
133     m_Dummy.owner = this;
134 #endif
135 }
136 
/*
 * Destroy the pool.  Any blocks the host failed to free are still on
 * the tracking list, so walk it and Free() each one; the list is empty
 * when the sentinel points back at itself.  `pNext+1` converts the
 * header address into the client pointer that Free() expects.
 */
VMem::~VMem(void)
{
#ifdef _USE_LINKED_LIST
    while (m_Dummy.pNext != &m_Dummy) {
	Free(m_Dummy.pNext+1);
    }
    DeleteCriticalSection(&m_cs);
#endif
}
146 
Malloc(size_t size)147 void* VMem::Malloc(size_t size)
148 {
149 #ifdef _USE_LINKED_LIST
150     GetLock();
151     PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)malloc(size+sizeof(MEMORY_BLOCK_HEADER));
152     if (!ptr) {
153 	FreeLock();
154 	return NULL;
155     }
156     LinkBlock(ptr);
157     FreeLock();
158     return (ptr+1);
159 #else
160     return malloc(size);
161 #endif
162 }
163 
Realloc(void * pMem,size_t size)164 void* VMem::Realloc(void* pMem, size_t size)
165 {
166 #ifdef _USE_LINKED_LIST
167     if (!pMem)
168 	return Malloc(size);
169 
170     if (!size) {
171 	Free(pMem);
172 	return NULL;
173     }
174 
175     GetLock();
176     PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
177     UnlinkBlock(ptr);
178     ptr = (PMEMORY_BLOCK_HEADER)realloc(ptr, size+sizeof(MEMORY_BLOCK_HEADER));
179     if (!ptr) {
180 	FreeLock();
181 	return NULL;
182     }
183     LinkBlock(ptr);
184     FreeLock();
185 
186     return (ptr+1);
187 #else
188     return realloc(pMem, size);
189 #endif
190 }
191 
/*
 * Release a tracked block.  NULL is ignored.  If the block's header
 * says it belongs to a different VMem pool, freeing it here would
 * corrupt both pools' lists, so the mismatch is reported and turned
 * into a deliberate null-pointer write to capture a stack trace; a
 * block whose owner is already NULL is silently ignored.
 */
void VMem::Free(void* pMem)
{
#ifdef _USE_LINKED_LIST
    if (pMem) {
	/* Step back from the client pointer to the hidden header. */
	PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
        if (ptr->owner != this) {
	    if (ptr->owner) {
#if 1
	    	int *nowhere = NULL;
	    	Perl_warn_nocontext("Free to wrong pool %p not %p",this,ptr->owner);
            	*nowhere = 0; /* this segfault is deliberate,
            	                 so you can see the stack trace */
#else
		/* alternative policy: forward the free to the pool
		 * that actually owns the block */
                ptr->owner->Free(pMem);
#endif
	    }
	    return;
        }
	GetLock();
	UnlinkBlock(ptr);
	ptr->owner = NULL;	/* clear owner before handing back to the CRT */
	free(ptr);
	FreeLock();
    }
#else /*_USE_LINKED_LIST*/
    free(pMem);
#endif
}
220 
/* Acquire the pool lock (a no-op unless block tracking is enabled). */
void VMem::GetLock(void)
{
#ifdef _USE_LINKED_LIST
    EnterCriticalSection(&m_cs);
#endif
}
227 
/* Release the pool lock (a no-op unless block tracking is enabled). */
void VMem::FreeLock(void)
{
#ifdef _USE_LINKED_LIST
    LeaveCriticalSection(&m_cs);
#endif
}
234 
/*
 * Would report whether the lock is currently held, but the real
 * implementation is compiled out because TryEnterCriticalSection()
 * is unavailable on some Windows 95 builds.  Nothing calls this yet,
 * so it just asserts (debug builds) and reports "not locked".
 */
int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95.  Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
	LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0);	/* alarm bells for when somebody calls this */
    return 0;
#endif
}
251 
/*
 * Drop one reference; destroys the pool when the count reaches zero.
 * Returns the new count.  lCount is a local copy, so returning it
 * after `delete this` is safe.
 */
long VMem::Release(void)
{
    long lCount = InterlockedDecrement(&m_lRefCount);
    if(!lCount)
	delete this;
    return lCount;
}
259 
AddRef(void)260 long VMem::AddRef(void)
261 {
262     long lCount = InterlockedIncrement(&m_lRefCount);
263     return lCount;
264 }
265 
266 #else	/* _USE_MSVCRT_MEM_ALLOC */
267 
268 /*
269  * Knuth's boundary tag algorithm Vol #1, Page 440.
270  *
271  * Each block in the heap has tag words before and after it,
272  *  TAG
273  *  block
274  *  TAG
275  * The size is stored in these tags as a long word, and includes the 8 bytes
276  * of overhead that the boundary tags consume.  Blocks are allocated on long
277  * word boundaries, so the size is always multiples of long words.  When the
278  * block is allocated, bit 0, (the tag bit), of the size is set to 1.  When
279  * a block is freed, it is merged with adjacent free blocks, and the tag bit
280  * is set to 0.
281  *
282  * A linked list is used to manage the free list. The first two long words of
283  * the block contain double links.  These links are only valid when the block
284  * is freed, therefore space needs to be reserved for them.  Thus, the minimum
285  * block size (not counting the tags) is 8 bytes.
286  *
 * Since memory allocation may occur on a single thread, explicit locks
 * are not provided here; callers use GetLock()/FreeLock() as needed.
289  *
290  */
291 
const long lAllocStart = 0x00020000; /* start at 128K */
const long minBlockSize = sizeof(void*)*2;	/* room for the two free-list links */
const long sizeofTag = sizeof(long);		/* one boundary tag */
const long blockOverhead = sizeofTag*2;		/* tags before and after the block */
const long minAllocSize = minBlockSize+blockOverhead;
#ifdef _USE_BUDDY_BLOCKS
const long lSmallBlockSize = 1024;	/* blocks below this get a dedicated free list */
const size_t nListEntries = ((lSmallBlockSize-minAllocSize)/sizeof(long));

/* Map a rounded-up allocation size to its small-block free-list index.
 * size must already be a multiple of sizeof(long). */
inline size_t CalcEntry(size_t size)
{
    ASSERT((size&(sizeof(long)-1)) == 0);
    return ((size - minAllocSize) / sizeof(long));
}
#endif
307 
typedef BYTE* PBLOCK;	/* pointer to a memory block */

/*
 * Macros for accessing hidden fields in a memory block:
 *
 * SIZE	    size of this block (tag bit 0 is 1 if block is allocated)
 * PSIZE    size of previous physical block
 */

#define SIZE(block)	(*(ULONG*)(((PBLOCK)(block))-sizeofTag))
#define PSIZE(block)	(*(ULONG*)(((PBLOCK)(block))-(blockOverhead)))
/* Write both boundary tags of a block.  size may carry the allocated
 * bit (bit 0); it is masked off when locating the trailing tag. */
inline void SetTags(PBLOCK block, long size)
{
    SIZE(block) = size;
    PSIZE(block+(size&~1)) = size;
}
324 
/*
 * Free list pointers, stored in the first two words of a free block:
 * PREV	pointer to previous block
 * NEXT	pointer to next block
 */

#define PREV(block)	(*(PBLOCK*)(block))
#define NEXT(block)	(*(PBLOCK*)((block)+sizeof(PBLOCK)))
/* Point block's free-list links at prev and next. */
inline void SetLink(PBLOCK block, PBLOCK prev, PBLOCK next)
{
    PREV(block) = prev;
    NEXT(block) = next;
}
/* Remove p from the doubly linked free list by rewiring its
 * neighbours; p's own links are left untouched. */
inline void Unlink(PBLOCK p)
{
    PBLOCK next = NEXT(p);
    PBLOCK prev = PREV(p);
    NEXT(prev) = next;
    PREV(next) = prev;
}
#ifndef _USE_BUDDY_BLOCKS
/* Insert block into the free list immediately after pInList. */
inline void AddToFreeList(PBLOCK block, PBLOCK pInList)
{
    PBLOCK next = NEXT(pInList);
    NEXT(pInList) = block;
    SetLink(block, pInList, next);
    PREV(next) = block;
}
#endif
354 
/* Macros for rounding to sizeof(long) / 64K boundaries. */
#define ROUND_UP(n)	(((ULONG)(n)+sizeof(long)-1)&~(sizeof(long)-1))
#define ROUND_UP64K(n)	(((ULONG)(n)+0x10000-1)&~(0x10000-1))
#define ROUND_DOWN(n)	((ULONG)(n)&~(sizeof(long)-1))

/*
 * HeapRec - a list of all non-contiguous heap areas
 *
 * Each record in this array contains information about a non-contiguous heap area.
 */

const int maxHeaps = 32; /* 64 was overkill */
const long lAllocMax   = 0x80000000; /* max size of allocation */

#ifdef _USE_BUDDY_BLOCKS
/* One dummy block per small-block size class; the free list for that
 * class is threaded through it. */
typedef struct _FreeListEntry
{
    BYTE    Dummy[minAllocSize];	// dummy free block
} FREE_LIST_ENTRY, *PFREE_LIST_ENTRY;
#endif

#ifndef _USE_BUDDY_BLOCKS
#define USE_BIGBLOCK_ALLOC
#endif
/*
 * performance tuning
 * Use VirtualAlloc() for blocks bigger than nMaxHeapAllocSize since
 * Windows 95/98/Me have heap managers that are designed for memory
 * blocks smaller than four megabytes.
 */

#ifdef USE_BIGBLOCK_ALLOC
const int nMaxHeapAllocSize = (1024*512);  /* don't allocate anything larger than this from the heap */
#endif

/* Descriptor for one non-contiguous heap area owned by a VMem. */
typedef struct _HeapRec
{
    PBLOCK	base;	/* base of heap area */
    ULONG	len;	/* size of heap area */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL	bBigBlock;  /* was allocated using VirtualAlloc */
#endif
} HeapRec;
398 
/*
 * VMem - allocator built on Knuth's boundary-tag algorithm (or the
 * buddy-block variant when _USE_BUDDY_BLOCKS is defined).  Client
 * blocks are carved out of up to maxHeaps non-contiguous heap areas;
 * free blocks live on doubly linked lists threaded through the
 * blocks themselves.
 */
class VMem
{
public:
    VMem();
    ~VMem();
    void* Malloc(size_t size);
    void* Realloc(void* pMem, size_t size);
    void Free(void* pMem);
    void GetLock(void);
    void FreeLock(void);
    int IsLocked(void);
    long Release(void);
    long AddRef(void);

    /* Did construction succeed?  Only the Win32-heap build can fail
     * (HeapCreate() returning NULL). */
    inline BOOL CreateOk(void)
    {
#ifdef _USE_BUDDY_BLOCKS
	return TRUE;
#else
	return m_hHeap != NULL;
#endif
    };

    void ReInit(void);

protected:
    void Init(void);
    int Getmem(size_t size);

    int HeapAdd(void* ptr, size_t size
#ifdef USE_BIGBLOCK_ALLOC
	, BOOL bBigBlock
#endif
    );

    void* Expand(void* block, size_t size);

#ifdef _USE_BUDDY_BLOCKS
    /* Dummy block heading the free list for a size class; indices at
     * or past the end map onto the shared oversize list. */
    inline PBLOCK GetFreeListLink(int index)
    {
	if (index >= nListEntries)
	    index = nListEntries-1;
	return &m_FreeList[index].Dummy[sizeofTag];
    }
    /* Free list for blocks too large for a dedicated size class. */
    inline PBLOCK GetOverSizeFreeList(void)
    {
	return &m_FreeList[nListEntries-1].Dummy[sizeofTag];
    }
    /* End-of-list marker entry (its links are set NULL in Init()). */
    inline PBLOCK GetEOLFreeList(void)
    {
	return &m_FreeList[nListEntries].Dummy[sizeofTag];
    }

    /* Push a free block onto the list for its size class. */
    void AddToFreeList(PBLOCK block, size_t size)
    {
	PBLOCK pFreeList = GetFreeListLink(CalcEntry(size));
	PBLOCK next = NEXT(pFreeList);
	NEXT(pFreeList) = block;
	SetLink(block, pFreeList, next);
	PREV(next) = block;
    }
#endif
    /* Round a request up to a long-word multiple plus tag overhead;
     * anything below the minimum block size gets the minimum. */
    inline size_t CalcAllocSize(size_t size)
    {
	/*
	 * Adjust the real size of the block to be a multiple of sizeof(long), and add
	 * the overhead for the boundary tags.  Disallow negative or zero sizes.
	 */
	return (size < minBlockSize) ? minAllocSize : (size_t)ROUND_UP(size) + blockOverhead;
    }

#ifdef _USE_BUDDY_BLOCKS
    FREE_LIST_ENTRY	m_FreeList[nListEntries+1];	// free list with dummy end of list entry as well
#else
    HANDLE		m_hHeap;		    // memory heap for this script
    char		m_FreeDummy[minAllocSize];  // dummy free block
    PBLOCK		m_pFreeList;		    // pointer to first block on free list
#endif
    PBLOCK		m_pRover;		    // roving pointer into the free list
    HeapRec		m_heaps[maxHeaps];	    // list of all non-contiguous heap areas
    int			m_nHeaps;		    // no. of heaps in m_heaps
    long		m_lAllocSize;		    // current alloc size
    long		m_lRefCount;		    // number of current users
    CRITICAL_SECTION	m_cs;			    // access lock

#ifdef _DEBUG_MEM
    void WalkHeap(int complete);
    void MemoryUsageMessage(char *str, long x, long y, int c);
    FILE*		m_pLog;
#endif
};
490 
/*
 * Construct the allocator: create the private Win32 heap (non-buddy
 * build), initialize the lock, and set up empty free lists via Init().
 * Heap-creation failure is surfaced through CreateOk(), not here.
 */
VMem::VMem()
{
    m_lRefCount = 1;
#ifndef _USE_BUDDY_BLOCKS
    BOOL bRet = (NULL != (m_hHeap = HeapCreate(HEAP_NO_SERIALIZE,
				lAllocStart,	/* initial size of heap */
				0)));		/* no upper limit on size of heap */
    ASSERT(bRet);
#endif

    InitializeCriticalSection(&m_cs);
#ifdef _DEBUG_MEM
    m_pLog = 0;
#endif

    Init();
}
508 
/*
 * Tear down the allocator: validate the heap (debug), emit a heap
 * trace, delete the lock, then release every heap area.  Areas that
 * came from VirtualAlloc() are released individually; the rest go
 * away with the single HeapDestroy().
 */
VMem::~VMem(void)
{
#ifndef _USE_BUDDY_BLOCKS
    ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, NULL));
#endif
    WALKHEAPTRACE();

    DeleteCriticalSection(&m_cs);
#ifdef _USE_BUDDY_BLOCKS
    for(int index = 0; index < m_nHeaps; ++index) {
	VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
    }
#else /* !_USE_BUDDY_BLOCKS */
#ifdef USE_BIGBLOCK_ALLOC
    for(int index = 0; index < m_nHeaps; ++index) {
	if (m_heaps[index].bBigBlock) {
	    VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
	}
    }
#endif
    BOOL bRet = HeapDestroy(m_hHeap);
    ASSERT(bRet);
#endif /* _USE_BUDDY_BLOCKS */
}
533 
/*
 * Discard every heap area (without destroying the Win32 heap itself)
 * and reset the allocator to its freshly constructed state via Init().
 */
void VMem::ReInit(void)
{
    for(int index = 0; index < m_nHeaps; ++index) {
#ifdef _USE_BUDDY_BLOCKS
	VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
#else
#ifdef USE_BIGBLOCK_ALLOC
	if (m_heaps[index].bBigBlock) {
	    VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
	}
	else
#endif
	    HeapFree(m_hHeap, HEAP_NO_SERIALIZE, m_heaps[index].base);
#endif /* _USE_BUDDY_BLOCKS */
    }

    Init();
}
552 
/*
 * Reset bookkeeping to "no memory yet": seed the free list(s) with
 * dummy zero-length blocks linked to themselves, clear the heap
 * count, and start the allocation growth schedule at lAllocStart.
 */
void VMem::Init(void)
{
#ifdef _USE_BUDDY_BLOCKS
    PBLOCK pFreeList;
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the end of list marker.
     * Set the number of non-contiguous heaps to zero.
     * Set the next allocation size.
     */
    for (int index = 0; index < nListEntries; ++index) {
	pFreeList = GetFreeListLink(index);
	SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
	PREV(pFreeList) = NEXT(pFreeList) = pFreeList;
    }
    pFreeList = GetEOLFreeList();
    SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
    /* NULL links distinguish the end-of-list marker from real entries
     * (see the NEXT(ptr) loop condition in Malloc()). */
    PREV(pFreeList) = NEXT(pFreeList) = NULL;
    m_pRover = GetOverSizeFreeList();
#else
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the number of non-contiguous heaps to zero.
     */
    m_pFreeList = m_pRover = (PBLOCK)(&m_FreeDummy[sizeofTag]);
    PSIZE(m_pFreeList+minAllocSize) = SIZE(m_pFreeList) = 0;
    PREV(m_pFreeList) = NEXT(m_pFreeList) = m_pFreeList;
#endif

    m_nHeaps = 0;
    m_lAllocSize = lAllocStart;
}
585 
/*
 * Allocate at least `size` bytes.  Returns NULL for zero-sized
 * requests, for requests whose adjusted size is out of range, or when
 * no more memory can be obtained from the OS.
 *
 * Buddy build: try the block's exact size class, then the rover, then
 * scan the larger size classes up to the end-of-list marker.  All
 * builds then fall through to a first-fit walk of the general free
 * list starting at the rover, growing the arena with Getmem() and
 * retrying once if the walk finds nothing.  In every path, a free
 * block whose remainder is at least minAllocSize is split; the
 * returned block gets its tag bit set via SetTags(..., size|1).
 */
void* VMem::Malloc(size_t size)
{
    WALKHEAP();

    PBLOCK ptr;
    size_t lsize, rem;
    /*
     * Disallow negative or zero sizes.
     * NOTE(review): the (int) cast is what rejects huge requests whose
     * adjusted size wrapped or exceeds INT_MAX -- confirm on 64-bit.
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
	return NULL;

#ifdef _USE_BUDDY_BLOCKS
    /*
     * Check the free list of small blocks if this is free use it
     * Otherwise check the rover if it has no blocks then
     * Scan the free list entries use the first free block
     * split the block if needed, stop at end of list marker
     * (the unlink-or-split sequence below is repeated verbatim for
     * each of the three candidate sources)
     */
    {
	int index = CalcEntry(realsize);
	if (index < nListEntries-1) {
	    ptr = GetFreeListLink(index);
	    lsize = SIZE(ptr);
	    if (lsize >= realsize) {
		rem = lsize - realsize;
		if(rem < minAllocSize) {
		    /* Unlink the block from the free list. */
		    Unlink(ptr);
		}
		else {
		    /*
		     * split the block
		     * The remainder is big enough to split off into a new block.
		     * Use the end of the block, resize the beginning of the block
		     * no need to change the free list.
		     */
		    SetTags(ptr, rem);
		    ptr += SIZE(ptr);
		    lsize = realsize;
		}
		SetTags(ptr, lsize | 1);
		return ptr;
	    }
	    ptr = m_pRover;
	    lsize = SIZE(ptr);
	    if (lsize >= realsize) {
		rem = lsize - realsize;
		if(rem < minAllocSize) {
		    /* Unlink the block from the free list. */
		    Unlink(ptr);
		}
		else {
		    /*
		     * split the block
		     * The remainder is big enough to split off into a new block.
		     * Use the end of the block, resize the beginning of the block
		     * no need to change the free list.
		     */
		    SetTags(ptr, rem);
		    ptr += SIZE(ptr);
		    lsize = realsize;
		}
		SetTags(ptr, lsize | 1);
		return ptr;
	    }
	    ptr = GetFreeListLink(index+1);
	    while (NEXT(ptr)) {
		lsize = SIZE(ptr);
		if (lsize >= realsize) {
		    size_t rem = lsize - realsize;
		    if(rem < minAllocSize) {
			/* Unlink the block from the free list. */
			Unlink(ptr);
		    }
		    else {
			/*
			 * split the block
			 * The remainder is big enough to split off into a new block.
			 * Use the end of the block, resize the beginning of the block
			 * no need to change the free list.
			 */
			SetTags(ptr, rem);
			ptr += SIZE(ptr);
			lsize = realsize;
		    }
		    SetTags(ptr, lsize | 1);
		    return ptr;
		}
		ptr += sizeof(FREE_LIST_ENTRY);	/* advance to the next size class's dummy */
	    }
	}
    }
#endif

    /*
     * Start searching the free list at the rover.  If we arrive back at rover without
     * finding anything, allocate some memory from the heap and try again.
     */
    ptr = m_pRover;	/* start searching at rover */
    int loops = 2;	/* allow two times through the loop  */
    for(;;) {
	lsize = SIZE(ptr);
	ASSERT((lsize&1)==0);	/* free-list blocks must not carry the allocated bit */
	/* is block big enough? */
	if(lsize >= realsize) {
	    /* if the remainder is too small, don't bother splitting the block. */
	    rem = lsize - realsize;
	    if(rem < minAllocSize) {
		if(m_pRover == ptr)
		    m_pRover = NEXT(ptr);

		/* Unlink the block from the free list. */
		Unlink(ptr);
	    }
	    else {
		/*
		 * split the block
		 * The remainder is big enough to split off into a new block.
		 * Use the end of the block, resize the beginning of the block
		 * no need to change the free list.
		 */
		SetTags(ptr, rem);
		ptr += SIZE(ptr);
		lsize = realsize;
	    }
	    /* Set the boundary tags to mark it as allocated. */
	    SetTags(ptr, lsize | 1);
	    return ((void *)ptr);
	}

	/*
	 * This block was unsuitable.  If we've gone through this list once already without
	 * finding anything, allocate some new memory from the heap and try again.
	 */
	ptr = NEXT(ptr);
	if(ptr == m_pRover) {
	    if(!(loops-- && Getmem(realsize))) {
		return NULL;
	    }
	    ptr = m_pRover;
	}
    }
}
731 
/*
 * Resize a block.  size==0 degenerates to Free(); block==NULL to
 * Malloc().  Tries, in order: growing/shrinking in place via
 * Expand(); merging with a free previous physical block (contents
 * are moved down with memmove before the sizes are combined); and
 * finally a fresh Malloc() + copy + Free() of the old block.
 */
void* VMem::Realloc(void* block, size_t size)
{
    WALKHEAP();

    /* if size is zero, free the block. */
    if(size == 0) {
	Free(block);
	return (NULL);
    }

    /* if block pointer is NULL, do a Malloc(). */
    if(block == NULL)
	return Malloc(size);

    /*
     * Grow or shrink the block in place.
     * if the block grows then the next block will be used if free
     */
    if(Expand(block, size) != NULL)
	return block;

    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize)
	return NULL;

    /*
     * see if the previous block is free, and is it big enough to cover the new size
     * if merged with the current block.
     */
    PBLOCK ptr = (PBLOCK)block;
    size_t cursize = SIZE(ptr) & ~1;	/* current size without the allocated bit */
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0 && (psize + cursize) >= realsize) {
	PBLOCK prev = ptr - psize;
	if(m_pRover == prev)
	    m_pRover = NEXT(prev);

	/* Unlink the previous block from the free list. */
	Unlink(prev);

	/* Copy contents of old block to new location, make it the current block. */
	memmove(prev, ptr, cursize);
	cursize += psize;	/* combine sizes */
	ptr = prev;

	size_t rem = cursize - realsize;
	if(rem >= minAllocSize) {
	    /*
	     * The remainder is big enough to be a new block.  Set boundary
	     * tags for the resized block and the new block.
	     */
	    prev = ptr + realsize;
	    /*
	     * add the new block to the free list.
	     * next block cannot be free
	     */
	    SetTags(prev, rem);
#ifdef _USE_BUDDY_BLOCKS
	    AddToFreeList(prev, rem);
#else
	    AddToFreeList(prev, m_pFreeList);
#endif
	    cursize = realsize;
        }
	/* Set the boundary tags to mark it as allocated. */
	SetTags(ptr, cursize | 1);
        return ((void *)ptr);
    }

    /* Allocate a new block, copy the old to the new, and free the old. */
    if((ptr = (PBLOCK)Malloc(size)) != NULL) {
	/* cursize includes the tag overhead; only the payload is copied */
	memmove(ptr, block, cursize-blockOverhead);
	Free(block);
    }
    return ((void *)ptr);
}
808 
/*
 * Return a block to the free pool, coalescing with free physical
 * neighbours on either side.  A block whose tag bit is already clear
 * was freed before; that is reported (debug builds) and ignored
 * rather than corrupting the lists.
 */
void VMem::Free(void* p)
{
    WALKHEAP();

    /* Ignore null pointer. */
    if(p == NULL)
	return;

    PBLOCK ptr = (PBLOCK)p;

    /* Check for attempt to free a block that's already free. */
    size_t size = SIZE(ptr);
    if((size&1) == 0) {
	MEMODSlx("Attempt to free previously freed block", (long)p);
	return;
    }
    size &= ~1;	/* remove allocated tag */

    /* if previous block is free, add this block to it. */
#ifndef _USE_BUDDY_BLOCKS
    int linked = FALSE;	/* did the merge leave us already on the free list? */
#endif
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0) {
	ptr -= psize;	/* point to previous block */
	size += psize;	/* merge the sizes of the two blocks */
#ifdef _USE_BUDDY_BLOCKS
	/* size classes change with the merged size, so relink below */
	Unlink(ptr);
#else
	linked = TRUE;	/* it's already on the free list */
#endif
    }

    /* if the next physical block is free, merge it with this block. */
    PBLOCK next = ptr + size;	/* point to next physical block */
    size_t nsize = SIZE(next);
    if((nsize&1) == 0) {
	/* block is free move rover if needed */
	if(m_pRover == next)
	    m_pRover = NEXT(next);

	/* unlink the next block from the free list. */
	Unlink(next);

	/* merge the sizes of this block and the next block. */
	size += nsize;
    }

    /* Set the boundary tags for the block; */
    SetTags(ptr, size);

    /* Link the block to the head of the free list. */
#ifdef _USE_BUDDY_BLOCKS
	AddToFreeList(ptr, size);
#else
    if(!linked) {
	AddToFreeList(ptr, m_pFreeList);
    }
#endif
}
869 
/* Acquire the allocator's lock. */
void VMem::GetLock(void)
{
    EnterCriticalSection(&m_cs);
}
874 
/* Release the allocator's lock. */
void VMem::FreeLock(void)
{
    LeaveCriticalSection(&m_cs);
}
879 
/*
 * Would report whether the lock is currently held, but the real
 * implementation is compiled out because TryEnterCriticalSection()
 * is unavailable on some Windows 95 builds.  Nothing calls this yet,
 * so it just asserts (debug builds) and reports "not locked".
 */
int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95.  Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
	LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0);	/* alarm bells for when somebody calls this */
    return 0;
#endif
}
896 
897 
/*
 * Drop one reference; destroys the allocator when the count reaches
 * zero.  Returns the new count.  lCount is a local copy, so returning
 * it after `delete this` is safe.
 */
long VMem::Release(void)
{
    long lCount = InterlockedDecrement(&m_lRefCount);
    if(!lCount)
	delete this;
    return lCount;
}
905 
AddRef(void)906 long VMem::AddRef(void)
907 {
908     long lCount = InterlockedIncrement(&m_lRefCount);
909     return lCount;
910 }
911 
912 
/*
 * Grow the arena by at least requestSize bytes, rounded up to 64K and
 * to at least the current growth quantum (which itself quadruples on
 * every call, up to lAllocMax).  Returns -1 if successful, 0 if not.
 * First tries to extend the most recently added heap area in place;
 * otherwise obtains a new area -- from VirtualAlloc() for big blocks,
 * else from the private Win32 heap -- halving the size and retrying
 * while the allocation fails but would still cover the request.
 */
int VMem::Getmem(size_t requestSize)
{   /* returns -1 if successful, 0 if not */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL bBigBlock;
#endif
    void *ptr;

    /* Round up size to next multiple of 64K. */
    size_t size = (size_t)ROUND_UP64K(requestSize);

    /*
     * if the size requested is smaller than our current allocation size
     * adjust up
     */
    if(size < (unsigned long)m_lAllocSize)
	size = m_lAllocSize;

    /* Update the size to allocate on the next request */
    if(m_lAllocSize != lAllocMax)
	m_lAllocSize <<= 2;

#ifndef _USE_BUDDY_BLOCKS
    if(m_nHeaps != 0
#ifdef USE_BIGBLOCK_ALLOC
	&& !m_heaps[m_nHeaps-1].bBigBlock
#endif
		    ) {
	/* Expand the last allocated heap */
	ptr = HeapReAlloc(m_hHeap, HEAP_REALLOC_IN_PLACE_ONLY|HEAP_NO_SERIALIZE,
		m_heaps[m_nHeaps-1].base,
		m_heaps[m_nHeaps-1].len + size);
	if(ptr != 0) {
	    /* register only the newly added tail; HeapAdd() will merge
	     * it with the existing contiguous area */
	    HeapAdd(((char*)ptr) + m_heaps[m_nHeaps-1].len, size
#ifdef USE_BIGBLOCK_ALLOC
		, FALSE
#endif
		);
	    return -1;
	}
    }
#endif /* _USE_BUDDY_BLOCKS */

    /*
     * if we didn't expand a block to cover the requested size
     * allocate a new Heap
     * the size of this block must include the additional dummy tags at either end
     * the above ROUND_UP64K may not have added any memory to include this.
     */
    if(size == requestSize)
	size = (size_t)ROUND_UP64K(requestSize+(blockOverhead));

Restart:
#ifdef _USE_BUDDY_BLOCKS
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
#else
#ifdef USE_BIGBLOCK_ALLOC
    bBigBlock = FALSE;
    if (size >= nMaxHeapAllocSize) {
	bBigBlock = TRUE;
	ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    }
    else
#endif
    ptr = HeapAlloc(m_hHeap, HEAP_NO_SERIALIZE, size);
#endif /* _USE_BUDDY_BLOCKS */

    if (!ptr) {
	/* try to allocate a smaller chunk */
	size >>= 1;
	if(size > requestSize)
	    goto Restart;
    }

    if(ptr == 0) {
	MEMODSlx("HeapAlloc failed on size!!!", size);
	return 0;
    }

#ifdef _USE_BUDDY_BLOCKS
    if (HeapAdd(ptr, size)) {
	/* heap table full: hand the area back */
	VirtualFree(ptr, 0, MEM_RELEASE);
	return 0;
    }
#else
#ifdef USE_BIGBLOCK_ALLOC
    if (HeapAdd(ptr, size, bBigBlock)) {
	if (bBigBlock) {
	    VirtualFree(ptr, 0, MEM_RELEASE);
	}
    }
#else
    HeapAdd(ptr, size);
#endif
#endif /* _USE_BUDDY_BLOCKS */
    return -1;
}
1009 
/*
 * Register a newly obtained memory area and convert it into one big
 * allocated block that is immediately handed to Free(), which links
 * it into the free list and coalesces it with any adjacent free
 * space.  Areas contiguous with an existing (non-BigBlock) heap are
 * merged into that heap's record instead of taking a new slot.
 * Returns 0 on success, -1 if the area is too small or the m_heaps
 * table is already full.
 */
int VMem::HeapAdd(void* p, size_t size
#ifdef USE_BIGBLOCK_ALLOC
    , BOOL bBigBlock
#endif
    )
{   /* if the block can be successfully added to the heap, returns 0; otherwise -1. */
    int index;

    /* Check size, then round size down to next long word boundary. */
    if(size < minAllocSize)
	return -1;

    size = (size_t)ROUND_DOWN(size);
    PBLOCK ptr = (PBLOCK)p;

#ifdef USE_BIGBLOCK_ALLOC
    if (!bBigBlock) {
#endif
	/*
	 * Search for another heap area that's contiguous with the bottom of this new area.
	 * (It should be extremely unusual to find one that's contiguous with the top).
	 */
	for(index = 0; index < m_nHeaps; ++index) {
	    if(ptr == m_heaps[index].base + (int)m_heaps[index].len) {
		/*
		 * The new block is contiguous with a previously allocated heap area.  Add its
		 * length to that of the previous heap.  Merge it with the dummy end-of-heap
		 * area marker of the previous heap.
		 */
		m_heaps[index].len += size;
		break;
	    }
	}
#ifdef USE_BIGBLOCK_ALLOC
    }
    else {
	/* BigBlock areas are never merged; force the "new slot" path. */
	index = m_nHeaps;
    }
#endif

    if(index == m_nHeaps) {
	/* The new block is not contiguous, or is BigBlock.  Add it to the heap list. */
	if(m_nHeaps == maxHeaps) {
	    return -1;	/* too many non-contiguous heaps */
	}
	m_heaps[m_nHeaps].base = ptr;
	m_heaps[m_nHeaps].len = size;
#ifdef USE_BIGBLOCK_ALLOC
	m_heaps[m_nHeaps].bBigBlock = bBigBlock;
#endif
	m_nHeaps++;

	/*
	 * Reserve the first LONG in the block for the ending boundary tag of a dummy
	 * block at the start of the heap area.
	 */
	size -= blockOverhead;
	ptr += blockOverhead;
	PSIZE(ptr) = 1;	/* mark the dummy previous block as allocated */
    }

    /*
     * Convert the heap to one large block.  Set up its boundary tags, and those of
     * marker block after it.  The marker block before the heap will already have
     * been set up if this heap is not contiguous with the end of another heap.
     */
    SetTags(ptr, size | 1);
    PBLOCK next = ptr + size;	/* point to dummy end block */
    SIZE(next) = 1;	/* mark the dummy end block as allocated */

    /*
     * Link the block to the start of the free list by calling free().
     * This will merge the block with any adjacent free blocks.
     */
    Free(ptr);
    return 0;
}
1087 
1088 
/*
 * Expand - try to resize an allocated block IN PLACE (realloc helper).
 *
 * block - pointer to the user area of a block previously handed out by
 *         this allocator (its boundary tags live just outside it).
 * size  - requested new user size in bytes.
 *
 * Returns `block` (possibly after splitting off a trailing free block)
 * on success, or NULL when the block cannot be resized in place -- the
 * caller is then presumably expected to allocate-copy-free instead
 * (NOTE(review): fallback path is outside this view; confirm in Realloc).
 * The low bit of a boundary tag is the "allocated" flag throughout.
 */
Expand(void * block,size_t size)1089 void* VMem::Expand(void* block, size_t size)
{
    /*
     * Disallow negative or zero sizes.
     */
    /* CalcAllocSize adds block overhead; the (int) cast also rejects
     * requests so large that realsize wrapped/went "negative". */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
	return NULL;

    PBLOCK ptr = (PBLOCK)block;

    /* if the current size is the same as requested, do nothing. */
    size_t cursize = SIZE(ptr) & ~1;	/* mask off the allocated bit */
    if(cursize == realsize) {
	return block;
    }

    /* if the block is being shrunk, convert the remainder of the block into a new free block. */
    if(realsize <= cursize) {
	size_t nextsize = cursize - realsize;	/* size of new remainder block */
	if(nextsize >= minAllocSize) {
	    /*
	     * Split the block
	     * Set boundary tags for the resized block and the new block.
	     */
	    SetTags(ptr, realsize | 1);
	    ptr += realsize;

	    /*
	     * add the new block to the free list.
	     * call Free to merge this block with next block if free
	     */
	    /* Remainder is tagged "allocated" first so Free() can treat it
	     * like any other block being released (and coalesce forward). */
	    SetTags(ptr, nextsize | 1);
	    Free(ptr);
	}
	/* Remainder too small to stand alone: keep it inside the block. */

	return block;
    }

    /* Growing: the only in-place option is to absorb the next block. */
    PBLOCK next = ptr + cursize;
    size_t nextsize = SIZE(next);

    /* Check the next block for consistency.*/
    /* Low bit clear => next block is free; combined size must cover the request. */
    if((nextsize&1) == 0 && (nextsize + cursize) >= realsize) {
	/*
	 * The next block is free and big enough.  Add the part that's needed
	 * to our block, and split the remainder off into a new block.
	 */
	/* Don't leave the allocation rover pointing at a block we are
	 * about to unlink from the free list. */
	if(m_pRover == next)
	    m_pRover = NEXT(next);

	/* Unlink the next block from the free list. */
	Unlink(next);
	cursize += nextsize;	/* combine sizes */

	size_t rem = cursize - realsize;	/* size of remainder */
	if(rem >= minAllocSize) {
	    /*
	     * The remainder is big enough to be a new block.
	     * Set boundary tags for the resized block and the new block.
	     */
	    next = ptr + realsize;
	    /*
	     * add the new block to the free list.
	     * next block cannot be free
	     */
	    /* (No Free() call needed here: the block after the remainder is
	     * allocated, since a free one would already have been coalesced.) */
	    SetTags(next, rem);
#ifdef _USE_BUDDY_BLOCKS
	    AddToFreeList(next, rem);
#else
	    AddToFreeList(next, m_pFreeList);
#endif
	    cursize = realsize;
        }
	/* Set the boundary tags to mark it as allocated. */
	SetTags(ptr, cursize | 1);
	return ((void *)ptr);
    }
    /* Next block allocated, or free but too small: cannot grow in place. */
    return NULL;
}
1169 
1170 #ifdef _DEBUG_MEM
1171 #define LOG_FILENAME ".\\MemLog.txt"
1172 
/*
 * MemoryUsageMessage - debug logging helper (only built under _DEBUG_MEM).
 *
 * str - printf-style format consuming x, y and c, or NULL to flush and
 *       close the log file.
 * x,y - numeric arguments for the format string.
 * c   - character/int argument for the format string.
 *
 * The log file (LOG_FILENAME) is opened lazily on the first message and
 * closed by passing str == NULL (WalkHeap brackets its output this way).
 *
 * Fix: if fopen() failed, m_pLog stayed NULL and the subsequent fputs()
 * dereferenced a NULL FILE*; messages are now dropped when the log file
 * cannot be opened.
 */
MemoryUsageMessage(char * str,long x,long y,int c)1173 void VMem::MemoryUsageMessage(char *str, long x, long y, int c)
{
    char szBuffer[512];
    if(str) {
	if(!m_pLog)
	    m_pLog = fopen(LOG_FILENAME, "w");
	if(m_pLog) {	/* fopen may fail; don't fputs through a NULL FILE* */
	    sprintf(szBuffer, str, x, y, c);
	    fputs(szBuffer, m_pLog);
	}
    }
    else {
	if(m_pLog) {
	    fflush(m_pLog);
	    fclose(m_pLog);
	    m_pLog = 0;
	}
    }
}
1190 
/*
 * WalkHeap - debug-only heap consistency check (_DEBUG_MEM builds).
 *
 * complete - when non-zero, dumps per-heap totals and every block to the
 *            log via MemoryUsageMessage() and ASSERTs structural
 *            invariants; when zero, does nothing.
 *
 * For each heap area it verifies the dummy start/end sentinel blocks,
 * that each block's trailing tag matches its leading tag, and that every
 * block with a clear allocated bit is actually reachable on the circular
 * free list.
 *
 * Fix: the bBigBlock test used m_heaps[m_nHeaps], indexing one past the
 * last valid heap entry (m_nHeaps is the count); it now tests the heap
 * being walked, m_heaps[index].
 */
WalkHeap(int complete)1191 void VMem::WalkHeap(int complete)
{
    if(complete) {
	MemoryUsageMessage(NULL, 0, 0, 0);	/* reset/close any prior log */
	size_t total = 0;
	for(int i = 0; i < m_nHeaps; ++i) {
	    total += m_heaps[i].len;
	}
	MemoryUsageMessage("VMem heaps used %d. Total memory %08x\n", m_nHeaps, total, 0);

	/* Walk all the heaps - verify structures */
	for(int index = 0; index < m_nHeaps; ++index) {
	    PBLOCK ptr = m_heaps[index].base;
	    size_t size = m_heaps[index].len;
#ifndef _USE_BUDDY_BLOCKS
#ifdef USE_BIGBLOCK_ALLOC
	    /* Big blocks don't come from m_hHeap, so HeapValidate
	     * doesn't apply to them.  (Was m_heaps[m_nHeaps]: an
	     * out-of-bounds read of the entry past the last heap.) */
	    if (!m_heaps[index].bBigBlock)
#endif
		ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, ptr));
#endif

	    /* set over reserved header block */
	    size -= blockOverhead;
	    ptr += blockOverhead;
	    PBLOCK pLast = ptr + size;
	    ASSERT(PSIZE(ptr) == 1); /* dummy previous block is allocated */
	    ASSERT(SIZE(pLast) == 1); /* dummy next block is allocated */
	    while(ptr < pLast) {
		ASSERT(ptr > m_heaps[index].base);
		size_t cursize = SIZE(ptr) & ~1;
		/* leading and trailing boundary tags must agree */
		ASSERT((PSIZE(ptr+cursize) & ~1) == cursize);
		MemoryUsageMessage("Memory Block %08x: Size %08x %c\n", (long)ptr, cursize, (SIZE(ptr)&1) ? 'x' : ' ');
		if(!(SIZE(ptr)&1)) {
		    /* this block is on the free list */
		    PBLOCK tmp = NEXT(ptr);
		    while(tmp != ptr) {
			ASSERT((SIZE(tmp)&1)==0);	/* free-list entries must be free */
			if(tmp == m_pFreeList)
			    break;
			ASSERT(NEXT(tmp));
			tmp = NEXT(tmp);
		    }
		    if(tmp == ptr) {
			MemoryUsageMessage("Memory Block %08x: Size %08x free but not in free list\n", (long)ptr, cursize, 0);
		    }
		}
		ptr += cursize;
	    }
	}
	MemoryUsageMessage(NULL, 0, 0, 0);	/* flush and close the log */
    }
}
1243 #endif	/* _DEBUG_MEM */
1244 
1245 #endif	/* _USE_MSVCRT_MEM_ALLOC */
1246 
1247 #endif	/* ___VMEM_H_INC___ */
1248