1 /* vmem.h
2 *
3 * (c) 1999 Microsoft Corporation. All rights reserved.
4 * Portions (c) 1999 ActiveState Tool Corp, http://www.ActiveState.com/
5 *
6 * You may distribute under the terms of either the GNU General Public
7 * License or the Artistic License, as specified in the README file.
8 *
9 * Options:
10 *
11 * Defining _USE_MSVCRT_MEM_ALLOC will cause all memory allocations
12 * to be forwarded to the compiler's MSVCR*.DLL. Defining _USE_LINKED_LIST as
13 * well will track all allocations in a doubly linked list, so that the host can
14 * free all memory allocated when it goes away.
15 * If _USE_MSVCRT_MEM_ALLOC is not defined then Knuth's boundary tag algorithm
16 * is used; defining _USE_BUDDY_BLOCKS will use Knuth's algorithm R
17 * (Buddy system reservation)
18 *
19 */
20
#ifndef ___VMEM_H_INC___
#define ___VMEM_H_INC___

/* Select the pass-through-to-MSVCRT allocator, and track every live
 * allocation on a doubly linked list so a dying host can free whatever
 * the embedded interpreter leaked. */
#define _USE_MSVCRT_MEM_ALLOC
#define _USE_LINKED_LIST

/* Uncomment to use Knuth's buddy-system variant of the heap instead. */
// #define _USE_BUDDY_BLOCKS

/* Uncomment to enable heap walking, logging and ASSERT diagnostics. */
// #define _DEBUG_MEM
#ifdef _DEBUG_MEM
/* Debug builds: a failed ASSERT drops straight into the debugger. */
#define ASSERT(f) if(!(f)) DebugBreak();
32
/* Emit a diagnostic string, followed by a newline, to the debugger output. */
inline void MEMODS(char *str)
{
    OutputDebugString(str);
    OutputDebugString("\n");
}
38
MEMODSlx(char * str,long x)39 inline void MEMODSlx(char *str, long x)
40 {
41 char szBuffer[512];
42 sprintf(szBuffer, "%s %lx\n", str, x);
43 OutputDebugString(szBuffer);
44 }
45
/* WALKHEAP() validates heap structure; WALKHEAPTRACE() also logs every block. */
#define WALKHEAP() WalkHeap(0)
#define WALKHEAPTRACE() WalkHeap(1)

#else

/* Release builds: all diagnostics compile away to nothing. */
#define ASSERT(f)
#define MEMODS(x)
#define MEMODSlx(x, y)
#define WALKHEAP()
#define WALKHEAPTRACE()

#endif
58
#ifdef _USE_MSVCRT_MEM_ALLOC

#ifndef _USE_LINKED_LIST
// #define _USE_LINKED_LIST
#endif

/*
 * Pass all memory requests through to the compiler's msvcr*.dll.
 * Optionally track by using a doubly linked header.
 */

#ifdef _USE_LINKED_LIST
class VMem;

/*
 * Address an alignment issue with x64 mingw-w64 ports of gcc-12 and
 * (presumably) later. We do the same thing again 16 lines further down.
 * See https://github.com/Perl/perl5/issues/19824
 */

#if defined(__MINGW64__) && __GNUC__ > 11
typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER __attribute__ ((aligned(16)));
#else
typedef struct _MemoryBlockHeader* PMEMORY_BLOCK_HEADER;
#endif

/* Hidden header prepended to every allocation: forward/back links into
 * the owning pool's circular list, plus a back-pointer to the pool that
 * lets Free() detect blocks handed to the wrong allocator. */
typedef struct _MemoryBlockHeader {
    PMEMORY_BLOCK_HEADER pNext;
    PMEMORY_BLOCK_HEADER pPrev;
    VMem *owner;

#if defined(__MINGW64__) && __GNUC__ > 11
} MEMORY_BLOCK_HEADER __attribute__ ((aligned(16))), *PMEMORY_BLOCK_HEADER;
#else
} MEMORY_BLOCK_HEADER, *PMEMORY_BLOCK_HEADER;
#endif

#endif
97
/*
 * Reference-counted allocator that forwards to the CRT malloc family.
 * With _USE_LINKED_LIST every live allocation carries a hidden
 * MEMORY_BLOCK_HEADER and is kept on a circular doubly linked list
 * (m_Dummy is the sentinel), so ~VMem can free anything still live.
 */
class VMem
{
public:
    VMem();
    ~VMem();
    void* Malloc(size_t size);
    void* Realloc(void* pMem, size_t size);
    void Free(void* pMem);
    void GetLock(void);
    void FreeLock(void);
    int IsLocked(void);
    long Release(void);
    long AddRef(void);

    /* Construction cannot fail in this configuration. */
    inline BOOL CreateOk(void)
    {
	return TRUE;
    };

protected:
#ifdef _USE_LINKED_LIST
    /* Insert ptr just after the sentinel (list head) and stamp its owner.
     * Caller must hold the critical section. */
    void LinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
	PMEMORY_BLOCK_HEADER next = m_Dummy.pNext;
	m_Dummy.pNext = ptr;
	ptr->pPrev = &m_Dummy;
	ptr->pNext = next;
	ptr->owner = this;
	next->pPrev = ptr;
    }
    /* Remove ptr from the list. Caller must hold the critical section. */
    void UnlinkBlock(PMEMORY_BLOCK_HEADER ptr)
    {
	PMEMORY_BLOCK_HEADER next = ptr->pNext;
	PMEMORY_BLOCK_HEADER prev = ptr->pPrev;
	prev->pNext = next;
	next->pPrev = prev;
    }

    MEMORY_BLOCK_HEADER	m_Dummy;	// sentinel node of the circular list
    CRITICAL_SECTION	m_cs;		// access lock
#endif

    long	m_lRefCount;	// number of current users
};
142
VMem::VMem()
{
    m_lRefCount = 1;	/* creator holds the first reference */
#ifdef _USE_LINKED_LIST
    InitializeCriticalSection(&m_cs);
    /* Empty circular list: the sentinel points at itself. */
    m_Dummy.pNext = m_Dummy.pPrev = &m_Dummy;
    m_Dummy.owner = this;
#endif
}
152
VMem::~VMem(void)
{
#ifdef _USE_LINKED_LIST
    /* Free every block still on the list; +1 steps past the hidden
     * header to yield the client pointer that Free() expects. */
    while (m_Dummy.pNext != &m_Dummy) {
	Free(m_Dummy.pNext+1);
    }
    DeleteCriticalSection(&m_cs);
#endif
}
162
/* Allocate size bytes; returns NULL on failure.
 * The linked-list build over-allocates by one MEMORY_BLOCK_HEADER,
 * links the block into this pool under the lock, and returns the
 * address just past the header. */
void* VMem::Malloc(size_t size)
{
#ifdef _USE_LINKED_LIST
    GetLock();
    PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)malloc(size+sizeof(MEMORY_BLOCK_HEADER));
    if (!ptr) {
	FreeLock();
	return NULL;
    }
    LinkBlock(ptr);
    FreeLock();
    return (ptr+1);
#else
    return malloc(size);
#endif
}
179
Realloc(void * pMem,size_t size)180 void* VMem::Realloc(void* pMem, size_t size)
181 {
182 #ifdef _USE_LINKED_LIST
183 if (!pMem)
184 return Malloc(size);
185
186 if (!size) {
187 Free(pMem);
188 return NULL;
189 }
190
191 GetLock();
192 PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
193 UnlinkBlock(ptr);
194 ptr = (PMEMORY_BLOCK_HEADER)realloc(ptr, size+sizeof(MEMORY_BLOCK_HEADER));
195 if (!ptr) {
196 FreeLock();
197 return NULL;
198 }
199 LinkBlock(ptr);
200 FreeLock();
201
202 return (ptr+1);
203 #else
204 return realloc(pMem, size);
205 #endif
206 }
207
/* Return a block to the pool. NULL is ignored.
 * A block whose header names a different owning pool is NOT freed here:
 * with the "#if 1" branch active that is treated as a fatal programming
 * error, and NULL is deliberately dereferenced so the crash dump shows
 * the offending call stack. */
void VMem::Free(void* pMem)
{
#ifdef _USE_LINKED_LIST
    if (pMem) {
	/* step back over the hidden tracking header */
	PMEMORY_BLOCK_HEADER ptr = (PMEMORY_BLOCK_HEADER)(((char*)pMem)-sizeof(MEMORY_BLOCK_HEADER));
	if (ptr->owner != this) {
	    if (ptr->owner) {
#if 1
		int *nowhere = NULL;
		Perl_warn_nocontext("Free to wrong pool %p not %p",this,ptr->owner);
		*nowhere = 0; /* this segfault is deliberate,
			 so you can see the stack trace */
#else
		/* alternative policy: forward to the real owner */
		ptr->owner->Free(pMem);
#endif
	    }
	    /* owner == NULL means already freed (see poisoning below) */
	    return;
	}
	GetLock();
	UnlinkBlock(ptr);
	ptr->owner = NULL;	/* poison so a double free is detectable */
	free(ptr);
	FreeLock();
    }
#else /*_USE_LINKED_LIST*/
    free(pMem);
#endif
}
236
/* Acquire the pool's critical section (no-op without list tracking). */
void VMem::GetLock(void)
{
#ifdef _USE_LINKED_LIST
    EnterCriticalSection(&m_cs);
#endif
}
243
/* Release the pool's critical section (no-op without list tracking). */
void VMem::FreeLock(void)
{
#ifdef _USE_LINKED_LIST
    LeaveCriticalSection(&m_cs);
#endif
}
250
/* Report whether the pool lock is currently held. Intentionally
 * disabled (always returns 0 and ASSERTs in debug builds). */
int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95.  Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
	LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0);	/* alarm bells for when somebody calls this */
    return 0;
#endif
}
267
Release(void)268 long VMem::Release(void)
269 {
270 long lCount = InterlockedDecrement(&m_lRefCount);
271 if(!lCount)
272 delete this;
273 return lCount;
274 }
275
AddRef(void)276 long VMem::AddRef(void)
277 {
278 long lCount = InterlockedIncrement(&m_lRefCount);
279 return lCount;
280 }
281
#else	/* _USE_MSVCRT_MEM_ALLOC */

/*
 * Knuth's boundary tag algorithm Vol #1, Page 440.
 *
 * Each block in the heap has tag words before and after it,
 *  TAG
 *  block
 *  TAG
 * The size is stored in these tags as a long word, and includes the 8 bytes
 * of overhead that the boundary tags consume.  Blocks are allocated on long
 * word boundaries, so the size is always multiples of long words.  When the
 * block is allocated, bit 0, (the tag bit), of the size is set to 1.  When
 * a block is freed, it is merged with adjacent free blocks, and the tag bit
 * is set to 0.
 *
 * A linked list is used to manage the free list. The first two long words of
 * the block contain double links.  These links are only valid when the block
 * is freed, therefore space needs to be reserved for them.  Thus, the minimum
 * block size (not counting the tags) is 8 bytes.
 *
 * Since memory allocation may occur on a single thread, explicit locks are not
 * provided.
 *
 */

/* Heap-growth and block-geometry constants; sizes that end in
 * "AllocSize" include the two boundary tags. */
const long lAllocStart = 0x00020000; /* start at 128K */
const long minBlockSize = sizeof(void*)*2;
const long sizeofTag = sizeof(long);
const long blockOverhead = sizeofTag*2;
const long minAllocSize = minBlockSize+blockOverhead;
#ifdef _USE_BUDDY_BLOCKS
const long lSmallBlockSize = 1024;
const size_t nListEntries = ((lSmallBlockSize-minAllocSize)/sizeof(long));

/* Map a rounded allocation size to its small-block free-list slot. */
inline size_t CalcEntry(size_t size)
{
    ASSERT((size&(sizeof(long)-1)) == 0);	/* size must be long-aligned */
    return ((size - minAllocSize) / sizeof(long));
}
#endif

typedef BYTE* PBLOCK;	/* pointer to a memory block */
325
326 /*
327 * Macros for accessing hidden fields in a memory block:
328 *
329 * SIZE size of this block (tag bit 0 is 1 if block is allocated)
330 * PSIZE size of previous physical block
331 */
332
333 #define SIZE(block) (*(ULONG*)(((PBLOCK)(block))-sizeofTag))
334 #define PSIZE(block) (*(ULONG*)(((PBLOCK)(block))-(blockOverhead)))
SetTags(PBLOCK block,long size)335 inline void SetTags(PBLOCK block, long size)
336 {
337 SIZE(block) = size;
338 PSIZE(block+(size&~1)) = size;
339 }
340
341 /*
342 * Free list pointers
343 * PREV pointer to previous block
344 * NEXT pointer to next block
345 */
346
347 #define PREV(block) (*(PBLOCK*)(block))
348 #define NEXT(block) (*(PBLOCK*)((block)+sizeof(PBLOCK)))
SetLink(PBLOCK block,PBLOCK prev,PBLOCK next)349 inline void SetLink(PBLOCK block, PBLOCK prev, PBLOCK next)
350 {
351 PREV(block) = prev;
352 NEXT(block) = next;
353 }
Unlink(PBLOCK p)354 inline void Unlink(PBLOCK p)
355 {
356 PBLOCK next = NEXT(p);
357 PBLOCK prev = PREV(p);
358 NEXT(prev) = next;
359 PREV(next) = prev;
360 }
361 #ifndef _USE_BUDDY_BLOCKS
AddToFreeList(PBLOCK block,PBLOCK pInList)362 inline void AddToFreeList(PBLOCK block, PBLOCK pInList)
363 {
364 PBLOCK next = NEXT(pInList);
365 NEXT(pInList) = block;
366 SetLink(block, pInList, next);
367 PREV(next) = block;
368 }
369 #endif
370
/* Macro for rounding up to the next sizeof(long) */
#define ROUND_UP(n)	(((ULONG)(n)+sizeof(long)-1)&~(sizeof(long)-1))
#define ROUND_UP64K(n)	(((ULONG)(n)+0x10000-1)&~(0x10000-1))
#define ROUND_DOWN(n)	((ULONG)(n)&~(sizeof(long)-1))

/*
 * HeapRec - a list of all non-contiguous heap areas
 *
 * Each record in this array contains information about a non-contiguous heap area.
 */

const int maxHeaps = 32; /* 64 was overkill */
const long lAllocMax   = 0x80000000; /* max size of allocation */

#ifdef _USE_BUDDY_BLOCKS
/* One slot of the small-block free-list table; each slot is just big
 * enough to hold a dummy free block (tags + links). */
typedef struct _FreeListEntry
{
    BYTE	Dummy[minAllocSize];	// dummy free block
} FREE_LIST_ENTRY, *PFREE_LIST_ENTRY;
#endif

#ifndef _USE_BUDDY_BLOCKS
#define USE_BIGBLOCK_ALLOC
#endif
/*
 * performance tuning
 * Use VirtualAlloc() for blocks bigger than nMaxHeapAllocSize since
 * Windows 95/98/Me have heap managers that are designed for memory
 * blocks smaller than four megabytes.
 */

#ifdef USE_BIGBLOCK_ALLOC
const int nMaxHeapAllocSize = (1024*512);  /* don't allocate anything larger than this from the heap */
#endif

/* Bookkeeping for one non-contiguous heap area. */
typedef struct _HeapRec
{
    PBLOCK	base;	/* base of heap area */
    ULONG	len;	/* size of heap area */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL	bBigBlock;  /* was allocate using VirtualAlloc */
#endif
} HeapRec;
414
/*
 * Boundary-tag heap allocator (see the algorithm notes above). The
 * pool grows by carving new heap areas from HeapAlloc()/VirtualAlloc()
 * and threads free blocks on a circular free list scanned from a
 * roving pointer (m_pRover).
 */
class VMem
{
public:
    VMem();
    ~VMem();
    void* Malloc(size_t size);
    void* Realloc(void* pMem, size_t size);
    void Free(void* pMem);
    void GetLock(void);
    void FreeLock(void);
    int IsLocked(void);
    long Release(void);
    long AddRef(void);

    /* Did construction succeed? (The private Win32 heap may fail to create.) */
    inline BOOL CreateOk(void)
    {
#ifdef _USE_BUDDY_BLOCKS
	return TRUE;
#else
	return m_hHeap != NULL;
#endif
    };

    void ReInit(void);

protected:
    void Init(void);
    int Getmem(size_t size);

    int HeapAdd(void* ptr, size_t size
#ifdef USE_BIGBLOCK_ALLOC
	, BOOL bBigBlock
#endif
	);

    void* Expand(void* block, size_t size);

#ifdef _USE_BUDDY_BLOCKS
    /* Free-list slot for a given size; oversize requests share the last slot. */
    inline PBLOCK GetFreeListLink(int index)
    {
	if (index >= nListEntries)
	    index = nListEntries-1;
	return &m_FreeList[index].Dummy[sizeofTag];
    }
    inline PBLOCK GetOverSizeFreeList(void)
    {
	return &m_FreeList[nListEntries-1].Dummy[sizeofTag];
    }
    /* Sentinel entry that marks the end of the free-list table. */
    inline PBLOCK GetEOLFreeList(void)
    {
	return &m_FreeList[nListEntries].Dummy[sizeofTag];
    }

    /* Insert a free block at the head of its size-class list. */
    void AddToFreeList(PBLOCK block, size_t size)
    {
	PBLOCK pFreeList = GetFreeListLink(CalcEntry(size));
	PBLOCK next = NEXT(pFreeList);
	NEXT(pFreeList) = block;
	SetLink(block, pFreeList, next);
	PREV(next) = block;
    }
#endif
    inline size_t CalcAllocSize(size_t size)
    {
	/*
	 * Adjust the real size of the block to be a multiple of sizeof(long), and add
	 * the overhead for the boundary tags.  Disallow negative or zero sizes.
	 */
	return (size < minBlockSize) ? minAllocSize : (size_t)ROUND_UP(size) + blockOverhead;
    }

#ifdef _USE_BUDDY_BLOCKS
    FREE_LIST_ENTRY	m_FreeList[nListEntries+1];	// free list with dummy end of list entry as well
#else
    HANDLE		m_hHeap;		    // memory heap for this script
    char		m_FreeDummy[minAllocSize];  // dummy free block
    PBLOCK		m_pFreeList;		    // pointer to first block on free list
#endif
    PBLOCK		m_pRover;		    // roving pointer into the free list
    HeapRec		m_heaps[maxHeaps];	    // list of all non-contiguous heap areas
    int			m_nHeaps;		    // no. of heaps in m_heaps
    long		m_lAllocSize;		    // current alloc size
    long		m_lRefCount;		    // number of current users
    CRITICAL_SECTION	m_cs;			    // access lock

#ifdef _DEBUG_MEM
    void WalkHeap(int complete);
    void MemoryUsageMessage(char *str, long x, long y, int c);
    FILE*		m_pLog;
#endif
};
506
VMem::VMem()
{
    m_lRefCount = 1;	/* creator holds the first reference */
#ifndef _USE_BUDDY_BLOCKS
    /* Private, unsynchronized Win32 heap; CreateOk() reports failure. */
    BOOL bRet = (NULL != (m_hHeap = HeapCreate(HEAP_NO_SERIALIZE,
				lAllocStart,	/* initial size of heap */
				0)));		/* no upper limit on size of heap */
    ASSERT(bRet);
#endif

    InitializeCriticalSection(&m_cs);
#ifdef _DEBUG_MEM
    m_pLog = 0;
#endif

    Init();
}
524
VMem::~VMem(void)
{
#ifndef _USE_BUDDY_BLOCKS
    ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, NULL));
#endif
    WALKHEAPTRACE();	/* debug builds: log the final heap state */

    DeleteCriticalSection(&m_cs);
#ifdef _USE_BUDDY_BLOCKS
    /* Buddy build: every heap area came from VirtualAlloc. */
    for(int index = 0; index < m_nHeaps; ++index) {
	VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
    }
#else /* !_USE_BUDDY_BLOCKS */
#ifdef USE_BIGBLOCK_ALLOC
    /* Big blocks were VirtualAlloc'd and must be released individually;
     * everything else dies with the HeapDestroy below. */
    for(int index = 0; index < m_nHeaps; ++index) {
	if (m_heaps[index].bBigBlock) {
	    VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
	}
    }
#endif
    BOOL bRet = HeapDestroy(m_hHeap);
    ASSERT(bRet);
#endif /* _USE_BUDDY_BLOCKS */
}
549
/* Release every heap area and reset the allocator to its pristine
 * post-construction state (all outstanding allocations become invalid). */
void VMem::ReInit(void)
{
    for(int index = 0; index < m_nHeaps; ++index) {
#ifdef _USE_BUDDY_BLOCKS
	VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
#else
#ifdef USE_BIGBLOCK_ALLOC
	if (m_heaps[index].bBigBlock) {
	    VirtualFree(m_heaps[index].base, 0, MEM_RELEASE);
	}
	else
#endif
	    HeapFree(m_hHeap, HEAP_NO_SERIALIZE, m_heaps[index].base);
#endif /* _USE_BUDDY_BLOCKS */
    }

    Init();
}
568
/* Reset free-list bookkeeping: empty free list(s), no heap areas, and
 * the next growth request sized at lAllocStart. */
void VMem::Init(void)
{
#ifdef _USE_BUDDY_BLOCKS
    PBLOCK pFreeList;
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the end of list marker.
     * Set the number of non-contiguous heaps to zero.
     * Set the next allocation size.
     */
    for (int index = 0; index < nListEntries; ++index) {
	pFreeList = GetFreeListLink(index);
	SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
	PREV(pFreeList) = NEXT(pFreeList) = pFreeList;
    }
    pFreeList = GetEOLFreeList();
    SIZE(pFreeList) = PSIZE(pFreeList+minAllocSize) = 0;
    PREV(pFreeList) = NEXT(pFreeList) = NULL;	/* NULL links mark end of table */
    m_pRover = GetOverSizeFreeList();
#else
    /*
     * Initialize the free list by placing a dummy zero-length block on it.
     * Set the number of non-contiguous heaps to zero.
     */
    m_pFreeList = m_pRover = (PBLOCK)(&m_FreeDummy[sizeofTag]);
    PSIZE(m_pFreeList+minAllocSize) = SIZE(m_pFreeList) = 0;
    PREV(m_pFreeList) = NEXT(m_pFreeList) = m_pFreeList;	/* circular, self-linked */
#endif

    m_nHeaps = 0;
    m_lAllocSize = lAllocStart;
}
601
/* Allocate size bytes from the boundary-tag heap.
 * Returns NULL on failure or when size is 0; otherwise returns a block
 * whose boundary tags are marked allocated (low bit set).
 * Search order: (buddy builds only) exact small-block list, then the
 * rover, then larger small-block lists; then the general free list from
 * the rover, growing the heap via Getmem() if a full lap finds nothing. */
void* VMem::Malloc(size_t size)
{
    WALKHEAP();

    PBLOCK ptr;
    size_t lsize, rem;
    /*
     * Disallow negative or zero sizes.
     * (the (int) cast also rejects huge sizes whose rounded value
     * wrapped into the negative range)
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
	return NULL;

#ifdef _USE_BUDDY_BLOCKS
    /*
     * Check the free list of small blocks if this is free use it
     * Otherwise check the rover if it has no blocks then
     * Scan the free list entries use the first free block
     * split the block if needed, stop at end of list marker
     */
    {
	int index = CalcEntry(realsize);
	if (index < nListEntries-1) {
	    ptr = GetFreeListLink(index);
	    lsize = SIZE(ptr);
	    if (lsize >= realsize) {
		rem = lsize - realsize;
		if(rem < minAllocSize) {
		    /* Unlink the block from the free list. */
		    Unlink(ptr);
		}
		else {
		    /*
		     * split the block
		     * The remainder is big enough to split off into a new block.
		     * Use the end of the block, resize the beginning of the block
		     * no need to change the free list.
		     */
		    SetTags(ptr, rem);
		    ptr += SIZE(ptr);
		    lsize = realsize;
		}
		SetTags(ptr, lsize | 1);
		return ptr;
	    }
	    ptr = m_pRover;
	    lsize = SIZE(ptr);
	    if (lsize >= realsize) {
		rem = lsize - realsize;
		if(rem < minAllocSize) {
		    /* Unlink the block from the free list. */
		    Unlink(ptr);
		}
		else {
		    /*
		     * split the block
		     * The remainder is big enough to split off into a new block.
		     * Use the end of the block, resize the beginning of the block
		     * no need to change the free list.
		     */
		    SetTags(ptr, rem);
		    ptr += SIZE(ptr);
		    lsize = realsize;
		}
		SetTags(ptr, lsize | 1);
		return ptr;
	    }
	    /* walk the larger size classes; NULL NEXT marks the table end */
	    ptr = GetFreeListLink(index+1);
	    while (NEXT(ptr)) {
		lsize = SIZE(ptr);
		if (lsize >= realsize) {
		    size_t rem = lsize - realsize;
		    if(rem < minAllocSize) {
			/* Unlink the block from the free list. */
			Unlink(ptr);
		    }
		    else {
			/*
			 * split the block
			 * The remainder is big enough to split off into a new block.
			 * Use the end of the block, resize the beginning of the block
			 * no need to change the free list.
			 */
			SetTags(ptr, rem);
			ptr += SIZE(ptr);
			lsize = realsize;
		    }
		    SetTags(ptr, lsize | 1);
		    return ptr;
		}
		ptr += sizeof(FREE_LIST_ENTRY);
	    }
	}
    }
#endif

    /*
     * Start searching the free list at the rover.  If we arrive back at rover without
     * finding anything, allocate some memory from the heap and try again.
     */
    ptr = m_pRover;	/* start searching at rover */
    int loops = 2;	/* allow two times through the loop  */
    for(;;) {
	lsize = SIZE(ptr);
	ASSERT((lsize&1)==0);	/* free-list blocks must not be tagged allocated */
	/* is block big enough? */
	if(lsize >= realsize) {
	    /* if the remainder is too small, don't bother splitting the block. */
	    rem = lsize - realsize;
	    if(rem < minAllocSize) {
		if(m_pRover == ptr)
		    m_pRover = NEXT(ptr);

		/* Unlink the block from the free list. */
		Unlink(ptr);
	    }
	    else {
		/*
		 * split the block
		 * The remainder is big enough to split off into a new block.
		 * Use the end of the block, resize the beginning of the block
		 * no need to change the free list.
		 */
		SetTags(ptr, rem);
		ptr += SIZE(ptr);
		lsize = realsize;
	    }
	    /* Set the boundary tags to mark it as allocated. */
	    SetTags(ptr, lsize | 1);
	    return ((void *)ptr);
	}

	/*
	 * This block was unsuitable.  If we've gone through this list once already without
	 * finding anything, allocate some new memory from the heap and try again.
	 */
	ptr = NEXT(ptr);
	if(ptr == m_pRover) {
	    if(!(loops-- && Getmem(realsize))) {
		return NULL;
	    }
	    ptr = m_pRover;
	}
    }
}
747
/* Resize a block with ANSI realloc semantics (size 0 frees, NULL block
 * mallocs). Tries, in order: resize in place via Expand(), merge with a
 * free *previous* physical block, then fall back to Malloc+copy+Free. */
void* VMem::Realloc(void* block, size_t size)
{
    WALKHEAP();

    /* if size is zero, free the block. */
    if(size == 0) {
	Free(block);
	return (NULL);
    }

    /* if block pointer is NULL, do a Malloc(). */
    if(block == NULL)
	return Malloc(size);

    /*
     * Grow or shrink the block in place.
     * if the block grows then the next block will be used if free
     */
    if(Expand(block, size) != NULL)
	return block;

    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize)
	return NULL;

    /*
     * see if the previous block is free, and is it big enough to cover the new size
     * if merged with the current block.
     */
    PBLOCK ptr = (PBLOCK)block;
    size_t cursize = SIZE(ptr) & ~1;
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0 && (psize + cursize) >= realsize) {
	PBLOCK prev = ptr - psize;
	if(m_pRover == prev)
	    m_pRover = NEXT(prev);

	/* Unlink the previous block from the free list. */
	Unlink(prev);

	/* Copy contents of old block to new location, make it the current block. */
	memmove(prev, ptr, cursize);
	cursize += psize;	/* combine sizes */
	ptr = prev;

	size_t rem = cursize - realsize;
	if(rem >= minAllocSize) {
	    /*
	     * The remainder is big enough to be a new block.  Set boundary
	     * tags for the resized block and the new block.
	     */
	    prev = ptr + realsize;
	    /*
	     * add the new block to the free list.
	     * next block cannot be free
	     */
	    SetTags(prev, rem);
#ifdef _USE_BUDDY_BLOCKS
	    AddToFreeList(prev, rem);
#else
	    AddToFreeList(prev, m_pFreeList);
#endif
	    cursize = realsize;
        }
	/* Set the boundary tags to mark it as allocated. */
	SetTags(ptr, cursize | 1);
	return ((void *)ptr);
    }

    /* Allocate a new block, copy the old to the new, and free the old. */
    if((ptr = (PBLOCK)Malloc(size)) != NULL) {
	/* copy only the payload: cursize includes the two boundary tags */
	memmove(ptr, block, cursize-blockOverhead);
	Free(block);
    }
    return ((void *)ptr);
}
824
/* Return a block to the free list, coalescing with free physical
 * neighbors on both sides. NULL and already-free blocks are ignored
 * (the latter logged in debug builds). */
void VMem::Free(void* p)
{
    WALKHEAP();

    /* Ignore null pointer. */
    if(p == NULL)
	return;

    PBLOCK ptr = (PBLOCK)p;

    /* Check for attempt to free a block that's already free. */
    size_t size = SIZE(ptr);
    if((size&1) == 0) {
	MEMODSlx("Attempt to free previously freed block", (long)p);
	return;
    }
    size &= ~1;	/* remove allocated tag */

    /* if previous block is free, add this block to it. */
#ifndef _USE_BUDDY_BLOCKS
    int linked = FALSE;
#endif
    size_t psize = PSIZE(ptr);
    if((psize&1) == 0) {
	ptr -= psize;	/* point to previous block */
	size += psize;	/* merge the sizes of the two blocks */
#ifdef _USE_BUDDY_BLOCKS
	/* buddy lists are size-segregated, so the merged block must be
	 * pulled out and re-filed under its new size */
	Unlink(ptr);
#else
	linked = TRUE;	/* it's already on the free list */
#endif
    }

    /* if the next physical block is free, merge it with this block. */
    PBLOCK next = ptr + size;	/* point to next physical block */
    size_t nsize = SIZE(next);
    if((nsize&1) == 0) {
	/* block is free move rover if needed */
	if(m_pRover == next)
	    m_pRover = NEXT(next);

	/* unlink the next block from the free list. */
	Unlink(next);

	/* merge the sizes of this block and the next block. */
	size += nsize;
    }

    /* Set the boundary tags for the block; */
    SetTags(ptr, size);

    /* Link the block to the head of the free list. */
#ifdef _USE_BUDDY_BLOCKS
	AddToFreeList(ptr, size);
#else
    if(!linked) {
	AddToFreeList(ptr, m_pFreeList);
    }
#endif
}
885
/* Acquire the pool's critical section. */
void VMem::GetLock(void)
{
    EnterCriticalSection(&m_cs);
}
890
/* Release the pool's critical section. */
void VMem::FreeLock(void)
{
    LeaveCriticalSection(&m_cs);
}
895
/* Report whether the pool lock is currently held. Intentionally
 * disabled (always returns 0 and ASSERTs in debug builds). */
int VMem::IsLocked(void)
{
#if 0
    /* XXX TryEnterCriticalSection() is not available in some versions
     * of Windows 95.  Since this code is not used anywhere yet, we
     * skirt the issue for now. */
    BOOL bAccessed = TryEnterCriticalSection(&m_cs);
    if(bAccessed) {
	LeaveCriticalSection(&m_cs);
    }
    return !bAccessed;
#else
    ASSERT(0);	/* alarm bells for when somebody calls this */
    return 0;
#endif
}
912
913
Release(void)914 long VMem::Release(void)
915 {
916 long lCount = InterlockedDecrement(&m_lRefCount);
917 if(!lCount)
918 delete this;
919 return lCount;
920 }
921
AddRef(void)922 long VMem::AddRef(void)
923 {
924 long lCount = InterlockedIncrement(&m_lRefCount);
925 return lCount;
926 }
927
928
/* Grow the pool by at least requestSize bytes.
 * Returns -1 on success, 0 on failure. Prefers growing the most recent
 * heap area in place; otherwise allocates a new area (VirtualAlloc for
 * big blocks / buddy builds, HeapAlloc otherwise), halving the size on
 * failure until it no longer covers the request. */
int VMem::Getmem(size_t requestSize)
{   /* returns -1 is successful 0 if not */
#ifdef USE_BIGBLOCK_ALLOC
    BOOL bBigBlock;
#endif
    void *ptr;

    /* Round up size to next multiple of 64K. */
    size_t size = (size_t)ROUND_UP64K(requestSize);

    /*
     * if the size requested is smaller than our current allocation size
     * adjust up
     */
    if(size < (unsigned long)m_lAllocSize)
	size = m_lAllocSize;

    /* Update the size to allocate on the next request */
    if(m_lAllocSize != lAllocMax)
	m_lAllocSize <<= 2;	/* grow geometrically (x4), capped at lAllocMax */

#ifndef _USE_BUDDY_BLOCKS
    if(m_nHeaps != 0
#ifdef USE_BIGBLOCK_ALLOC
	&& !m_heaps[m_nHeaps-1].bBigBlock
#endif
		    ) {
	/* Expand the last allocated heap */
	ptr = HeapReAlloc(m_hHeap, HEAP_REALLOC_IN_PLACE_ONLY|HEAP_NO_SERIALIZE,
		m_heaps[m_nHeaps-1].base,
		m_heaps[m_nHeaps-1].len + size);
	if(ptr != 0) {
	    /* register only the newly added tail as (contiguous) heap space */
	    HeapAdd(((char*)ptr) + m_heaps[m_nHeaps-1].len, size
#ifdef USE_BIGBLOCK_ALLOC
		, FALSE
#endif
		);
	    return -1;
	}
    }
#endif /* _USE_BUDDY_BLOCKS */

    /*
     * if we didn't expand a block to cover the requested size
     * allocate a new Heap
     * the size of this block must include the additional dummy tags at either end
     * the above ROUND_UP64K may not have added any memory to include this.
     */
    if(size == requestSize)
	size = (size_t)ROUND_UP64K(requestSize+(blockOverhead));

Restart:
#ifdef _USE_BUDDY_BLOCKS
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
#else
#ifdef USE_BIGBLOCK_ALLOC
    bBigBlock = FALSE;
    if (size >= nMaxHeapAllocSize) {
	bBigBlock = TRUE;
	ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    }
    else
#endif
    ptr = HeapAlloc(m_hHeap, HEAP_NO_SERIALIZE, size);
#endif /* _USE_BUDDY_BLOCKS */

    if (!ptr) {
	/* try to allocate a smaller chunk */
	size >>= 1;
	if(size > requestSize)
	    goto Restart;
    }

    if(ptr == 0) {
	MEMODSlx("HeapAlloc failed on size!!!", size);
	return 0;
    }

#ifdef _USE_BUDDY_BLOCKS
    if (HeapAdd(ptr, size)) {
	VirtualFree(ptr, 0, MEM_RELEASE);
	return 0;
    }
#else
#ifdef USE_BIGBLOCK_ALLOC
    if (HeapAdd(ptr, size, bBigBlock)) {
	if (bBigBlock) {
	    VirtualFree(ptr, 0, MEM_RELEASE);
	}
	/* NOTE(review): this path still falls through to return -1
	 * (success) even though the memory was released — looks
	 * suspicious; confirm intended behavior before changing. */
    }
#else
    HeapAdd(ptr, size);
#endif
#endif /* _USE_BUDDY_BLOCKS */
    return -1;
}
1025
/* Register a newly obtained memory area with the allocator and place it
 * on the free list. Returns 0 on success, -1 if the area is too small
 * or the m_heaps table is full. Contiguous (non-big-block) areas are
 * merged with the heap record they extend. */
int VMem::HeapAdd(void* p, size_t size
#ifdef USE_BIGBLOCK_ALLOC
    , BOOL bBigBlock
#endif
    )
{   /* if the block can be successfully added to the heap, returns 0; otherwise -1. */
    int index;

    /* Check size, then round size down to next long word boundary. */
    if(size < minAllocSize)
	return -1;

    size = (size_t)ROUND_DOWN(size);
    PBLOCK ptr = (PBLOCK)p;

#ifdef USE_BIGBLOCK_ALLOC
    if (!bBigBlock) {
#endif
	/*
	 * Search for another heap area that's contiguous with the bottom of this new area.
	 * (It should be extremely unusual to find one that's contiguous with the top).
	 */
	for(index = 0; index < m_nHeaps; ++index) {
	    if(ptr == m_heaps[index].base + (int)m_heaps[index].len) {
		/*
		 * The new block is contiguous with a previously allocated heap area.  Add its
		 * length to that of the previous heap.  Merge it with the dummy end-of-heap
		 * area marker of the previous heap.
		 */
		m_heaps[index].len += size;
		break;
	    }
	}
#ifdef USE_BIGBLOCK_ALLOC
    }
    else {
	/* big blocks always get their own heap record */
	index = m_nHeaps;
    }
#endif

    if(index == m_nHeaps) {
	/* The new block is not contiguous, or is BigBlock.  Add it to the heap list. */
	if(m_nHeaps == maxHeaps) {
	    return -1;	/* too many non-contiguous heaps */
	}
	m_heaps[m_nHeaps].base = ptr;
	m_heaps[m_nHeaps].len = size;
#ifdef USE_BIGBLOCK_ALLOC
	m_heaps[m_nHeaps].bBigBlock = bBigBlock;
#endif
	m_nHeaps++;

	/*
	 * Reserve the first LONG in the block for the ending boundary tag of a dummy
	 * block at the start of the heap area.
	 */
	size -= blockOverhead;
	ptr += blockOverhead;
	PSIZE(ptr) = 1;	/* mark the dummy previous block as allocated */
    }

    /*
     * Convert the heap to one large block.  Set up its boundary tags, and those of
     * marker block after it.  The marker block before the heap will already have
     * been set up if this heap is not contiguous with the end of another heap.
     */
    SetTags(ptr, size | 1);
    PBLOCK next = ptr + size;	/* point to dummy end block */
    SIZE(next) = 1;	/* mark the dummy end block as allocated */

    /*
     * Link the block to the start of the free list by calling free().
     * This will merge the block with any adjacent free blocks.
     */
    Free(ptr);
    return 0;
}
1103
1104
/* Try to resize block in place to hold size bytes.
 * Returns block on success, NULL if an in-place resize is impossible
 * (caller then relocates). Shrinking always succeeds; growing succeeds
 * only if the next physical block is free and large enough. */
void* VMem::Expand(void* block, size_t size)
{
    /*
     * Disallow negative or zero sizes.
     */
    size_t realsize = CalcAllocSize(size);
    if((int)realsize < minAllocSize || size == 0)
	return NULL;

    PBLOCK ptr = (PBLOCK)block;

    /* if the current size is the same as requested, do nothing. */
    size_t cursize = SIZE(ptr) & ~1;
    if(cursize == realsize) {
	return block;
    }

    /* if the block is being shrunk, convert the remainder of the block into a new free block. */
    if(realsize <= cursize) {
	size_t nextsize = cursize - realsize;	/* size of new remainder block */
	if(nextsize >= minAllocSize) {
	    /*
	     * Split the block
	     * Set boundary tags for the resized block and the new block.
	     */
	    SetTags(ptr, realsize | 1);
	    ptr += realsize;

	    /*
	     * add the new block to the free list.
	     * call Free to merge this block with next block if free
	     * (tagged allocated first so Free doesn't reject it)
	     */
	    SetTags(ptr, nextsize | 1);
	    Free(ptr);
	}

	return block;
    }

    PBLOCK next = ptr + cursize;
    size_t nextsize = SIZE(next);

    /* Check the next block for consistency.*/
    if((nextsize&1) == 0 && (nextsize + cursize) >= realsize) {
	/*
	 * The next block is free and big enough.  Add the part that's needed
	 * to our block, and split the remainder off into a new block.
	 */
	if(m_pRover == next)
	    m_pRover = NEXT(next);

	/* Unlink the next block from the free list. */
	Unlink(next);
	cursize += nextsize;	/* combine sizes */

	size_t rem = cursize - realsize;	/* size of remainder */
	if(rem >= minAllocSize) {
	    /*
	     * The remainder is big enough to be a new block.
	     * Set boundary tags for the resized block and the new block.
	     */
	    next = ptr + realsize;
	    /*
	     * add the new block to the free list.
	     * next block cannot be free
	     */
	    SetTags(next, rem);
#ifdef _USE_BUDDY_BLOCKS
	    AddToFreeList(next, rem);
#else
	    AddToFreeList(next, m_pFreeList);
#endif
	    cursize = realsize;
        }
	/* Set the boundary tags to mark it as allocated. */
	SetTags(ptr, cursize | 1);
	return ((void *)ptr);
    }
    return NULL;
}
1185
1186 #ifdef _DEBUG_MEM
1187 #define LOG_FILENAME ".\\MemLog.txt"
1188
MemoryUsageMessage(char * str,long x,long y,int c)1189 void VMem::MemoryUsageMessage(char *str, long x, long y, int c)
1190 {
1191 char szBuffer[512];
1192 if(str) {
1193 if(!m_pLog)
1194 m_pLog = fopen(LOG_FILENAME, "w");
1195 sprintf(szBuffer, str, x, y, c);
1196 fputs(szBuffer, m_pLog);
1197 }
1198 else {
1199 if(m_pLog) {
1200 fflush(m_pLog);
1201 fclose(m_pLog);
1202 m_pLog = 0;
1203 }
1204 }
1205 }
1206
WalkHeap(int complete)1207 void VMem::WalkHeap(int complete)
1208 {
1209 if(complete) {
1210 MemoryUsageMessage(NULL, 0, 0, 0);
1211 size_t total = 0;
1212 for(int i = 0; i < m_nHeaps; ++i) {
1213 total += m_heaps[i].len;
1214 }
1215 MemoryUsageMessage("VMem heaps used %d. Total memory %08x\n", m_nHeaps, total, 0);
1216
1217 /* Walk all the heaps - verify structures */
1218 for(int index = 0; index < m_nHeaps; ++index) {
1219 PBLOCK ptr = m_heaps[index].base;
1220 size_t size = m_heaps[index].len;
1221 #ifndef _USE_BUDDY_BLOCKS
1222 #ifdef USE_BIGBLOCK_ALLOC
1223 if (!m_heaps[m_nHeaps].bBigBlock)
1224 #endif
1225 ASSERT(HeapValidate(m_hHeap, HEAP_NO_SERIALIZE, ptr));
1226 #endif
1227
1228 /* set over reserved header block */
1229 size -= blockOverhead;
1230 ptr += blockOverhead;
1231 PBLOCK pLast = ptr + size;
1232 ASSERT(PSIZE(ptr) == 1); /* dummy previous block is allocated */
1233 ASSERT(SIZE(pLast) == 1); /* dummy next block is allocated */
1234 while(ptr < pLast) {
1235 ASSERT(ptr > m_heaps[index].base);
1236 size_t cursize = SIZE(ptr) & ~1;
1237 ASSERT((PSIZE(ptr+cursize) & ~1) == cursize);
1238 MemoryUsageMessage("Memory Block %08x: Size %08x %c\n", (long)ptr, cursize, (SIZE(ptr)&1) ? 'x' : ' ');
1239 if(!(SIZE(ptr)&1)) {
1240 /* this block is on the free list */
1241 PBLOCK tmp = NEXT(ptr);
1242 while(tmp != ptr) {
1243 ASSERT((SIZE(tmp)&1)==0);
1244 if(tmp == m_pFreeList)
1245 break;
1246 ASSERT(NEXT(tmp));
1247 tmp = NEXT(tmp);
1248 }
1249 if(tmp == ptr) {
1250 MemoryUsageMessage("Memory Block %08x: Size %08x free but not in free list\n", (long)ptr, cursize, 0);
1251 }
1252 }
1253 ptr += cursize;
1254 }
1255 }
1256 MemoryUsageMessage(NULL, 0, 0, 0);
1257 }
1258 }
1259 #endif /* _DEBUG_MEM */
1260
1261 #endif /* _USE_MSVCRT_MEM_ALLOC */
1262
1263 #endif /* ___VMEM_H_INC___ */
1264