// Emacs style mode select   -*- C++ -*-
//-----------------------------------------------------------------------------
//
// $Id: z_zone.c 1562 2020-11-29 11:51:00Z wesleyjohnson $
//
// Copyright (C) 1993-1996 by id Software, Inc.
// Copyright (C) 1998-2010 by DooM Legacy Team.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
//
// $Log: z_zone.c,v $
// Revision 1.17  2002/07/29 21:52:25  hurdler
// Someone want to have a look at this bugs
//
// Revision 1.16  2001/06/30 15:06:01  bpereira
// fixed wrong next level name in intermission
//
// Revision 1.15  2001/03/13 22:14:20  stroggonmeth
// Long time no commit. 3D floors, FraggleScript, portals, etc.
//
// Revision 1.14  2001/01/25 22:15:44  bpereira
// added heretic support
//
// Revision 1.13  2000/11/06 20:52:16  bpereira
//
// Revision 1.12  2000/11/03 13:15:13  hurdler
// Some debug comments, please verify this and change what is needed!
//
// Revision 1.11  2000/11/02 17:50:10  stroggonmeth
// Big 3Dfloors & FraggleScript commit!!
//
// Revision 1.10  2000/10/14 18:33:34  hurdler
// Revision 1.9  2000/10/14 18:32:16  hurdler
//
// Revision 1.8  2000/10/04 16:33:54  hurdler
// Implement hardware texture memory stats
//
// Revision 1.7  2000/10/02 18:25:45  bpereira
// Revision 1.6  2000/08/31 14:30:56  bpereira
// Revision 1.5  2000/07/01 09:23:49  bpereira
// Revision 1.4  2000/04/30 10:30:10  bpereira
// Revision 1.3  2000/04/24 20:24:38  bpereira
// Revision 1.2  2000/02/27 00:42:11  hurdler
// Revision 1.1.1.1  2000/02/22 20:32:32  hurdler
// Initial import into CVS (v1.29 pr3)
//
//
// DESCRIPTION:
//      Zone Memory Allocation. Neat.
//
//-----------------------------------------------------------------------------


#include "doomincl.h"
#include "doomstat.h"
#include "z_zone.h"
#include "i_system.h"
#include "command.h"
#include "m_argv.h"
#include "i_video.h"
#ifdef HWRENDER
#include "hardware/hw_drv.h"
   // for hardware memory stats
#endif

// [WDJ] Include some pad memory, and check it for memory block overrun.
//#define PADMEM  1024

// [WDJ] 1/24/2009 Memory control defines.
// Amount of main memory to allocate, in MB
// Original values
//#define NORM_MAIN_MEM_MB		6
//#define MAX_MAIN_MEM_MB		20

// Larger values, FreeDoom ran out of space
// FreeDoom MAP10 uses 23MB, with fragmentation requires 30MB,
// but will run in 10MB when forced.
#define MIN_MAIN_MEM_MB		 8
#define NORM_MAIN_MEM_MB	24
#define MAX_MAIN_MEM_MB		80
#define GROW_MIN_MAIN_MEM_MB	 8
#define GROW_MAIN_MEM_MB	 8

// Choose one (and only one) memory system.
// [WDJ] Because of the widely varying systems that Legacy can run on, it is
// desirable to maintain all these variations, to give each user one that fits
// well with their system and preferences.

// [WDJ] Plain allocation from malloc works for systems with large memory.
// Gives a reference performance, can be used to differentiate tagging problems.
// When the user writes out-of-bounds of malloced region it will do a sigsegv.
// It does not use the tags, cannot recover PU_CACHE, PU_LEVEL, etc. memory.
// Uses the most memory of all choices.
//#define PLAIN_MALLOC

// [WDJ] Combination of malloc and tags.
// Does malloc from heap, so will grow from heap as use increases.
// Maintains zone block lists and will free blocks according to tags.
//#define TAGGED_MALLOC

// ZONE_ZALLOC, the default based on the original zone memory.
// It allocates a huge block at program start and makes all Z_Malloc
// allocations from it.
// Has a command line option to set the zone block size ( -mb <int> ).
// It also has some conditional experimental code.
#define ZONE_ZALLOC

// Grows the initial allocation block when it runs out of memory.
// Runs in the smallest memory of all the choices.
// Uses tags, and recovers PU_CACHE and PU_LEVEL memory first.
// Applied as an option to ZONE_ZALLOC.
#define GROW_ZONE

// Aggressively purges any PU_CACHE, clearing cache faster.
// This stresses the memory system more, testing user code to not
// depend upon PU_CACHE that can disappear. Used for testing memory code.
// Applied as an option to ZONE_ZALLOC.
//#define AGGRESSIVE_PURGE

// [WDJ] 1/22/2009 For some crazy reason this code said that ->user==NULL
// is a free block, while almost every call to Z_ALLOC has user=NULL.
// Fixed to use tag=PU_FREE instead.


// default, and prevention of crossed definitions
#if ! (defined( PLAIN_MALLOC ) || defined( TAGGED_MALLOC ) )
#ifndef ZONE_ZALLOC
#define ZONE_ZALLOC
#endif
#endif

#ifdef PLAIN_MALLOC
  #undef ZONE_ZALLOC
  #undef GROW_ZONE
  #define USE_MALLOC
#endif

#ifdef TAGGED_MALLOC
  #undef ZONE_ZALLOC
  #undef PLAIN_MALLOC
  #undef GROW_ZONE
  #define USE_MALLOC
#endif


// =========================================================================
//                        ZONE MEMORY ALLOCATION
// =========================================================================
//
// There is never any space between memblocks,
//  and there will never be two contiguous free memblocks.
// The rover can be left pointing at a non-empty block.
//
// It is of little value to free a cachable block (for ZONE memory),
//  because it will get overwritten automatically if needed.
//

167 typedef struct
168 {
169     // start / end cap for linked list
170     // prevents free block combines around end of blocklist
171     memblock_t  blocklist;
172 
173     // total bytes malloced, including header
174     uint32_t	size;
175 
176     memblock_t* rover;
177 
178 } memzone_t;
179 
static memzone_t* mainzone;

#ifdef USE_MALLOC
memzone_t	memhead;	// statically allocated mainzone
#endif

#define ZONEID  0x1d4a11
#define INVALIDID 0x22eebb00

void Command_MemInfo_f( void );


#if defined(TAGGED_MALLOC) || defined(ZONE_ZALLOC)
// Insert a new block between prv and nxt in the doubly-linked block list.
// ZONE_ZALLOC: Caller must make sure the blocks are logically contiguous.
static void Z_LinkBlock(memblock_t *block, memblock_t *prv, memblock_t *nxt)
{
  block->prev = prv;
  block->next = nxt;
  nxt->prev = prv->next = block;
}
#endif

#ifdef USE_MALLOC
// PLAIN_MALLOC returns blocks to the heap.
// TAGGED_MALLOC first unlinks blocks from the block list,
// then returns them to the heap.
static void Z_UnlinkFree(memblock_t *block)
{
#ifdef TAGGED_MALLOC
  // Remove the block from the circular list before freeing it.
  register memblock_t * prv = block->prev;
  register memblock_t * nxt = block->next;

  prv->next = nxt;
  nxt->prev = prv;
#endif
  memhead.size -= block->size;  // account for the returned memory
  free(block);
}
#endif

220 #ifdef ZONE_ZALLOC
221 // Zone marks blocks as free and combines them into large free blocks.
222 
223 // mark the block free
Z_MarkFree(memblock_t * block)224 static void Z_MarkFree(memblock_t *block)
225 {
226   block->memtag = PU_FREE; // free block mark
227 //  block->id = 0;
228   block->id = ZONEID;   // on free blocks too, as check
229   block->user = NULL;
230 }
231 
232 // mark a block free, try to combine it with neighboring free blocks
Z_CombineFreeBlock(memblock_t * block)233 static void Z_CombineFreeBlock(memblock_t *block)
234 {
235 
236   Z_MarkFree( block );  // mark as a free block
237 
238   // see if previous block is free, if so, merge blocks
239   memblock_t *other = block->prev;
240   if (other->memtag == PU_FREE)
241   {
242       other->size += block->size;
243       other->next = block->next;
244       other->next->prev = other;
245 
246 #ifdef PARANOIA
247       block->id = INVALIDID+1;  // merged block id
248 #else
249       block->id = 0;		// does not exist as a block
250 #endif
251       block->memtag = PU_INVALID;
252       if (block == mainzone->rover)	// rover block is gone, fix rover
253 	mainzone->rover = other;
254 
255       block = other;
256   }
257 
258   // see if next block is free, if so, merge blocks
259   other = block->next;
260   if (other->memtag == PU_FREE)
261   {
262       block->size += other->size;
263       block->next = other->next;
264       block->next->prev = block;
265 
266 #ifdef PARANOIA
267       other->id = INVALIDID+2;  // merged block id
268 #else
269       other->id = 0;		// does not exist as a block
270 #endif
271       other->memtag = PU_INVALID;
272       if (other == mainzone->rover)	// rover block is gone, fix rover
273 	mainzone->rover = block;
274   }
275 }
276 
277 
278 static unsigned int mb_used = NORM_MAIN_MEM_MB;
279 
280 //
281 // Z_ZoneInit
282 //
283 // Init zone without consideration for existing allocations.
Z_ZoneInit(int mb_zonesize)284 void Z_ZoneInit ( int mb_zonesize )
285 {
286     memblock_t*         block;
287     int zonesize = mb_zonesize<<20;
288 
289     // make the allocation of the huge memory block
290     mainzone = (memzone_t *) malloc(zonesize);
291     if( !mainzone ) {
292          I_Error("Could not allocate %d MiB.\n"
293                  "Please use -mb parameter and specify a lower value,\n"
294 		 "use a smaller video size, and/or a smaller wad.",
295 		 mb_zonesize);
296     }
297 
298     mb_used = mb_zonesize;	// save for some stats later
299 
300     // touch memory to stop swaping
301 //    memset(mainzone, 0, zonesize);
302     // [WDJ] This did not stop swapping, just made pages dirty so they must be swapped out.
303     // If user specifies large -mb value, then it would dirty a large unused memory area.
304     memset(mainzone, 0, min(zonesize, 16182));  // [WDJ] dirty only the first 16K
305 
306 //    if( M_checkParm("-lock") )
307 //        I_LockMemory(mainzone);
308 
309     // setup the zone
310     mainzone->size = zonesize;
311     // setup the block head as protected
312     Z_MarkFree( &mainzone->blocklist );	// Init as free
313     mainzone->blocklist.memtag = PU_ZONE; // protect head against free combine
314 
315     // set the entire zone to one free block
316     block = (memblock_t *)( (byte *)mainzone + sizeof(memzone_t) );
317     block->size = zonesize - sizeof(memzone_t);
318     Z_MarkFree( block );  // init free block
319 
320     // init circular linking
321     Z_LinkBlock(block, &mainzone->blocklist, &mainzone->blocklist);
322 
323     mainzone->rover = block;
324 }
325 
326 #ifdef GROW_ZONE
327 // Grow the ZONE_ZALLOC mainzone allocation
328 // Return first block of new free zone
Z_GrowZone(int reqsize,int std_grow)329 memblock_t*  Z_GrowZone( int reqsize, int std_grow )
330 {
331     // Grow the zone memory
332     int  min_rsize = reqsize + (16*sizeof(memblock_t));
333    	// reqsize + block header + zone header + extra
334     int  grow_size = max( std_grow, max( min_rsize, 2048 ) );
335     memblock_t * addzone = (memblock_t *) malloc(grow_size);
336     if( addzone ) {
337         // Create a guard to prevent combining with blocks outside
338         // this zone; the allocations are not contiguous
339         memblock_t * freezone = addzone + 1;	// next memblock
340         // first memblock as static guard
341         Z_MarkFree( addzone );
342         addzone->memtag = PU_ZONE;	// untouchable guard
343         addzone->size = sizeof(memblock_t);
344         // second memblock has remaining space as free block
345         Z_MarkFree( freezone );
346         freezone->size = grow_size - sizeof(memblock_t);
347         // link as last block in existing zone
348         // then will be part of normal search without extra coding
349         Z_LinkBlock( addzone, mainzone->blocklist.prev, &mainzone->blocklist );
350         Z_LinkBlock( freezone, addzone, &mainzone->blocklist );
351 
352         mainzone->size += grow_size;
353         mb_used = mainzone->size >> 20;	// to MiB
354 
355         GenPrintf(EMSG_info, "Z_Malloc: Grow by %d KiB, total %d MiB\n",
356 		grow_size>>10, mb_used);
357         return freezone;
358     }else{
359        return NULL;
360     }
361 }
362 #endif
363 #endif	// ZONE_ZALLOC
364 
365 
366 //
367 // Z_Init
368 //
Z_Init(void)369 void Z_Init (void)
370 {
371 #ifdef USE_MALLOC
372     // does not need to pre-allocate memory
373     mainzone = & memhead;	// static
374     // setup the linked list, rest of mainzone is unused
375     memhead.rover = NULL;
376     memhead.size = 0;
377     memhead.blocklist.memtag = PU_ZONE; // mark head, no combines anyway
378 #ifdef TAGGED_MALLOC
379     // init circular linking, TAGGED needs the lists
380     memhead.blocklist.next = memhead.blocklist.prev = &memhead.blocklist;
381 #else
382     // no lists in PLAIN
383     memhead.blocklist.next = memhead.blocklist.prev = NULL;
384 #endif
385 #else
386 // ZONE_ZALLOC
387     int mb_wanted = 10;
388 
389     if( M_CheckParm ("-mb") )
390     {
391         if( M_IsNextParm() )
392             mb_wanted = atoi (M_GetNextParm());
393         else
394             I_Error("usage : -mb <number of mebibytes for the heap>");
395     }
396     else
397     {
398         int total_mb, freemem_mb;
399         uint64_t  total;
400         freemem_mb = I_GetFreeMem(&total)>>20;
401         total_mb = total >> 20;	// MiB
402 // GenPrintf( EMSG_info, "Total mem: %ld .. ", total );
403         // freemem_mb==0, means that it is unavailable.
404         if( freemem_mb )
405         {
406 	    // [WDJ] total_mb, freemem_mb must be int, otherwise on 32 bit Linux
407 	    // print will report "free 0", and probably other errors occur too.
408 	    GenPrintf( EMSG_info, "System memory %d MiB, free %d MiB\n", total_mb, freemem_mb);
409 	}
410         else
411         {
412 	    if( (total & 0x0F) != 0x01 )  // not guessing
413 	        GenPrintf( EMSG_info, "System memory %d MiB\n", total_mb);
414 	    freemem_mb = total_mb >> 3;  // guess at free
415 	}
416         // [WDJ] We assume that the system uses memory for disk cache.
417         // Can ask for more than freemem and get it from disk cache.
418 	// MEM consts are now defined above.
419 #ifdef GROW_ZONE
420         // initial zone memory, will grow when needed
421         mb_wanted = min(min( GROW_MIN_MAIN_MEM_MB, freemem_mb ), MAX_MAIN_MEM_MB);
422         if( mb_wanted < 2 )   mb_wanted = 2;
423 #else
424         // zone memory, all at once, only get one try
425         mb_wanted = min( NORM_MAIN_MEM_MB, (total/2) );  // reasonable limit
426         if( freemem_mb < NORM_MAIN_MEM_MB )
427             freemem_mb = (freemem_mb + total)/2;	// ask for compromise
428         mb_wanted = min(max(freemem_mb, mb_wanted), MAX_MAIN_MEM_MB);
429         if( mb_wanted < MIN_MAIN_MEM_MB )
430 	    mb_wanted = MIN_MAIN_MEM_MB;
431 #endif
432     }
433     // [WDJ] mem limited to 2047 MB by 32bit int
434     if( mb_wanted > 2047 )   mb_wanted = 2047;	// [WDJ]
435     GenPrintf( EMSG_info, "%d MiB requested for Z_Init.\n", mb_wanted);
436     Z_ZoneInit( mb_wanted );
437 #endif
438 
439     // calls Z_Malloc, so must be last
440     COM_AddCommand("meminfo", Command_MemInfo_f, CC_info);
441 }
442 
443 
444 //
445 // Z_Free
446 //
447 #ifdef DEBUG_ZONE
Z_Free2(void * ptr,char * file,int line)448 void Z_Free2(void* ptr, char *file, int line)
449 #else
450 void Z_Free (void* ptr)
451 #endif
452 {
453     memblock_t*  block = (memblock_t *) ( (byte *)ptr - sizeof(memblock_t));
454 
455 #ifdef DEBUG_ZONE
456 #ifndef PLAIN_MALLOC
457    memblock_t*         other;
458    // SoM: HARDERCORE debuging
459    // Write all Z_Free's to a debug file
460 #ifdef DEBUGFILE
461    if(debugfile)
462      fprintf(debugfile, "ZFREE@File: %s, line: %i\n", file, line);
463 #endif
464    //BP: hardcore debuging
465    // check if there is not a user in this zone
466 for (other = mainzone->blocklist.next ; other->next != &mainzone->blocklist; other = other->next)
467 {
468    if((other!=block) &&
469       (other->memtag > PU_INVALID) &&
470       other->user &&
471       (other->user >= (void **)block) &&
472       (other->user < (void **)(((byte *)block) + block->size)))
473    {
474        //I_Error("Z_Free: Pointer in zone\n");
475        I_Error("Z_Free: Pointer %s:%d in zone at %s:%i",other->ownerfile,other->ownerline,file,line);
476    }
477 }
478 #endif
479 #endif
480 
481     if (block->id != ZONEID)
482         I_Error ("Z_Free: memory block has corrupt ZONEID: %x", block->id);
483     if (block->memtag == PU_FREE)  return;	// already freed
484 #ifdef PARANOIA
485     // get direct a segv when using a pointer that isn't right
486     memset(ptr,0,block->size-sizeof(memblock_t));
487 #endif
488 
489     if (block->user)
490     {
491         // clear the user's owner ptr (they no longer have access)
492         *block->user = NULL;
493     }
494 
495 #ifdef USE_MALLOC
496     Z_UnlinkFree(block);
497 #else
498 // ZONE_ZALLOC
499     Z_CombineFreeBlock( block );
500 #endif
501 }
502 
503 
#if defined(TAGGED_MALLOC) || defined(ZONE_ZALLOC)
// Free a block, but only if its tag marks it purgable.
void Z_Purge( memblock_t* block )
{
    if( block->memtag >= PU_PURGELEVEL ) {
        // purge the block; Z_Free expects the data ptr, not the header
        Z_Free((byte *)block + sizeof(memblock_t));
    }
}
#endif


516 #ifdef USE_MALLOC
517 //
518 // Z_Malloc
519 // You can pass a NULL user if the tag is < PU_PURGELEVEL.
520 //
521 #ifdef DEBUG_ZONE
Z_Malloc2(int reqsize,memtag_e tag,void ** user,int alignbits,char * file,int line)522 void*   Z_Malloc2 (int reqsize, memtag_e tag, void **user, int alignbits,
523 		   char *file, int line)
524 #else
525 void* Z_MallocAlign (int reqsize, memtag_e tag, void **user, int alignbits )
526 #endif
527 {
528     memblock_t* newblock;
529     int   memalloc_size;	// with the memalloc header
530 
531     if( tag == PU_FREE ){
532        GenPrintf(EMSG_warn,"Z_ALLOC called with PU_FREE tag, conflict with FREE BLOCK\n");
533        tag = PU_LEVEL;      // choose safe interpretation
534        // tag = PU_DAVE;	// if must debug
535     }
536 
537     if( tag == PU_CACHE_DEFAULT )   tag = PU_CACHE;
538 
539     reqsize = (reqsize + 3) & ~3;	// alloc rounded up to next 4 byte alignment
540     // account for size of block header
541     memalloc_size = reqsize + sizeof(memblock_t);
542 #if defined( DEBUG_ZONE ) && defined( PADMEM )
543     memalloc_size += PADMEM;
544 #endif
545 
546     newblock = malloc(memalloc_size);
547     if( newblock == NULL ){
548        I_Error ("Z_Malloc: malloc failed on allocation of %i bytes\n");
549     }
550     memhead.size += memalloc_size;
551 #ifdef TAGGED_MALLOC
552     // link at head of list
553     Z_LinkBlock( newblock, &memhead.blocklist, memhead.blocklist.next);
554     // convert tags that are not used to ones that are handled
555     if( tag >= PU_PURGELEVEL )   tag = PU_LEVEL;
556 #endif
557     newblock->memtag = tag;
558     newblock->id = ZONEID;
559     newblock->user = user;
560     newblock->size = memalloc_size;
561     void* basedata = (byte*)newblock + sizeof(memblock_t);
562     if (user) *user = basedata;
563 #if defined( DEBUG_ZONE ) && defined( PADMEM )
564     memset( &((byte*)basedata)[reqsize], 0, PADMEM );
565 #endif
566     return basedata;
567 }
568 
#else
// ZONE_ZALLOC

//
// Z_Malloc
// You can pass a NULL user if the tag is < PU_PURGELEVEL.
//

// [WDJ]  1/22/2009  PU_CACHE usage.
// PU_CACHE is being used with other Z_Malloc calls, and allocated memory
// is being purged before the user is done.  Using PU_CACHE on newly
// allocated blocks only works if Z_ALLOC takes a long time to cycle back
// around to them.  But when memory gets tight and it gets back to them
// quicker, then we get mysterious failures.  Because a lump may already
// be in memory, as a PU_CACHE tagged allocation, there is no assurance that
// a call to Z_Malloc will not free the PU_CACHE memory being used.
// Use PU_IN_USE to protect the allocation, then change the tag to PU_CACHE.

#define MINFRAGMENT             sizeof(memblock_t)

// [WDJ] 1/22/2009  MODIFIED ZONE_ZALLOC
// This also has experimental code blocks, which are currently disabled.

592 #ifdef DEBUG_ZONE
Z_Malloc2(int reqsize,memtag_e tag,void ** user,int alignbits,char * file,int line)593 void*   Z_Malloc2 (int reqsize, memtag_e tag, void **user, int alignbits,
594 		   char *file, int line)
595 #else
596 void* Z_MallocAlign (int reqsize, memtag_e tag, void **user, int alignbits )
597 #endif
598 {
599     memblock_t* start;	// marks start of search
600     memblock_t* rover;	// walks through block list
601     memblock_t* base = NULL;	// [WDJ] points to usable memory, or is NULL
602     byte *basedata;
603 
604     int   memalloc_size;	// with the memalloc header
605     int   basesize = 0;	// accumulate blocks
606     int	  tries = 0;	// [WDJ] Try multiple passes before bombing out
607       // [WDJ] TODO: could compact memory after first try
608       // 1. Call owners of memory to reallocate, and clean up ptrs.
609       // 2. Let tag give permission to move blocks and update user ptr.
610     memtag_e  current_purgelevel = PU_CACHE;    // partial purging
611 
612     // From stdint, uintptr is an int the size of a ptr.
613     uintptr_t alignmask = (1 << alignbits) - 1;
614 #define ALIGN(p) (byte *)(((uintptr_t)(p) + alignmask) & ~alignmask)
615 
616 
617 // ZONE_ZALLOC
618     if( tag == PU_FREE ){
619        GenPrintf(EMSG_warn,"Z_ALLOC called with PU_FREE tag, conflict with FREE BLOCK\n");
620        tag = PU_LEVEL;      // choose safe interpretation
621        // tag = PU_DAVE;	// if must debug
622     }
623 
624     if( tag == PU_CACHE_DEFAULT )   tag = PU_CACHE;
625 
626     reqsize = (reqsize + 3) & ~3;	// alloc rounded up to next 4 byte alignment
627     // account for size of block header
628     memalloc_size = reqsize + sizeof(memblock_t);
629 #if defined( DEBUG_ZONE ) && defined( PADMEM )
630     memalloc_size += PADMEM;
631 #endif
632 
633     // scan through the block list,
634     // looking for the first free block of sufficient size,
635 
636     rover = mainzone->rover;
637 
638     // if there is a free block preceding base (there can be at most one), move one block back
639     if ( rover->prev->memtag == PU_FREE )  // move back if prev is free
640         rover = rover->prev;
641 
642     start = rover->prev;  // for test when have searched entire zone
643 
644     // Check on user code corrupting the memory block
645 //    if (rover->id && rover->id != ZONEID) //Hurdler: this shouldn't happen
646     if (rover->id != ZONEID) //Hurdler: this shouldn't happen
647     {
648         // [WDJ] 11/18/2009 Still get this error on some wads.  There must be
649         // some unchecked use of memory that writes past end of allocation.
650 	// FIXME: Find the source of this error !!
651         GenPrintf(EMSG_error,"WARNING: Legacy may crash soon. This is a known bug, sorry.\n");
652         GenPrintf(EMSG_error,"Memory corruption has been detected\n");
653         GenPrintf(EMSG_error,"Known to happen when node-builder is not run after editing level.\n");
654         GenPrintf(EMSG_error,"  corrupt ZONEID= %x\n", rover->id );
655     }
656 
657     for(;;)	// Search zone memory
658     {
659 #ifdef PARANOIA
660         if (rover->id != ZONEID)
661 	    GenPrintf(EMSG_error, "  corrupt ZONEID= %x\n", rover->id );
662 #endif
663         if (rover == start)
664         {
665             // scanned all the way around the list
666             //faB: debug to see if problems of memory fragmentation..
667             Command_MemInfo_f();
668 
669 	    tries ++;
670 	    if( tries < 4 ) {
671 	       current_purgelevel = PU_PURGELEVEL; // enable all purging
672 #ifdef GROW_ZONE
673 	       // Grow the zone allocation, and alloc from new memory
674 	       rover = Z_GrowZone( reqsize, GROW_MAIN_MEM_MB<<20 );
675 	       if( rover == NULL ) {
676 		   // new allocation failed, try something desperate
677 		   rover = Z_GrowZone( reqsize, 0);
678 		   if( rover == NULL ) {
679 		       GenPrintf(EMSG_info,"Z_Malloc: Retry %i on allocation of %i bytes\n",
680 			       tries, memalloc_size );
681 		       rover = start;
682 		   }
683 	       }
684   	       base = NULL;
685 #else
686 	       GenPrintf(EMSG_info,"Z_Malloc: Retry %i on allocation of %i bytes\n",
687 		       tries, memalloc_size );
688 #endif
689 	    }else{
690 	       I_Error ("Z_Malloc: failed on allocation of %i bytes\n"
691                      "Try to increase heap size using -mb parameter (actual heap size : %d Mb)\n", memalloc_size, mb_used);
692 	    }
693         }
694 
695         if (rover->memtag != PU_FREE)		// being used
696         {
697             if (rover->memtag < current_purgelevel)  // < PU_CACHE or PU_PURGELEVEL
698             {
699                 // hit a block that can't be purged, so move past it
700 
701                 //Hurdler: FIXME: this is where the crashing problem seem to come from
702 	        // [WDJ] 1/20/2009 Found other bugs that it probably interacted with.
703                 rover = rover->next;
704 	        base = NULL;	// cancel current consideration
705 	        basesize = 0;
706 	        continue;
707             }
708 	    // Purgable block
709 #ifdef AGGRESSIVE_PURGE
710             else
711             {
712                 // free the rover block (adding the size to base)
713 
714                 // the base cannot be rover, but could be rover->prev
715 	        memblock_t* roverprev = rover->prev;	// back away from block that can disappear
716 	        int roverprev_size = roverprev->size;	// size change test
717                 Z_Purge( rover );
718 	          // freed memory can be put in prev, or same block
719 	        // roverprev is unknown
720 	        rover = roverprev;
721 	        if( roverprev->size != roverprev_size ) {   // size changed
722 		    // roverprev was PU_FREE, and freed block was added to it
723 		    // old rover block is gone
724 		    // must redo roverprev because size changed
725 	            if( base ) {
726 		        // changed free block changes basesize
727 		        basesize -= roverprev_size;	// old size removed
728 		        // continue and let normal code add in new size
729 		    }
730 		    // add rover (roverprev) and test the existing base size
731 		}else{
732 		    // old rover block is still there, but now free
733 		    rover = roverprev->next;
734 		    // can now be considered same as existing free block
735 		}
736             }
737 #endif
738 	}
739         // rover is free or purgable
740         // accumulate it as part of considered base
741         if( ! base ) {
742 	   base = rover;	// start new base
743 	   basesize = 0;
744 	}
745         basesize += rover->size;       // accmulate the size
746         rover = rover->next;
747 
748         // base is available, so test it against size
749 
750 	// trial data alignment
751         basedata = ALIGN((byte *)base + sizeof(memblock_t));
752 	//Hurdler: huh? it crashed on my system :( (with 777.wad and 777.deh, only software mode)
753 	//         same problem with MR.ROCKET's wad -> it's probably not a problem with the wad !?
754 	//         this is because base doesn't point to something valid (and it's not NULL)
755 	// Check addr of end of blocks for fit
756 	// 	if( ((ULONG)base)+base->size >= basedata+memalloc_size-sizeof(memblock_t) ) break;
757 	if( (byte *)base + basesize >= basedata + reqsize ) {
758 	   // fits
759 #if 0
760 	   // [WDJ] Attempt at better allocation, does not have any effect
761 	   if( tries == 0 ) {
762 	      // Try harder to not fragment memory
763               extra = ((byte *)base + basesize) - (basedata + reqsize);
764 	      if( (extra > 32) && (extra < memalloc_size) ) continue;
765 	   }
766 #endif
767 	   // [WDJ] 1/20/2009 * CRITICAL ERROR FIX *
768 	   if( alignbits ) {  // consider alignment problems
769 	      // [WDJ] More complete checking to avoid misalign problems later.
770 	      // If misalignment is not large enough for a MINFRAGMENT, then
771 	      // cannot always find a place to put it, and it will cause errors.
772 	      // Eliminate problem by selecting a different block.
773 	      int misalign = (basedata - sizeof(memblock_t)) - (byte*)base;	// extra before
774 	      if( misalign <= MINFRAGMENT ) {   // have a problem
775 		 // require room for MINFRAGMENT to hold misalign memory.
776 		 // with at least 1 byte in the fragment, to avoid a strange case
777 		 basedata = ALIGN( (byte *)base + sizeof(memblock_t) + MINFRAGMENT + 1 );
778 		 if( (byte *)base + basesize >= basedata + reqsize )  break;  // OK
779 		 continue;	// too small for misalign, try another block
780 	      }
781 	   }
782 	   break;	// found block, break out of search loop
783 	}
784     }
785 
786     // Free the purgable blocks under consideration, combining them into
787     // one free block.
788     // [WDJ] Fragile code, do not modify unless you are willing to spend
789     // a few days testing for failures, after 3 level changes in certain wads,
790     // with AGGRESSIVE_PURGE on and off, with GROW_ZONE on and off.
791     Z_Purge( base );  // free the base
792     if( base->prev->next != base ) {
793         // base ought to be the first in any group of purgable, so this should not happen
794         I_Error("Z_MALLOC: internal error, purged base disappeared");
795     }
796     // rover can combine with a free during the loop, or it could be
797     // zone head (low address), so it is not reliable to test against rover.
798     while( base->size < basesize )
799     {
800         // stop at rover, end of tested blocks
801         if( base->next == rover )   break;
802         if( base->next->memtag == PU_ZONE )   break;  // failed, break tight loop
803         // free the next block, combining it with base
804 	Z_Purge( base->next );
805     }
806     if( base->size < basesize || base->id != ZONEID ) {
807         // Internal error with purging
808         GenPrintf(EMSG_error,"Z_MALLOC: request= %i, alloc size= %i, aligned= %i\n", reqsize, memalloc_size, alignbits );
809         GenPrintf(EMSG_error,"  got size= %i, accum size= %i\n", base->size, basesize );
810         if( base->next == rover )  GenPrintf(EMSG_error, "  Hit rover\n");
811         if( base->next->memtag == PU_ZONE )  GenPrintf(EMSG_error, "  Hit PU_ZONE\n");
812         if( base->id != ZONEID )  GenPrintf(EMSG_error, "  corrupt ZONEID= %x\n", base->id );
813         I_Error("Z_MALLOC: internal error, combined blocks less than request size");
814     }
815 
816     // aligning can leave free space in current block so make it really free
817     if( alignbits )
818     {
819         // The new, aligned, block.
820 	// Sub 1 from memblock ptr is same as sub of header size.
821         memblock_t *newbase = (memblock_t *)basedata - 1;
822         int misalign = (byte*)newbase - (byte*)base;	// extra before
823 
824 	// [WDJ] 1/20/2009 loop ensures misalign is 0, or >= MINFRAGMENT.
825         if( misalign > MINFRAGMENT )
826         {
827 	    // MINFRAGMENT >= sizeof( memblock_t )
828 	    // so base header does not overlap newbase header
829 	    // Link in newbase after base, and change base size.
830 	    Z_LinkBlock( newbase, base, base->next );
831 
832             newbase->size = base->size - misalign;
833             base->size = misalign;
834         }
835         else
836         {
837 	    GenPrintf(EMSG_error,"Z_ALLOC: internal error, misalign < MINFRAGMENT\n" );
838         }
839         base = newbase;	// aligned
840     }
841 
842     // commit to using the free block
843 
844     // tag marks block as in use
845     base->memtag = tag;	// must be done before Z_CombineFreeBlock
846     base->id = ZONEID;
847     base->user = user;	// valid or NULL
848 #ifdef DEBUG_ZONE
849     base->ownerfile = file;
850     base->ownerline = line;
851 #endif
852     // how much too big
853     int extra = base->size - memalloc_size;
854     if (extra > MINFRAGMENT)
855     {
856         // there will be a free fragment after the allocated block
857         memblock_t * exblock = (memblock_t *) ((byte *)base + memalloc_size );
858         exblock->size = extra;
859         base->size = memalloc_size;
860 
861         Z_LinkBlock(exblock, base, base->next);
862         // non-free tag on base prevents it from combining with exblock!
863         Z_CombineFreeBlock(exblock);
864     }
865 
866     // pointer to the data area after header; aligned has base already aligned
867     void *blockdata = (byte *)base + sizeof(memblock_t);
868 
869     if (user)
870     {
871         // setup user owner ptr
872         *user = blockdata;
873     }
874     else
875     {
876 	// [WDJ] Most of the calls have user==NULL !!
877         if (tag >= PU_PURGELEVEL) {
878             I_SoftError ("Z_Malloc: an owner is required for purgable blocks");
879 	    tag = PU_LEVEL;	// does not require user
880 	}
881     }
882 
883     // next allocation will start looking here
884     mainzone->rover = base->next;
885 #if defined( DEBUG_ZONE ) && defined( PADMEM )
886     memset( &((byte*)blockdata)[reqsize], 0, PADMEM );
887 #endif
888 
889     return blockdata;
890 }
891 #endif
892 
893 
894 
895 //
896 // Z_FreeTags
897 //
void Z_FreeTags( memtag_e lowtag, memtag_e hightag )
{
#ifdef PLAIN_MALLOC
    // cannot search, no blocklist
    return;
#else
// TAGGED_MALLOC, ZONE_ZALLOC
    memblock_t * bp = mainzone->blocklist.next;

    // protect PU_FREE, and PU_ZONE
    if( lowtag < PU_INVALID )
        lowtag = PU_INVALID;

    while( bp != &mainzone->blocklist )
    {
#ifdef TAGGED_MALLOC
        // save the link before Z_Free invalidates bp
        memblock_t * nextbp = bp->next;
        // the tagged list does not have any blocks with PU_FREE
        if( bp->memtag >= lowtag && bp->memtag <= hightag )
            Z_Free( (byte *)bp + sizeof(memblock_t) );
        bp = nextbp;
#else
        // PU_FREE and PU_ZONE are protected by limiting lowtag.
        if( bp->memtag >= lowtag && bp->memtag <= hightag )
        {
            // Z_CombineFreeBlock can delete bp, and bp->next
            memblock_t * prevbp = bp->prev;  // prev is safe from deletion
            Z_Free( (byte *)bp + sizeof(memblock_t) );
            // if bp was combined backward, then prevbp->next is the new current
            bp = (prevbp->next == bp) ? bp->next : prevbp->next;
        }
        else
        {
            bp = bp->next;
        }
#endif
    }
#endif
}
940 
941 
942 //
943 // Z_ChangeTags_To
944 // Change all blocks of old_tag to new_tag.
945 //
void Z_ChangeTags_To( memtag_e old_tag, memtag_e new_tag )
{
#ifdef PLAIN_MALLOC
    return;
#else
// TAGGED_MALLOC, ZONE_ZALLOC
    memblock_t * bp = mainzone->blocklist.next;
    while( bp != &mainzone->blocklist )
    {
        // free blocks will not match
        if( bp->memtag == old_tag )
            bp->memtag = new_tag;
        bp = bp->next;
    }
#endif
}
962 
963 
964 //
965 // Z_DumpHeap
966 // Note: TFileDumpHeap( stdout ) ?
967 //
// Dump the zone block list to the console, for blocks whose memtag
// falls in [lowtag..hightag], and report linkage inconsistencies.
void Z_DumpHeap(memtag_e lowtag, memtag_e hightag)
{
    memblock_t* block;

    CONS_Printf ("zone size: %li  location: %p\n",
            mainzone->size,mainzone);

    CONS_Printf ("tag range: %i to %i\n",
            lowtag, hightag);

    for (block = mainzone->blocklist.next ; ; block = block->next)
    {
        if (block->memtag >= lowtag && block->memtag <= hightag)
        {
            // [WDJ] Bug fix: prev and next arguments were swapped relative
            // to the format labels; now printed as labeled, matching
            // Z_FileDumpHeap.
            CONS_Printf ("block:%p    size:%7i    user:%p    tag:%3i prev:%p next:%p\n",
                    block, block->size, block->user, block->memtag, block->prev, block->next);
	}

        if (block->next == &mainzone->blocklist)
        {
            // all blocks have been hit
            break;
        }
#ifdef GROW_ZONE
        if ( block->next->memtag != PU_ZONE )	// exclude zone heads
#endif
        // blocks in a zone are contiguous: header + size reaches the next header
        if ( (byte *)block + block->size != (byte *)block->next)
            CONS_Printf ("ERROR: block size does not touch the next block\n");

        if ( block->next->prev != block)
            CONS_Printf ("ERROR: next block doesn't have proper back link\n");

        // adjacent free blocks should have been combined
        if ( block->memtag==PU_FREE && block->next->memtag==PU_FREE )
            CONS_Printf ("ERROR: two consecutive free blocks\n");
    }
}
1004 
1005 
1006 //
1007 // Z_FileDumpHeap
1008 //
Z_FileDumpHeap(FILE * f)1009 void Z_FileDumpHeap (FILE* f)
1010 {
1011     memblock_t* block;
1012     int i=0;
1013 
1014     fprintf (f, "zone size: %d     location: %p\n",mainzone->size,mainzone);
1015 
1016     for (block = mainzone->blocklist.next ; ; block = block->next)
1017     {
1018         i++;
1019         fprintf (f,"block:%p size:%7i user:%p tag:%3i prev:%p next:%p id:%7i\n",
1020                  block, block->size, block->user, block->memtag, block->prev, block->next, block->id);
1021 
1022         if (block->next == &mainzone->blocklist)
1023         {
1024             // all blocks have been hit
1025             break;
1026         }
1027 
1028         if (block->user &&
1029 	    (*block->user != (byte *)block + sizeof(memblock_t)))
1030 	    fprintf (f,"ERROR: block doesn't have a proper user\n");
1031 
1032 #ifdef GROW_ZONE
1033         if ( block->next->memtag != PU_ZONE )	// exclude zone heads
1034 #endif
1035         if ( (byte *)block + block->size != (byte *)block->next)
1036             fprintf (f,"ERROR: block size does not touch the next block\n");
1037 
1038         if ( block->next->prev != block)
1039             fprintf (f,"ERROR: next block doesn't have proper back link\n");
1040 
1041         if ( block->memtag==PU_FREE && block->next->memtag==PU_FREE )
1042             fprintf (f,"ERROR: two consecutive free blocks\n");
1043     }
1044     fprintf (f,"Total : %d blocks\n"
1045                "===============================================================================\n\n",i);
1046 }
1047 
1048 
1049 
1050 //
1051 // Z_CheckHeap
1052 //
Z_CheckHeap(int i)1053 void Z_CheckHeap (int i)
1054 {
1055 #ifdef PLAIN_MALLOC
1056     return;
1057 #else
1058 // TAGGED_MALLOC, ZONE_ZALLOC
1059     memblock_t* block;
1060     for (block = mainzone->blocklist.next ; ; block = block->next)
1061     {
1062         if (block->next == &mainzone->blocklist)
1063         {
1064             // all blocks have been hit
1065             break;
1066         }
1067 
1068         if (block->user &&
1069 	    (*block->user != (byte *)block + sizeof(memblock_t)))
1070             I_Error ("Z_CheckHeap: block doesn't have a proper user %d\n",i);
1071 
1072 #ifdef ZONE_ZALLOC
1073 #ifdef GROW_ZONE
1074         if ( block->next->memtag != PU_ZONE )	// exclude zone heads
1075 #endif
1076         if ( (byte *)block + block->size != (byte *)block->next)
1077             I_Error ("Z_CheckHeap: block size does not touch the next block %d\n",i);
1078 #endif
1079 // TAGGED_MALLOC, ZONE_ZALLOC
1080         if ( block->next->prev != block)
1081             I_Error ("Z_CheckHeap: next block doesn't have proper back link %d\n",i);
1082 #ifdef ZONE_ZALLOC
1083         if ( block->memtag==PU_FREE && block->next->memtag==PU_FREE )
1084             I_Error ("Z_CheckHeap: two consecutive free blocks %d\n",i);
1085 #endif
1086     }
1087 #endif	// TAGGED_MALLOC, ZONE_ZALLOC
1088 }
1089 
1090 
1091 
1092 
1093 //
1094 // Z_ChangeTag
1095 //
1096 #ifdef PARANOIA
Z_ChangeTag_debug(void * ptr,memtag_e chtag,char * fn,int ln)1097 void  Z_ChangeTag_debug (void *ptr, memtag_e chtag, char * fn, int ln)
1098 #else
1099 void  Z_ChangeTag ( void* ptr, memtag_e chtag )
1100 #endif
1101 {
1102 
1103 #ifdef PARANOIA
1104     if( ! verify_Z_Malloc( ptr ) )
1105         I_Error("Z_CT at %s:%i", fn, ln );
1106 #endif
1107 
1108 #ifdef PLAIN_MALLOC
1109     return;
1110 
1111 #else
1112 // TAGGED_MALLOC, ZONE_ZALLOC
1113     memblock_t* block;
1114     block = (memblock_t *) ( (byte *)ptr - sizeof(memblock_t));
1115 
1116     if (block->id != ZONEID)
1117         I_Error ("Z_ChangeTag: free block has corrupt ZONEID: %x", block->id);
1118 
1119     if (chtag >= PU_PURGELEVEL && !block->user)
1120         I_Error ("Z_ChangeTag: an owner is required for purgable blocks");
1121 
1122     if (chtag == PU_FREE ) {
1123        GenPrintf(EMSG_warn,"Z_ChangeTag: changing to 0 tag, conflict with FREE BLOCK\n" );
1124        chtag = PU_LEVEL;  // safe
1125 	// chtag = PU_DAVE;  // if need to debug
1126     }
1127 
1128     // [WDJ] protect PU_LOCK_SB against casual change
1129     if (block->memtag == PU_LOCK_SB && chtag != PU_UNLOCK_CACHE)  goto done;
1130 
1131     // [WDJ] special tag changes which are conditional on existing tag
1132     switch( chtag ) {
1133      case PU_CACHE_DEFAULT:
1134        // Change to PU_CACHE_DEFAULT is conditional.
1135        // Protect non-purgable lumps against casual degrading to PU_CACHE
1136        if (block->memtag < PU_PURGELEVEL )  goto done;
1137        chtag = PU_CACHE;
1138        break;
1139 
1140      case PU_LUMP:
1141        // PU_LUMP can become PU_CACHE, so do not override
1142        // a more restrictive tag
1143        if (block->memtag < PU_LUMP )  goto done;
1144        break;
1145 
1146      case PU_IN_USE:
1147        // Becomes PU_CACHE later, so do not override a more restrictive tag
1148        if (block->memtag < PU_IN_USE )  goto done;
1149        break;
1150      default:
1151        break;
1152     }
1153 
1154 
1155     block->memtag = chtag;
1156 
1157 done:
1158     return;
1159 #endif
1160 }
1161 
1162 
#ifdef PARANOIA
// Return true when the memory block is valid:
// the header id is ZONEID and the tag is within the valid tag range.
byte  verify_Z_Malloc( void * mp )
{
    memblock_t * hdr = (memblock_t*)( (byte *)mp - sizeof(memblock_t) );
    return ( hdr->id == ZONEID
             && hdr->memtag >= PU_STATIC
             && hdr->memtag <= PU_CACHE_DEFAULT ) ? 1 : 0;
}
#endif
1173 
1174 
1175 //
1176 // Z_FreeMemory
1177 //
Z_FreeMemory(int * realfree,int * cachemem,int * usedmem,int * largefreeblock)1178 void Z_FreeMemory (int *realfree,int *cachemem,int *usedmem,int *largefreeblock)
1179 {
1180 #ifdef PLAIN_MALLOC
1181     *realfree = 0;
1182     *cachemem = 0;
1183     *usedmem = (int)memhead.size;
1184     *largefreeblock = 0;
1185     return;
1186 #else
1187 // ZONE_ZALLOC, TAGGED_MALLOC
1188     memblock_t*         block;
1189     int freeblock=0;
1190 
1191     *realfree = 0;
1192     *cachemem = 0;
1193     *usedmem  = 0;
1194     *largefreeblock = 0;
1195 
1196     for (block = mainzone->blocklist.next ;
1197          block != &mainzone->blocklist;
1198          block = block->next)
1199     {
1200         if ( block->memtag == PU_FREE )
1201         {
1202             // free memory
1203             *realfree += block->size;
1204             freeblock += block->size;
1205             if(freeblock>*largefreeblock)
1206                 *largefreeblock = freeblock;
1207         }
1208         else
1209         {
1210             if(block->memtag >= PU_PURGELEVEL)
1211             {
1212                 // purgable memory (cache)
1213                 *cachemem += block->size;
1214                 freeblock += block->size;
1215                 if(freeblock>*largefreeblock)
1216                     *largefreeblock = freeblock;
1217             }
1218             else
1219             {
1220                 // used block
1221                 *usedmem += block->size;
1222                 freeblock=0;
1223             }
1224 	}
1225     }
1226 #endif
1227 }
1228 
1229 
1230 //
1231 // Z_TagUsage
1232 // - return number of bytes currently allocated in the heap for the given tag
// Return number of bytes currently allocated in the heap for the given tag.
int Z_TagUsage (memtag_e usetag)
{
    int  total = 0;

#if defined(TAGGED_MALLOC) || defined(ZONE_ZALLOC)
    memblock_t * bp = mainzone->blocklist.next;
    while( bp != &mainzone->blocklist )
    {
        if( bp->memtag == usetag )
            total += bp->size;
        bp = bp->next;
    }
#endif

    return total;
}
1250 
1251 
Command_MemInfo_f(void)1252 void Command_MemInfo_f(void)
1253 {
1254     uint64_t freebytes, totalbytes;
1255 #ifdef PLAIN_MALLOC
1256     CONS_Printf("\2Memory Heap Info - Plain Malloc\n");
1257     CONS_Printf("used  memory       : %7d KiB\n", memhead.size>>10);
1258 #else
1259 // ZONE_ZALLOC, TAGGED_MALLOC
1260     int  memfree, cache, used, largefreeblock;
1261 
1262     Z_CheckHeap(-1);
1263     Z_FreeMemory(&memfree, &cache, &used, &largefreeblock);
1264 #ifdef TAGGED_MALLOC
1265     CONS_Printf("\2Memory Heap Info - Tagged Malloc\n");
1266     CONS_Printf("alloc memory       : %7d KiB\n", memhead.size>>10);
1267 #else
1268     CONS_Printf("\2Memory Heap Info\n");
1269     CONS_Printf("total heap size    : %7d KiB\n", mb_used<<10);
1270 #endif
1271     CONS_Printf("used  memory       : %7d KiB\n", used>>10);
1272     CONS_Printf("free  memory       : %7d KiB\n", memfree>>10);
1273     CONS_Printf("cache memory       : %7d KiB\n", cache>>10);
1274     CONS_Printf("largest free block : %7d KiB\n", largefreeblock>>10);
1275 #ifdef HWRENDER
1276     if( rendermode != render_soft )
1277     {
1278     CONS_Printf("patch info headers : %7d KiB\n", Z_TagUsage(PU_HWRPATCHINFO)>>10);
1279     CONS_Printf("HW texture cache   : %7d KiB\n", Z_TagUsage(PU_HWRCACHE)>>10);
1280     CONS_Printf("plane polygons     : %7d KiB\n", Z_TagUsage(PU_HWRPLANE)>>10);
1281     CONS_Printf("HW texture used    : %7d KiB\n", HWD.pfnGetTextureUsed()>>10);
1282     }
1283 #endif
1284 #endif	// PLAIN_MALLOC
1285 
1286     CONS_Printf("\2System Memory Info\n");
1287     freebytes = I_GetFreeMem(&totalbytes);
1288     CONS_Printf("Total     physical memory: %6d KiB\n", totalbytes>>10);
1289     CONS_Printf("Available physical memory: %6d KiB\n", freebytes>>10);
1290 }
1291 
1292 
1293 
1294 
1295 
// Allocate a zone copy of string s, with the given tag and owner.
char *Z_Strdup(const char *s, memtag_e tag, void **user)
{
  size_t cnt = strlen(s) + 1;  // include the terminating NUL
  return memcpy(Z_Malloc(cnt, tag, user), s, cnt);
}
1300 
1301 // return size of data of this block.
Z_Datasize(void * ptr)1302 int Z_Datasize( void* ptr )
1303 {
1304     memblock_t*  block = (memblock_t *) ( (byte *)ptr - sizeof(memblock_t));
1305     return  block->size - sizeof(memblock_t);
1306 }
1307