#ifdef _RAKNET_SUPPORT_DL_MALLOC

/*
Default header file for malloc-2.8.x, written by Doug Lea
and released to the public domain, as explained at
http://creativecommons.org/licenses/publicdomain.

last update: Wed May 27 14:25:17 2009  Doug Lea  (dl at gee)

This header is for ANSI C/C++ only.  You can set any of
the following #defines before including:

* If USE_DL_PREFIX is defined, it is assumed that malloc.c
was also compiled with this option, so all routines
have names starting with "rdl".

* If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
file will be #included AFTER <malloc.h>. This is needed only if
your system defines a struct mallinfo that is incompatible with the
standard one declared here.  Otherwise, you can include this file
INSTEAD of your system <malloc.h>.  At least on ANSI, all
declarations should be compatible with system versions.

* If MSPACES is defined, declarations for mspace versions are included.
*/
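
/*
For example (a sketch; the header name rdlmalloc.h is assumed from
this file's companion "rdlmalloc-options.h"), a project that wants
the prefixed names and mspace support could do:

#define USE_DL_PREFIX
#define MSPACES 1
#include "rdlmalloc.h"
*/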

#ifndef MALLOC_280_H
#define MALLOC_280_H

#include "rdlmalloc-options.h"

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0     /* define to a value */
#endif  /* ONLY_MSPACES */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif  /* NO_MALLINFO */


#if !ONLY_MSPACES

#ifndef USE_DL_PREFIX
#define rdlcalloc               calloc
#define rdlfree                 free
#define rdlmalloc               malloc
#define rdlmemalign             memalign
#define rdlrealloc              realloc
#define rdlvalloc               valloc
#define rdlpvalloc              pvalloc
#define rdlmallinfo             mallinfo
#define rdlmallopt              mallopt
#define rdlmalloc_trim          malloc_trim
#define rdlmalloc_stats         malloc_stats
#define rdlmalloc_usable_size   malloc_usable_size
#define rdlmalloc_footprint     malloc_footprint
#define rdlindependent_calloc   independent_calloc
#define rdlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */
#if !NO_MALLINFO
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef STRUCT_MALLINFO_DECLARED
#define STRUCT_MALLINFO_DECLARED 1
	struct mallinfo {
		MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
		MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
		MALLINFO_FIELD_TYPE smblks;   /* always 0 */
		MALLINFO_FIELD_TYPE hblks;    /* always 0 */
		MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
		MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
		MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
		MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
		MALLINFO_FIELD_TYPE fordblks; /* total free space */
		MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
	};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif  /* _MALLOC_H */
#endif  /* HAVE_USR_INCLUDE_MALLOC_H */
#endif  /* !NO_MALLINFO */

	/*
	malloc(size_t n)
	Returns a pointer to a newly allocated chunk of at least n bytes, or
	null if no space is available, in which case errno is set to ENOMEM
	on ANSI C systems.

	If n is zero, malloc returns a minimum-sized chunk. (The minimum
	size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
	systems.)  Note that size_t is an unsigned type, so calls with
	arguments that would be negative if signed are interpreted as
	requests for huge amounts of space, which will often fail. The
	maximum supported value of n differs across systems, but is in all
	cases less than the maximum representable value of a size_t.
	*/
	void* rdlmalloc(size_t);

	/*
	free(void* p)
	Releases the chunk of memory pointed to by p, that had been previously
	allocated using malloc or a related routine such as realloc.
	It has no effect if p is null. If p was not malloced or already
	freed, free(p) will by default cause the current program to abort.
	*/
	void  rdlfree(void*);
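
	/*
	A minimal allocate/use/release sketch (illustrative only; the size
	and the handle_out_of_memory() handler are hypothetical):

	char* buf = (char*)rdlmalloc(128);
	if (buf == 0)
		handle_out_of_memory();   // errno is ENOMEM on ANSI systems
	else {
		buf[0] = '\0';
		rdlfree(buf);             // each successful rdlmalloc needs a matching rdlfree
	}
	*/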

	/*
	calloc(size_t n_elements, size_t element_size);
	Returns a pointer to n_elements * element_size bytes, with all locations
	set to zero.
	*/
	void* rdlcalloc(size_t, size_t);

	/*
	realloc(void* p, size_t n)
	Returns a pointer to a chunk of size n that contains the same data
	as does chunk p up to the minimum of (n, p's size) bytes, or null
	if no space is available.

	The returned pointer may or may not be the same as p. The algorithm
	prefers extending p in most cases when possible, otherwise it
	employs the equivalent of a malloc-copy-free sequence.

	If p is null, realloc is equivalent to malloc.

	If space is not available, realloc returns null, errno is set (if on
	ANSI) and p is NOT freed.

	If n is for fewer bytes than already held by p, the newly unused
	space is lopped off and freed if possible.  realloc with a size
	argument of zero (re)allocates a minimum-sized chunk.

	The old unix realloc convention of allowing the last-free'd chunk
	to be used as an argument to realloc is not supported.
	*/

	void* rdlrealloc(void*, size_t);
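
	/*
	Because a failed realloc returns null WITHOUT freeing p, assigning
	the result straight back to p would leak the original block. A
	common pattern (a sketch, not part of this API) uses a temporary:

	void* tmp = rdlrealloc(p, newsize);
	if (tmp != 0)
		p = tmp;   // success: p now refers to the resized chunk
	else
		;          // failure: p is still valid and must still be freed
	*/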

	/*
	memalign(size_t alignment, size_t n);
	Returns a pointer to a newly allocated chunk of n bytes, aligned
	in accord with the alignment argument.

	The alignment argument should be a power of two. If the argument is
	not a power of two, the nearest greater power is used.
	8-byte alignment is guaranteed by normal malloc calls, so don't
	bother calling memalign with an argument of 8 or less.

	Overreliance on memalign is a sure way to fragment space.
	*/
	void* rdlmemalign(size_t, size_t);
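
	/*
	For instance, to obtain a 64-byte-aligned block (a sketch; 64 is an
	arbitrary power of two, and the assert is purely illustrative):

	void* p = rdlmemalign(64, 1024);
	if (p != 0) {
		assert(((size_t)p & 63) == 0);   // address is a multiple of 64
		rdlfree(p);
	}
	*/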

	/*
	valloc(size_t n);
	Equivalent to memalign(pagesize, n), where pagesize is the page
	size of the system. If the pagesize is unknown, 4096 is used.
	*/
	void* rdlvalloc(size_t);

	/*
	mallopt(int parameter_number, int parameter_value)
	Sets tunable parameters.  The format is to provide a
	(parameter-number, parameter-value) pair.  mallopt then sets the
	corresponding parameter to the argument value if it can (i.e., so
	long as the value is meaningful), and returns 1 if successful else
	0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
	normally defined in malloc.h.  None of these are used in this malloc,
	so setting them has no effect. But this malloc also supports other
	options in mallopt:

	Symbol            param #  default    allowed param values
	M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
	M_GRANULARITY        -2     page size   any power of 2 >= page size
	M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
	*/
	int rdlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
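
/*
For example (a sketch; the 4MB threshold is arbitrary), raising the
trim threshold so the allocator holds on to more free top-of-heap
memory before returning it to the system:

	if (rdlmallopt(M_TRIM_THRESHOLD, 4*1024*1024) == 0)
		;   // the value was not accepted
*/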


	/*
	malloc_footprint();
	Returns the number of bytes obtained from the system.  The total
	number of bytes allocated by malloc, realloc etc., is less than this
	value. Unlike mallinfo, this function returns only a precomputed
	result, so can be called frequently to monitor memory consumption.
	Even if locks are otherwise defined, this function does not use them,
	so results might not be up to date.
	*/
	size_t rdlmalloc_footprint(void);

#if !NO_MALLINFO
	/*
	mallinfo()
	Returns (by copy) a struct containing various summary statistics:

	arena:     current total non-mmapped bytes allocated from system
	ordblks:   the number of free chunks
	smblks:    always zero.
	hblks:     current number of mmapped regions
	hblkhd:    total bytes held in mmapped regions
	usmblks:   the maximum total allocated space. This will be greater
	           than current total if trimming has occurred.
	fsmblks:   always zero
	uordblks:  current total allocated space (normal or mmapped)
	fordblks:  total free space
	keepcost:  the maximum number of bytes that could ideally be released
	           back to system via malloc_trim. ("ideally" means that
	           it ignores page restrictions etc.)

	Because these fields are ints, but internal bookkeeping may
	be kept as longs, the reported values may wrap around zero and
	thus be inaccurate.
	*/

	struct mallinfo rdlmallinfo(void);
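
	/*
	A sketch of inspecting the statistics (the printf casts assume the
	default MALLINFO_FIELD_TYPE of size_t, printed via unsigned long):

	struct mallinfo mi = rdlmallinfo();
	printf("allocated: %lu bytes, free: %lu bytes\n",
	       (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);
	*/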
#endif  /* NO_MALLINFO */

	/*
	independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

	independent_calloc is similar to calloc, but instead of returning a
	single cleared space, it returns an array of pointers to n_elements
	independent elements that can hold contents of size elem_size, each
	of which starts out cleared, and can be independently freed,
	realloc'ed etc. The elements are guaranteed to be adjacently
	allocated (this is not guaranteed to occur with multiple callocs or
	mallocs), which may also improve cache locality in some
	applications.

	The "chunks" argument is optional (i.e., may be null, which is
	probably the most typical usage). If it is null, the returned array
	is itself dynamically allocated and should also be freed when it is
	no longer needed. Otherwise, the chunks array must be of at least
	n_elements in length. It is filled in with the pointers to the
	chunks.

	In either case, independent_calloc returns this pointer array, or
	null if the allocation failed.  If n_elements is zero and "chunks"
	is null, it returns a chunk representing an array with zero elements
	(which should be freed if not wanted).

	Each element must be individually freed when it is no longer
	needed. If you'd like to instead be able to free all at once, you
	should instead use regular calloc and assign pointers into this
	space to represent elements.  (In this case though, you cannot
	independently free elements.)

	independent_calloc simplifies and speeds up implementations of many
	kinds of pools.  It may also be useful when constructing large data
	structures that initially have a fixed number of fixed-sized nodes,
	but the number is not known at compile time, and some of the nodes
	may later need to be freed. For example:
	struct Node { int item; struct Node* next; };

	struct Node* build_list() {
		struct Node** pool;
		int i, n = read_number_of_nodes_needed();
		if (n <= 0) return 0;
		pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
		if (pool == 0) die();
		// organize into a linked list...
		struct Node* first = pool[0];
		for (i = 0; i < n-1; ++i)
			pool[i]->next = pool[i+1];
		free(pool);     // Can now free the array (or not, if it is needed later)
		return first;
	}
	*/
	void** rdlindependent_calloc(size_t, size_t, void**);

	/*
	independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

	independent_comalloc allocates, all at once, a set of n_elements
	chunks with sizes indicated in the "sizes" array.    It returns
	an array of pointers to these elements, each of which can be
	independently freed, realloc'ed etc. The elements are guaranteed to
	be adjacently allocated (this is not guaranteed to occur with
	multiple callocs or mallocs), which may also improve cache locality
	in some applications.

	The "chunks" argument is optional (i.e., may be null). If it is null
	the returned array is itself dynamically allocated and should also
	be freed when it is no longer needed. Otherwise, the chunks array
	must be of at least n_elements in length. It is filled in with the
	pointers to the chunks.

	In either case, independent_comalloc returns this pointer array, or
	null if the allocation failed.  If n_elements is zero and chunks is
	null, it returns a chunk representing an array with zero elements
	(which should be freed if not wanted).

	Each element must be individually freed when it is no longer
	needed. If you'd like to instead be able to free all at once, you
	should instead use a single regular malloc, and assign pointers at
	particular offsets in the aggregate space. (In this case though, you
	cannot independently free elements.)

	independent_comalloc differs from independent_calloc in that each
	element may have a different size, and also that it does not
	automatically clear elements.

	independent_comalloc can be used to speed up allocation in cases
	where several structs or objects must always be allocated at the
	same time.  For example:

	struct Head { ... };
	struct Foot { ... };

	void send_message(char* msg) {
		int msglen = strlen(msg);
		size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
		void* chunks[3];
		if (independent_comalloc(3, sizes, chunks) == 0)
			die();
		struct Head* head = (struct Head*)(chunks[0]);
		char*        body = (char*)(chunks[1]);
		struct Foot* foot = (struct Foot*)(chunks[2]);
		// ...
	}

	In general though, independent_comalloc is worth using only for
	larger values of n_elements. For small values, you probably won't
	detect enough difference from series of malloc calls to bother.

	Overuse of independent_comalloc can increase overall memory usage,
	since it cannot reuse existing noncontiguous small chunks that
	might be available for some of the elements.
	*/
	void** rdlindependent_comalloc(size_t, size_t*, void**);


	/*
	pvalloc(size_t n);
	Equivalent to valloc(minimum-page-that-holds(n)), that is,
	round up n to nearest pagesize.
	*/
	void*  rdlpvalloc(size_t);

	/*
	malloc_trim(size_t pad);

	If possible, gives memory back to the system (via negative arguments
	to sbrk) if there is unused memory at the `high' end of the malloc
	pool or in unused MMAP segments. You can call this after freeing
	large blocks of memory to potentially reduce the system-level memory
	requirements of a program. However, it cannot guarantee to reduce
	memory. Under some allocation patterns, some large free blocks of
	memory will be locked between two used chunks, so they cannot be
	given back to the system.

	The `pad' argument to malloc_trim represents the amount of free
	trailing space to leave untrimmed. If this argument is zero, only
	the minimum amount of memory to maintain internal data structures
	will be left. Non-zero arguments can be supplied to maintain enough
	trailing space to service future expected allocations without having
	to re-obtain memory from the system.

	Malloc_trim returns 1 if it actually released any memory, else 0.
	*/
	int  rdlmalloc_trim(size_t);
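
	/*
	For example (a sketch; the 64K pad and big_buffer are hypothetical),
	after freeing a set of large blocks a program might return unused
	memory to the system while keeping some slack for future requests:

	rdlfree(big_buffer);
	if (rdlmalloc_trim(64 * 1024) == 1)
		;   // some memory was actually released
	*/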

	/*
	malloc_stats();
	Prints on stderr the amount of space obtained from the system (both
	via sbrk and mmap), the maximum amount (which may be more than
	current if malloc_trim and/or munmap got called), and the current
	number of bytes allocated via malloc (or realloc, etc) but not yet
	freed. Note that this is the number of bytes allocated, not the
	number requested. It will be larger than the number requested
	because of alignment and bookkeeping overhead. Because it includes
	alignment wastage as being in use, this figure may be greater than
	zero even when no user-level chunks are allocated.

	The reported current and maximum system memory can be inaccurate if
	a program makes other calls to system memory allocation functions
	(normally sbrk) outside of malloc.

	malloc_stats prints only the most commonly interesting statistics.
	More information can be obtained by calling mallinfo.
	*/
	void  rdlmalloc_stats(void);

#endif /* !ONLY_MSPACES */

	/*
	malloc_usable_size(void* p);

	Returns the number of bytes you can actually use in
	an allocated chunk, which may be more than you requested (although
	often not) due to alignment and minimum size constraints.
	You can use this many bytes without worrying about
	overwriting other allocated objects. This is not a particularly great
	programming practice. malloc_usable_size can be more useful in
	debugging and assertions, for example:

	p = malloc(n);
	assert(malloc_usable_size(p) >= 256);
	*/
	size_t rdlmalloc_usable_size(void*);


#if MSPACES

	/*
	mspace is an opaque type representing an independent
	region of space that supports rak_mspace_malloc, etc.
	*/
	typedef void* mspace;

	/*
	rak_create_mspace creates and returns a new independent space with the
	given initial capacity, or, if 0, the default granularity size.  It
	returns null if there is no system memory available to create the
	space.  If argument locked is non-zero, the space uses a separate
	lock to control access. The capacity of the space will grow
	dynamically as needed to service rak_mspace_malloc requests.  You can
	control the sizes of incremental increases of this space by
	compiling with a different DEFAULT_GRANULARITY or dynamically
	setting with mallopt(M_GRANULARITY, value).
	*/
	mspace rak_create_mspace(size_t capacity, int locked);

	/*
	rak_destroy_mspace destroys the given space, and attempts to return all
	of its memory back to the system, returning the total number of
	bytes freed. After destruction, the results of access to all memory
	used by the space become undefined.
	*/
	size_t rak_destroy_mspace(mspace msp);
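
	/*
	A minimal lifecycle sketch (capacity 0 selects the default
	granularity; locked==1 requests a private lock):

	mspace ms = rak_create_mspace(0, 1);
	if (ms != 0) {
		void* p = rak_mspace_malloc(ms, 256);
		rak_mspace_free(ms, p);
		rak_destroy_mspace(ms);   // returns the space's memory in one call
	}
	*/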

	/*
	rak_create_mspace_with_base uses the memory supplied as the initial base
	of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
	space is used for bookkeeping, so the capacity must be at least this
	large. (Otherwise 0 is returned.) When this initial space is
	exhausted, additional memory will be obtained from the system.
	Destroying this space will deallocate all additionally allocated
	space (if possible) but not the initial base.
	*/
	mspace rak_create_mspace_with_base(void* base, size_t capacity, int locked);
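
	/*
	For example, to carve an allocator out of a static buffer (a
	sketch; the 1MB size is arbitrary but comfortably exceeds the
	bookkeeping overhead noted above):

	static char arena[1024 * 1024];
	mspace ms = rak_create_mspace_with_base(arena, sizeof(arena), 0);
	// allocations are served from 'arena' until it is exhausted
	*/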

	/*
	rak_mspace_track_large_chunks controls whether requests for large chunks
	are allocated in their own untracked mmapped regions, separate from
	others in this mspace. By default large chunks are not tracked,
	which reduces fragmentation. However, such chunks are not
	necessarily released to the system upon rak_destroy_mspace.  Enabling
	tracking by setting to true may increase fragmentation, but avoids
	leakage when relying on rak_destroy_mspace to release all memory
	allocated using this space.  The function returns the previous
	setting.
	*/
	int rak_mspace_track_large_chunks(mspace msp, int enable);

	/*
	rak_mspace_malloc behaves as malloc, but operates within
	the given space.
	*/
	void* rak_mspace_malloc(mspace msp, size_t bytes);

	/*
	rak_mspace_free behaves as free, but operates within
	the given space.

	If compiled with FOOTERS==1, rak_mspace_free is not actually needed.
	free may be called instead of rak_mspace_free because freed chunks from
	any space are handled by their originating spaces.
	*/
	void rak_mspace_free(mspace msp, void* mem);

	/*
	rak_mspace_realloc behaves as realloc, but operates within
	the given space.

	If compiled with FOOTERS==1, rak_mspace_realloc is not actually
	needed.  realloc may be called instead of rak_mspace_realloc because
	realloced chunks from any space are handled by their originating
	spaces.
	*/
	void* rak_mspace_realloc(mspace msp, void* mem, size_t newsize);

	/*
	rak_mspace_calloc behaves as calloc, but operates within
	the given space.
	*/
	void* rak_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

	/*
	rak_mspace_memalign behaves as memalign, but operates within
	the given space.
	*/
	void* rak_mspace_memalign(mspace msp, size_t alignment, size_t bytes);

	/*
	rak_mspace_independent_calloc behaves as independent_calloc, but
	operates within the given space.
	*/
	void** rak_mspace_independent_calloc(mspace msp, size_t n_elements,
		size_t elem_size, void* chunks[]);

	/*
	rak_mspace_independent_comalloc behaves as independent_comalloc, but
	operates within the given space.
	*/
	void** rak_mspace_independent_comalloc(mspace msp, size_t n_elements,
		size_t sizes[], void* chunks[]);

	/*
	rak_mspace_footprint() returns the number of bytes obtained from the
	system for this space.
	*/
	size_t rak_mspace_footprint(mspace msp);


#if !NO_MALLINFO
	/*
	rak_mspace_mallinfo behaves as mallinfo, but reports properties of
	the given space.
	*/
	struct mallinfo rak_mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

	/*
	rak_mspace_usable_size(void* p) behaves the same as malloc_usable_size.
	*/
	size_t rak_mspace_usable_size(void* mem);

	/*
	rak_mspace_malloc_stats behaves as malloc_stats, but reports
	properties of the given space.
	*/
	void rak_mspace_malloc_stats(mspace msp);

	/*
	rak_mspace_trim behaves as malloc_trim, but
	operates within the given space.
	*/
	int rak_mspace_trim(mspace msp, size_t pad);

	/*
	An alias for mallopt.
	*/
	int rak_mspace_mallopt(int, int);

#endif  /* MSPACES */

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

/*
This is a version (aka rdlmalloc) of malloc/free/realloc written by
Doug Lea and released to the public domain, as explained at
http://creativecommons.org/licenses/publicdomain.  Send questions,
comments, complaints, performance data, etc to dl@cs.oswego.edu

* Version 2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)

Note: There may be an updated version of this malloc obtainable at
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
Check before installing!

* Quickstart

This library is all in one file to simplify the most common usage:
ftp it, compile it (-O3), and link it into another program. All of
the compile-time options default to reasonable values for use on
most platforms.  You might later want to step through various
compile-time and dynamic tuning options.

For convenience, an include file for code using this malloc is at:
ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h
You don't really need this .h file unless you call functions not
defined in your system include files.  The .h file contains only the
excerpts from this file needed for using this malloc on ANSI C/C++
systems, so long as you haven't changed compile-time options about
naming and tuning parameters.  If you do, then you can create your
own malloc.h that does include all settings by cutting at the point
indicated below. Note that you may already by default be using a C
library containing a malloc that is based on some version of this
malloc (for example in linux). You might still want to use the one
in this file to customize settings or to avoid overheads associated
with library versions.

* Vital statistics:

Supported pointer/size_t representation:       4 or 8 bytes
size_t MUST be an unsigned type of the same width as
pointers. (If you are using an ancient system that declares
size_t as a signed type, or need it to be a different width
than pointers, you can use a previous release of this malloc
(e.g. 2.7.2) supporting these.)

Alignment:                                     8 bytes (default)
This suffices for nearly all current machines and C compilers.
However, you can define MALLOC_ALIGNMENT to be wider than this
if necessary (up to 128bytes), at the expense of using more space.

Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
                                        8 or 16 bytes (if 8byte sizes)
Each malloced chunk has a hidden word of overhead holding size
and status information, and additional cross-check word
if FOOTERS is defined.

Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
                        8-byte ptrs:  32 bytes    (including overhead)

Even a request for zero bytes (i.e., malloc(0)) returns a
pointer to something of the minimum allocatable size.
The maximum overhead wastage (i.e., number of extra bytes
allocated than were requested in malloc) is less than or equal
to the minimum size, except for requests >= mmap_threshold that
are serviced via mmap(), where the worst case wastage is about
32 bytes plus the remainder from a system page (the minimal
mmap unit); typically 4096 or 8192 bytes.

Security: static-safe; optionally more or less
The "security" of malloc refers to the ability of malicious
code to accentuate the effects of errors (for example, freeing
space that is not currently malloc'ed or overwriting past the
ends of chunks) in code that calls malloc.  This malloc
guarantees not to modify any memory locations below the base of
heap, i.e., static variables, even in the presence of usage
errors.  The routines additionally detect most improper frees
and reallocs.  All this holds as long as the static bookkeeping
for malloc itself is not corrupted by some other means.  This
is only one aspect of security -- these checks do not, and
cannot, detect all possible programming errors.

If FOOTERS is defined nonzero, then each allocated chunk
carries an additional check word to verify that it was malloced
from its space.  These check words are the same within each
execution of a program using malloc, but differ across
executions, so externally crafted fake chunks cannot be
freed. This improves security by rejecting frees/reallocs that
could corrupt heap memory, in addition to the checks preventing
writes to statics that are always on.  This may further improve
security at the expense of time and space overhead.  (Note that
FOOTERS may also be worth using with MSPACES.)

By default detected errors cause the program to abort (calling
"abort()"). You can override this to instead proceed past
errors by defining PROCEED_ON_ERROR.  In this case, a bad free
has no effect, and a malloc that encounters a bad address
caused by user overwrites will ignore the bad address by
dropping pointers and indices to all known memory. This may
be appropriate for programs that should continue if at all
possible in the face of programming errors, although they may
run out of memory because dropped memory is never reclaimed.

If you don't like either of these options, you can define
CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
else. And if you are sure that your program using malloc has
no errors or vulnerabilities, you can define INSECURE to 1,
which might (or might not) provide a small performance improvement.

Thread-safety: NOT thread-safe unless USE_LOCKS defined
When USE_LOCKS is defined, each public call to malloc, free,
etc is surrounded with either a pthread mutex or a win32
spinlock (depending on DL_PLATFORM_WIN32). This is not especially fast, and
can be a major bottleneck.  It is designed only to provide
minimal protection in concurrent environments, and to provide a
basis for extensions.  If you are using malloc in a concurrent
program, consider instead using nedmalloc
(http://www.nedprod.com/programs/portable/nedmalloc/) or
ptmalloc (See http://www.malloc.de), which are derived
from versions of this malloc.

System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
This malloc can use unix sbrk or any emulation (invoked using
the CALL_MORECORE macro) and/or mmap/munmap or any emulation
(invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
memory.  On most unix systems, it tends to work best if both
MORECORE and MMAP are enabled.  On Win32, it uses emulations
based on VirtualAlloc. It also uses common C library functions
like memset.

Compliance: I believe it is compliant with the Single Unix Specification
(See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
others as well.

* Overview of algorithms

This is not the fastest, most space-conserving, most portable, or
most tunable malloc ever written. However it is among the fastest
while also being among the most space-conserving, portable and
tunable.  Consistent balance across these factors results in a good
general-purpose allocator for malloc-intensive programs.

In most ways, this malloc is a best-fit allocator. Generally, it
chooses the best-fitting existing chunk for a request, with ties
broken in approximately least-recently-used order. (This strategy
normally maintains low fragmentation.) However, for requests less
than 256bytes, it deviates from best-fit when there is not an
exactly fitting available chunk by preferring to use space adjacent
to that used for the previous small request, as well as by breaking
ties in approximately most-recently-used order. (These enhance
locality of series of small allocations.)  And for very large requests
(>= 256Kb by default), it relies on system memory mapping
facilities, if supported.  (This helps avoid carrying around and
possibly fragmenting memory used only for large chunks.)

All operations (except malloc_stats and mallinfo) have execution
times that are bounded by a constant factor of the number of bits in
a size_t, not counting any clearing in calloc or copying in realloc,
or actions surrounding MORECORE and MMAP that have times
proportional to the number of non-contiguous regions returned by
system allocation routines, which is often just 1. In real-time
applications, you can optionally suppress segment traversals using
NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
system allocators return non-contiguous spaces, at the typical
expense of carrying around more memory and increased fragmentation.

The implementation is not very modular and seriously overuses
macros. Perhaps someday all C compilers will do as good a job
inlining modular code as can now be done by brute-force expansion,
but now, enough of them seem not to.

Some compilers issue a lot of warnings about code that is
dead/unreachable only on some platforms, and also about intentional
uses of negation on unsigned types. All known cases of each can be
ignored.

For a longer but out of date high-level description, see
http://gee.cs.oswego.edu/dl/html/malloc.html

* MSPACES
If MSPACES is defined, then in addition to malloc, free, etc.,
this file also defines rak_mspace_malloc, rak_mspace_free, etc. These
are versions of malloc routines that take an "mspace" argument
obtained using rak_create_mspace, to control all internal bookkeeping.
If ONLY_MSPACES is defined, only these versions are compiled.
So if you would like to use this allocator for only some allocations,
and your system malloc for others, you can compile with
ONLY_MSPACES and then do something like...
static mspace mymspace = rak_create_mspace(0,0); // for example
#define mymalloc(bytes)  rak_mspace_malloc(mymspace, bytes)

(Note: If you only need one instance of an mspace, you can instead
use "USE_DL_PREFIX" to relabel the global malloc.)

You can similarly create thread-local allocators by storing
mspaces as thread-locals. For example:
static __thread mspace tlms = 0;
void*  tlmalloc(size_t bytes) {
	if (tlms == 0) tlms = rak_create_mspace(0, 0);
	return rak_mspace_malloc(tlms, bytes);
}
void  tlfree(void* mem) { rak_mspace_free(tlms, mem); }

Unless FOOTERS is defined, each mspace is completely independent.
You cannot allocate from one and free to another (although
conformance is only weakly checked, so usage errors are not always
caught). If FOOTERS is defined, then each chunk carries around a tag
indicating its originating mspace, and frees are directed to their
originating spaces.

-------------------------  Compile-time options ---------------------------

Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly casted. You can also
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.

DL_PLATFORM_WIN32                    default: defined if _WIN32 defined
Defining DL_PLATFORM_WIN32 sets up defaults for MS environment and compilers.
Otherwise defaults are for unix. Beware that there seem to be some
cases where this malloc might not be a pure drop-in replacement for
Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
SetDIBits()) may be due to bugs in some video driver implementations
when pixel buffers are malloc()ed, and the region spans more than
one VirtualAlloc()ed region. Because rdlmalloc uses a small (64Kb)
default granularity, pixel buffers may straddle virtual allocation
regions more often than when using the Microsoft allocator.  You can
avoid this by using VirtualAlloc() and VirtualFree() for all pixel
buffers rather than using malloc().  If this is not possible,
recompile this malloc with a larger DEFAULT_GRANULARITY.

MALLOC_ALIGNMENT         default: (size_t)8
Controls the minimum alignment for malloc'ed chunks.  It must be a
power of two and at least 8, even on machines for which smaller
alignments would suffice. It may be defined as larger than this
though. Note however that code and data structures are optimized for
the case of 8-byte alignment.

MSPACES                  default: 0 (false)
If true, compile in support for independent allocation spaces.
This is only supported if HAVE_MMAP is true.

ONLY_MSPACES             default: 0 (false)
If true, only compile in mspace versions, not regular versions.

USE_LOCKS                default: 0 (false)
Causes each call to each public routine to be surrounded with
pthread or DL_PLATFORM_WIN32 mutex lock/unlock. (If set true, this can be
overridden on a per-mspace basis for mspace versions.) If set to a
non-zero value other than 1, locks are used, but their
implementation is left out, so lock functions must be supplied manually,
as described below.

USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and on x86 using gcc or MSC
If true, uses custom spin locks for locking. This is currently
supported only for x86 platforms using gcc or recent MS compilers.
Otherwise, posix locks or win32 critical sections are used.

FOOTERS                  default: 0
If true, provide extra checking and dispatching by placing
information in the footers of allocated chunks. This adds
space and time overhead.

INSECURE                 default: 0
If true, omit checks for usage errors and heap space overwrites.

USE_DL_PREFIX            default: NOT defined
Causes compiler to prefix all public routines with the string 'dl'.
This can be useful when you only want to use this malloc in one part
of a program, using your regular system malloc elsewhere.

ABORT                    default: defined as abort()
Defines how to abort on failed checks.  On most systems, a failed
check cannot die with an "assert" or even print an informative
message, because the underlying print routines in turn call malloc,
which will fail again.  Generally, the best policy is to simply call
abort(). It's not very useful to do more than this because many
errors due to overwriting will show up as address faults (null, odd
addresses etc) rather than malloc-triggered checks, so will also
abort.  Also, most compilers know that abort() does not return, so
can better optimize code conditionally calling it.

PROCEED_ON_ERROR           default: defined as 0 (false)
Controls whether detected bad addresses cause them to be bypassed
rather than aborting. If set, detected bad arguments to free and
realloc are ignored. And all bookkeeping information is zeroed out
upon a detected overwrite of freed heap space, thus losing the
ability to ever return it from malloc again, but enabling the
application to proceed. If PROCEED_ON_ERROR is defined, the
static variable malloc_corruption_error_count is compiled in
and can be examined to see if errors have occurred. This option
generates slower code than the default abort policy.

DEBUG                    default: NOT defined
The DEBUG setting is mainly intended for people trying to modify
this code or diagnose problems when porting to new platforms.
However, it may also be able to better isolate user errors than just
using runtime checks.  The assertions in the check routines spell
out in more detail the assumptions and invariants underlying the
algorithms.  The checking is fairly extensive, and will slow down
execution noticeably. Calling malloc_stats or mallinfo with DEBUG
set will attempt to check every non-mmapped allocated and free chunk
in the course of computing the summaries.

ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
Debugging assertion failures can be nearly impossible if your
version of the assert macro causes malloc to be called, which will
lead to a cascade of further failures, blowing the runtime stack.
ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
which will usually make debugging easier.

MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
The action to take before "return 0" when malloc fails to be able to
return memory because there is none available.

HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
True if this system supports sbrk or an emulation of it.

MORECORE                  default: sbrk
The name of the sbrk-style system routine to call to obtain more
memory.  See below for guidance on writing custom MORECORE
functions. The type of the argument to sbrk/MORECORE varies across
systems.  It cannot be size_t, because it supports negative
arguments, so it is normally the signed type of the same width as
size_t (sometimes declared as "intptr_t").  It doesn't much matter
though. Internally, we only call it with arguments less than half
the max value of a size_t, which should work across all reasonable
possibilities, although sometimes generating compiler warnings.

MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
If true, take advantage of fact that consecutive calls to MORECORE
with positive arguments always return contiguous increasing
addresses.  This is true of unix sbrk. It does not hurt too much to
set it true anyway, since malloc copes with non-contiguities.
Setting it false when definitely non-contiguous saves time
and possibly wasted space it would take to discover this though.

MORECORE_CANNOT_TRIM      default: NOT defined
True if MORECORE cannot release space back to the system when given
negative arguments. This is generally necessary only if you are
using a hand-crafted MORECORE function that cannot handle negative
arguments.

NO_SEGMENT_TRAVERSAL       default: 0
If non-zero, suppresses traversals of memory segments
returned by either MORECORE or CALL_MMAP. This disables
merging of segments that are contiguous, and selectively
releasing them to the OS if unused, but bounds execution times.

HAVE_MMAP                 default: 1 (true)
True if this system supports mmap or an emulation of it.  If so, and
HAVE_MORECORE is not true, MMAP is used for all system
allocation. If set and HAVE_MORECORE is true as well, MMAP is
primarily used to directly allocate very large blocks. It is also
used as a backup strategy in cases where MORECORE fails to provide
space from system. Note: A single call to MUNMAP is assumed to be
able to unmap memory that may have been allocated using multiple calls
to MMAP, so long as they are adjacent.

HAVE_MREMAP               default: 1 on linux, else 0
If true realloc() uses mremap() to re-allocate large blocks and
extend or shrink allocation spaces.

MMAP_CLEARS               default: 1 except on WINCE.
True if mmap clears memory so calloc doesn't need to. This is true
for standard unix mmap using /dev/zero and on DL_PLATFORM_WIN32 except for WINCE.

USE_BUILTIN_FFS            default: 0 (i.e., not used)
Causes malloc to use the builtin ffs() function to compute indices.
Some compilers may recognize and intrinsify ffs to be faster than the
supplied C version. Also, the case of x86 using gcc is special-cased
to an asm instruction, so is already as fast as it can be, and so
this setting has no effect. Similarly for Win32 under recent MS compilers.
(On most x86s, the asm version is only slightly faster than the C version.)

malloc_getpagesize         default: derive from system includes, or 4096.
The system page size. To the extent possible, this malloc manages
memory from the system in page-size units.  This may be (and
usually is) a function rather than a constant. This is ignored
if DL_PLATFORM_WIN32, where page size is determined using GetSystemInfo during
initialization.

USE_DEV_RANDOM             default: 0 (i.e., not used)
Causes malloc to use /dev/random to initialize secure magic seed for
stamping footers. Otherwise, the current time is used.

NO_MALLINFO                default: 0
If defined, don't compile "mallinfo". This can be a simple way
of dealing with mismatches between system declarations and
those in this file.

MALLINFO_FIELD_TYPE        default: size_t
The type of the fields in the mallinfo struct. This was originally
defined as "int" in SVID etc, but is more usefully defined as
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.

REALLOC_ZERO_BYTES_FREES    default: not defined
This should be set if a call to realloc with zero bytes should
be the same as a call to free. Some people think it should. Otherwise,
since this malloc returns a unique pointer for malloc(0), so does
realloc(p, 0).

LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
LACKS_STDLIB_H                default: NOT defined unless on DL_PLATFORM_WIN32
Define these if your system does not have these header files.
You might need to manually insert some of the declarations they provide.

DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
system_info.dwAllocationGranularity in DL_PLATFORM_WIN32,
otherwise 64K.
Also settable using mallopt(M_GRANULARITY, x)
The unit for allocating and deallocating memory from the system.  On
most systems with contiguous MORECORE, there is no reason to
make this more than a page. However, systems with MMAP tend to
either require or encourage larger granularities.  You can increase
this value to prevent system allocation functions from being called so
often, especially if they are slow.  The value must be at least one
page and must be a power of two.  Setting to 0 causes initialization
to either page size or win32 region size.  (Note: In previous
versions of malloc, the equivalent of this option was called
"TOP_PAD")

DEFAULT_TRIM_THRESHOLD    default: 2MB
Also settable using mallopt(M_TRIM_THRESHOLD, x)
The maximum amount of unused top-most memory to keep before
releasing via malloc_trim in free().  Automatic trimming is mainly
useful in long-lived programs using contiguous MORECORE.  Because
trimming via sbrk can be slow on some systems, and can sometimes be
wasteful (in cases where programs immediately afterward allocate
more large chunks) the value should be high enough so that your
overall system performance would improve by releasing this much
memory.  As a rough guide, you might set to a value close to the
average size of a process (program) running on your system.
Releasing this much memory would allow such a process to run in
memory.  Generally, it is worth tuning trim thresholds when a
program undergoes phases where several large chunks are allocated
and released in ways that can reuse each other's storage, perhaps
mixed with phases where there are no such chunks at all. The trim
value must be greater than page size to have any useful effect.  To
disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
some people use of mallocing a huge space and then freeing it at
program startup, in an attempt to reserve system memory, doesn't
have the intended effect under automatic trimming, since that memory
will immediately be returned to the system.

DEFAULT_MMAP_THRESHOLD       default: 256K
Also settable using mallopt(M_MMAP_THRESHOLD, x)
The request size threshold for using MMAP to directly service a
request. Requests of at least this size that cannot be allocated
using already-existing space will be serviced via mmap.  (If enough
normal freed space already exists it is used instead.)  Using mmap
segregates relatively large chunks of memory so that they can be
individually obtained and released from the host system. A request
serviced through mmap is never reused by any other request (at least
not directly; the system may just so happen to remap successive
requests to the same locations).  Segregating space in this way has
the benefits that: Mmapped space can always be individually released
back to the system, which helps keep the system level memory demands
of a long-lived program low.  Also, mapped memory doesn't become
`locked' between other chunks, as can happen with normally allocated
chunks, which means that even trimming via malloc_trim would not
release them.  However, it has the disadvantage that the space
cannot be reclaimed, consolidated, and then used to service later
requests, as happens with normal chunks.  The advantages of mmap
nearly always outweigh disadvantages for "large" chunks, but the
value of "large" may vary across systems.  The default is an
empirically derived value that works well in most systems. You can
disable mmap by setting to MAX_SIZE_T.

MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
The number of consolidated frees between checks to release
unused segments when freeing. When using non-contiguous segments,
especially with multiple mspaces, checking only for topmost space
doesn't always suffice to trigger trimming. To compensate for this,
free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
current number of segments, if greater) try to release unused
segments to the OS when freeing chunks that result in
consolidation. The best value for this parameter is a compromise
between slowing down frees with relatively costly checks that
rarely trigger versus holding on to unused memory. To effectively
disable, set to MAX_SIZE_T. This may lead to a very slight speed
improvement at the expense of carrying around more memory.
*/
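
/*
For example (a sketch; the flags and the file name rdlmalloc.c are
illustrative, not prescribed by this distribution), a build wanting a
thread-safe allocator with footer checks might compile the
implementation file with:

cc -O3 -DUSE_LOCKS=1 -DFOOTERS=1 -c rdlmalloc.c
*/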

/* Version identifier to allow people to support multiple versions */
#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20804
#endif /* DLMALLOC_VERSION */

#include "rdlmalloc-options.h"

#ifndef WIN32
#if defined(_XBOX) || defined(X360)
#else
#if defined(_WIN32)
#define DL_PLATFORM_WIN32 1
#endif  /* _WIN32 */
#ifdef _WIN32_WCE
#define LACKS_FCNTL_H
#define DL_PLATFORM_WIN32 1
#endif /* _WIN32_WCE */
#endif
#else
#define DL_PLATFORM_WIN32 1
#endif  /* WIN32 */

#if defined(_XBOX) || defined(X360)
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#endif
#define MMAP_CLEARS 1
#endif

#if defined(_PS3) || defined(__PS3__) || defined(SN_TARGET_PS3)
#define LACKS_SYS_PARAM_H
#include "sysutil/sysutil_sysparam.h"
#define LACKS_SYS_MMAN_H
#endif


#ifdef DL_PLATFORM_WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#endif /* MALLOC_FAILURE_ACTION */
#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
#define MMAP_CLEARS 0
#else
#define MMAP_CLEARS 1
#endif /* _WIN32_WCE */
#endif  /* DL_PLATFORM_WIN32 */

#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
/* OSX allocators provide 16 byte alignment */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#endif
#endif  /* HAVE_MORECORE */
#endif  /* DARWIN */

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>  /* For size_t */
#endif  /* LACKS_SYS_TYPES_H */

#if (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310)
#define SPIN_LOCKS_AVAILABLE 1
#else
#define SPIN_LOCKS_AVAILABLE 0
#endif

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0     /* define to a value */
#else
#define ONLY_MSPACES 1
#endif  /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else   /* ONLY_MSPACES */
#define MSPACES 0
#endif  /* ONLY_MSPACES */
#endif  /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)8U)
#endif  /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif  /* FOOTERS */
#ifndef ABORT
#define ABORT  abort()
#endif  /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif  /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif  /* PROCEED_ON_ERROR */
#ifndef USE_LOCKS
#define USE_LOCKS 0
#endif  /* USE_LOCKS */
#ifndef USE_SPIN_LOCKS
#if USE_LOCKS && SPIN_LOCKS_AVAILABLE
#define USE_SPIN_LOCKS 1
#else
#define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS && SPIN_LOCKS_AVAILABLE. */
#endif /* USE_SPIN_LOCKS */
#ifndef INSECURE
#define INSECURE 0
#endif  /* INSECURE */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif  /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif  /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else   /* linux */
#define HAVE_MREMAP 0
#endif  /* linux */
#endif  /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#endif  /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else   /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif  /* ONLY_MSPACES */
#endif  /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else   /* !HAVE_MORECORE */
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif  /* MORECORE_CONTIGUOUS */
#endif  /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(DL_PLATFORM_WIN32))
#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
#else   /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif  /* MORECORE_CONTIGUOUS */
#endif  /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else   /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif  /* MORECORE_CANNOT_TRIM */
#endif  /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else   /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif  /* HAVE_MMAP */
#endif  /* DEFAULT_MMAP_THRESHOLD */
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 4095
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* MAX_RELEASE_CHECK_RATE */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif  /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif  /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif  /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif  /* MALLINFO_FIELD_TYPE */
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif /* NO_SEGMENT_TRAVERSAL */

/*
mallopt tuning options.  SVID/XPG defines four standard parameter
numbers for mallopt, normally defined in malloc.h.  None of these
are used in this malloc, so setting them has no effect. But this
malloc does support the following options.
*/

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)

/* ------------------------ Mallinfo declarations ------------------------ */

#if !NO_MALLINFO
/*
This version of malloc supports the standard SVID/XPG mallinfo
routine that returns a struct containing usage properties and
statistics. It should work on any system that has a
/usr/include/malloc.h defining struct mallinfo.  The main
declaration needed is the mallinfo struct that is returned (by-copy)
by mallinfo().  The mallinfo struct contains a bunch of fields that
are not even meaningful in this version of malloc.  These fields
are instead filled by mallinfo() with other numbers that might be of
interest.
1277 
1278 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
1279 /usr/include/malloc.h file that includes a declaration of struct
1280 mallinfo.  If so, it is included; else a compliant version is
1281 declared below.  These must be precisely the same for mallinfo() to
1282 work.  The original SVID version of this struct, defined on most
1283 systems with mallinfo, declares all fields as ints. But some others
1284 define as unsigned long. If your system defines the fields using a
1285 type of different width than listed here, you MUST #include your
1286 system version and #define HAVE_USR_INCLUDE_MALLOC_H.
1287 */
1288 
1289 /* #define HAVE_USR_INCLUDE_MALLOC_H */
1290 
1291 #ifdef HAVE_USR_INCLUDE_MALLOC_H
1292 #include "/usr/include/malloc.h"
1293 #else /* HAVE_USR_INCLUDE_MALLOC_H */
1294 #ifndef STRUCT_MALLINFO_DECLARED
1295 #define STRUCT_MALLINFO_DECLARED 1
1296 struct mallinfo {
1297 	MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
1298 	MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
1299 	MALLINFO_FIELD_TYPE smblks;   /* always 0 */
1300 	MALLINFO_FIELD_TYPE hblks;    /* always 0 */
1301 	MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
1302 	MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
1303 	MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
1304 	MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
1305 	MALLINFO_FIELD_TYPE fordblks; /* total free space */
1306 	MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
1307 };
1308 #endif /* STRUCT_MALLINFO_DECLARED */
1309 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
1310 #endif /* NO_MALLINFO */

/*
Try to persuade compilers to inline. The most critical functions for
inlining are defined as macros, so these aren't used for them.
*/

#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#endif
#endif
#ifndef NOINLINE
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif
#endif

#ifdef __cplusplus
extern "C" {
#ifndef FORCEINLINE
#define FORCEINLINE inline
#endif
#endif /* __cplusplus */
#ifndef FORCEINLINE
#define FORCEINLINE
#endif

#if !ONLY_MSPACES

	/* ------------------- Declarations of public routines ------------------- */

#ifndef USE_DL_PREFIX
#define rdlcalloc               calloc
#define rdlfree                 free
#define rdlmalloc               malloc
#define rdlmemalign             memalign
#define rdlrealloc              realloc
#define rdlvalloc               valloc
#define rdlpvalloc              pvalloc
#define rdlmallinfo             mallinfo
#define rdlmallopt              mallopt
#define rdlmalloc_trim          malloc_trim
#define rdlmalloc_stats         malloc_stats
#define rdlmalloc_usable_size   malloc_usable_size
#define rdlmalloc_footprint     malloc_footprint
#define rdlmalloc_max_footprint malloc_max_footprint
#define rdlindependent_calloc   independent_calloc
#define rdlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */


	/*
	malloc(size_t n)
	Returns a pointer to a newly allocated chunk of at least n bytes, or
	null if no space is available, in which case errno is set to ENOMEM
	on ANSI C systems.

	If n is zero, malloc returns a minimum-sized chunk. (The minimum
	size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
	systems.)  Note that size_t is an unsigned type, so calls with
	arguments that would be negative if signed are interpreted as
	requests for huge amounts of space, which will often fail. The
	maximum supported value of n differs across systems, but is in all
	cases less than the maximum representable value of a size_t.
	*/
	void* rdlmalloc(size_t);
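
	/*
	A minimal usage sketch (illustrative only): allocate, check for
	failure, use, and release.

	void* p = rdlmalloc(1024);
	if (p == 0)
		out_of_memory();   // hypothetical handler; errno is ENOMEM here
	else
		rdlfree(p);
	*/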

	/*
	free(void* p)
	Releases the chunk of memory pointed to by p, that had been previously
	allocated using malloc or a related routine such as realloc.
	It has no effect if p is null. If p was not malloced or already
	freed, free(p) will by default cause the current program to abort.
	*/
	void  rdlfree(void*);

	/*
	calloc(size_t n_elements, size_t element_size);
	Returns a pointer to n_elements * element_size bytes, with all locations
	set to zero.
	*/
	void* rdlcalloc(size_t, size_t);

	/*
	realloc(void* p, size_t n)
	Returns a pointer to a chunk of size n that contains the same data
	as does chunk p up to the minimum of (n, p's size) bytes, or null
	if no space is available.

	The returned pointer may or may not be the same as p. The algorithm
	prefers extending p in most cases when possible, otherwise it
	employs the equivalent of a malloc-copy-free sequence.

	If p is null, realloc is equivalent to malloc.

	If space is not available, realloc returns null, errno is set (if on
	ANSI) and p is NOT freed.

	If n is for fewer bytes than already held by p, the newly unused
	space is lopped off and freed if possible.  realloc with a size
	argument of zero (re)allocates a minimum-sized chunk.

	The old unix realloc convention of allowing the last-free'd chunk
	to be used as an argument to realloc is not supported.
	*/

	void* rdlrealloc(void*, size_t);
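
	/*
	Because a failed realloc leaves p allocated, the usual idiom (shown
	here as a sketch) keeps the old pointer until the call succeeds:

	void* q = rdlrealloc(p, newsize);
	if (q != 0)
		p = q;             // resized (and possibly moved) chunk
	else
		out_of_memory();   // hypothetical handler; p is still valid
	*/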

	/*
	memalign(size_t alignment, size_t n);
	Returns a pointer to a newly allocated chunk of n bytes, aligned
	in accord with the alignment argument.

	The alignment argument should be a power of two. If the argument is
	not a power of two, the nearest greater power is used.
	8-byte alignment is guaranteed by normal malloc calls, so don't
	bother calling memalign with an argument of 8 or less.

	Overreliance on memalign is a sure way to fragment space.
	*/
	void* rdlmemalign(size_t, size_t);
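
	/*
	For example (a sketch): obtain a 64-byte-aligned block, e.g. for
	cache-line-sized structures.

	void* buf = rdlmemalign(64, 4096);
	assert(buf == 0 || ((size_t)buf & 63) == 0);
	*/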

	/*
	valloc(size_t n);
	Equivalent to memalign(pagesize, n), where pagesize is the page
	size of the system. If the pagesize is unknown, 4096 is used.
	*/
	void* rdlvalloc(size_t);

	/*
	mallopt(int parameter_number, int parameter_value)
	Sets tunable parameters. The format is to provide a
	(parameter-number, parameter-value) pair.  mallopt then sets the
	corresponding parameter to the argument value if it can (i.e., so
	long as the value is meaningful), and returns 1 if successful else
	0.  To work around the fact that mallopt is specified to use int,
	not size_t parameters, the value -1 is specially treated as the
	maximum unsigned size_t value.

	SVID/XPG/ANSI defines four standard param numbers for mallopt,
	normally defined in malloc.h.  None of these are used in this malloc,
	so setting them has no effect. But this malloc also supports other
	options in mallopt. See below for details.  Briefly, supported
	parameters are as follows (listed defaults are for "typical"
	configurations).

	Symbol            param #  default    allowed param values
	M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
	M_GRANULARITY        -2     page size   any power of 2 >= page size
	M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
	*/
	int rdlmallopt(int, int);
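
	/*
	For example (a sketch): raise the mmap threshold to 1MB and disable
	trimming entirely; each call returns 1 on success, 0 on failure.

	rdlmallopt(M_MMAP_THRESHOLD, 1024*1024);
	rdlmallopt(M_TRIM_THRESHOLD, -1);   // -1 is treated as MAX_SIZE_T
	*/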

	/*
	malloc_footprint();
	Returns the number of bytes obtained from the system.  The total
	number of bytes allocated by malloc, realloc etc., is less than this
	value. Unlike mallinfo, this function returns only a precomputed
	result, so can be called frequently to monitor memory consumption.
	Even if locks are otherwise defined, this function does not use them,
	so results might not be up to date.
	*/
	size_t rdlmalloc_footprint(void);

	/*
	malloc_max_footprint();
	Returns the maximum number of bytes obtained from the system. This
	value will be greater than current footprint if deallocated space
	has been reclaimed by the system. The peak number of bytes allocated
	by malloc, realloc etc., is less than this value. Unlike mallinfo,
	this function returns only a precomputed result, so can be called
	frequently to monitor memory consumption.  Even if locks are
	otherwise defined, this function does not use them, so results might
	not be up to date.
	*/
	size_t rdlmalloc_max_footprint(void);
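
	/*
	A sketch of cheap periodic monitoring built on these two precomputed
	values:

	printf("footprint: %lu bytes (peak %lu)\n",
		(unsigned long)rdlmalloc_footprint(),
		(unsigned long)rdlmalloc_max_footprint());
	*/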

#if !NO_MALLINFO
	/*
	mallinfo()
	Returns (by copy) a struct containing various summary statistics:

	arena:     current total non-mmapped bytes allocated from system
	ordblks:   the number of free chunks
	smblks:    always zero.
	hblks:     current number of mmapped regions
	hblkhd:    total bytes held in mmapped regions
	usmblks:   the maximum total allocated space. This will be greater
	           than current total if trimming has occurred.
	fsmblks:   always zero
	uordblks:  current total allocated space (normal or mmapped)
	fordblks:  total free space
	keepcost:  the maximum number of bytes that could ideally be released
	           back to system via malloc_trim. ("ideally" means that
	           it ignores page restrictions etc.)

	Because these fields are ints, but internal bookkeeping may
	be kept as longs, the reported values may wrap around zero and
	thus be inaccurate.
	*/
	struct mallinfo rdlmallinfo(void);
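
	/*
	For example (a sketch): report space currently in use vs. free.

	struct mallinfo mi = rdlmallinfo();
	printf("in use: %lu bytes, free: %lu bytes\n",
		(unsigned long)mi.uordblks, (unsigned long)mi.fordblks);
	*/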
#endif /* NO_MALLINFO */

	/*
	independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

	independent_calloc is similar to calloc, but instead of returning a
	single cleared space, it returns an array of pointers to n_elements
	independent elements that can hold contents of size elem_size, each
	of which starts out cleared, and can be independently freed,
	realloc'ed etc. The elements are guaranteed to be adjacently
	allocated (this is not guaranteed to occur with multiple callocs or
	mallocs), which may also improve cache locality in some
	applications.

	The "chunks" argument is optional (i.e., may be null, which is
	probably the most typical usage). If it is null, the returned array
	is itself dynamically allocated and should also be freed when it is
	no longer needed. Otherwise, the chunks array must be of at least
	n_elements in length. It is filled in with the pointers to the
	chunks.

	In either case, independent_calloc returns this pointer array, or
	null if the allocation failed.  If n_elements is zero and "chunks"
	is null, it returns a chunk representing an array with zero elements
	(which should be freed if not wanted).

	Each element must be individually freed when it is no longer
	needed. If you'd like to instead be able to free all at once, you
	should instead use regular calloc and assign pointers into this
	space to represent elements.  (In this case though, you cannot
	independently free elements.)

	independent_calloc simplifies and speeds up implementations of many
	kinds of pools.  It may also be useful when constructing large data
	structures that initially have a fixed number of fixed-sized nodes,
	but the number is not known at compile time, and some of the nodes
	may later need to be freed. For example:

	struct Node { int item; struct Node* next; };

	struct Node* build_list() {
		struct Node** pool;
		int n = read_number_of_nodes_needed();
		int i;
		if (n <= 0) return 0;
		pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
		if (pool == 0) die();
		// organize into a linked list...
		struct Node* first = pool[0];
		for (i = 0; i < n-1; ++i)
			pool[i]->next = pool[i+1];
		free(pool);     // Can now free the array (or not, if it is needed later)
		return first;
	}
	*/
	void** rdlindependent_calloc(size_t, size_t, void**);

	/*
	independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

	independent_comalloc allocates, all at once, a set of n_elements
	chunks with sizes indicated in the "sizes" array.    It returns
	an array of pointers to these elements, each of which can be
	independently freed, realloc'ed etc. The elements are guaranteed to
	be adjacently allocated (this is not guaranteed to occur with
	multiple callocs or mallocs), which may also improve cache locality
	in some applications.

	The "chunks" argument is optional (i.e., may be null). If it is null
	the returned array is itself dynamically allocated and should also
	be freed when it is no longer needed. Otherwise, the chunks array
	must be of at least n_elements in length. It is filled in with the
	pointers to the chunks.

	In either case, independent_comalloc returns this pointer array, or
	null if the allocation failed.  If n_elements is zero and chunks is
	null, it returns a chunk representing an array with zero elements
	(which should be freed if not wanted).

	Each element must be individually freed when it is no longer
	needed. If you'd like to instead be able to free all at once, you
	should instead use a single regular malloc, and assign pointers at
	particular offsets in the aggregate space. (In this case though, you
	cannot independently free elements.)

	independent_comalloc differs from independent_calloc in that each
	element may have a different size, and also that it does not
	automatically clear elements.

	independent_comalloc can be used to speed up allocation in cases
	where several structs or objects must always be allocated at the
	same time.  For example:

	struct Head { ... };
	struct Foot { ... };

	void send_message(char* msg) {
		int msglen = strlen(msg);
		size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
		void* chunks[3];
		if (independent_comalloc(3, sizes, chunks) == 0)
			die();
		struct Head* head = (struct Head*)(chunks[0]);
		char*        body = (char*)(chunks[1]);
		struct Foot* foot = (struct Foot*)(chunks[2]);
		// ...
	}

	In general though, independent_comalloc is worth using only for
	larger values of n_elements. For small values, you probably won't
	detect enough difference from series of malloc calls to bother.

	Overuse of independent_comalloc can increase overall memory usage,
	since it cannot reuse existing noncontiguous small chunks that
	might be available for some of the elements.
	*/
	void** rdlindependent_comalloc(size_t, size_t*, void**);


	/*
	pvalloc(size_t n);
	Equivalent to valloc(minimum-page-that-holds(n)), that is,
	round up n to nearest pagesize.
	*/
	void*  rdlpvalloc(size_t);

	/*
	malloc_trim(size_t pad);

	If possible, gives memory back to the system (via negative arguments
	to sbrk) if there is unused memory at the `high' end of the malloc
	pool or in unused MMAP segments. You can call this after freeing
	large blocks of memory to potentially reduce the system-level memory
	requirements of a program. However, it cannot guarantee to reduce
	memory. Under some allocation patterns, some large free blocks of
	memory will be locked between two used chunks, so they cannot be
	given back to the system.

	The `pad' argument to malloc_trim represents the amount of free
	trailing space to leave untrimmed. If this argument is zero, only
	the minimum amount of memory to maintain internal data structures
	will be left. Non-zero arguments can be supplied to maintain enough
	trailing space to service future expected allocations without having
	to re-obtain memory from the system.

	Malloc_trim returns 1 if it actually released any memory, else 0.
	*/
	int  rdlmalloc_trim(size_t);
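
	/*
	For example (a sketch): after freeing a large batch of objects,
	return unused memory to the system while keeping about 1MB of
	headroom for future allocations.

	int released = rdlmalloc_trim(1024*1024);   // 1 if memory was given back
	*/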

	/*
	malloc_stats();
	Prints on stderr the amount of space obtained from the system (both
	via sbrk and mmap), the maximum amount (which may be more than
	current if malloc_trim and/or munmap got called), and the current
	number of bytes allocated via malloc (or realloc, etc) but not yet
	freed. Note that this is the number of bytes allocated, not the
	number requested. It will be larger than the number requested
	because of alignment and bookkeeping overhead. Because it includes
	alignment wastage as being in use, this figure may be greater than
	zero even when no user-level chunks are allocated.

	The reported current and maximum system memory can be inaccurate if
	a program makes other calls to system memory allocation functions
	(normally sbrk) outside of malloc.

	malloc_stats prints only the most commonly interesting statistics.
	More information can be obtained by calling mallinfo.
	*/
	void  rdlmalloc_stats(void);

#endif /* !ONLY_MSPACES */

	/*
	malloc_usable_size(void* p);

	Returns the number of bytes you can actually use in
	an allocated chunk, which may be more than you requested (although
	often not) due to alignment and minimum size constraints.
	You can use this many bytes without worrying about
	overwriting other allocated objects. This is not a particularly great
	programming practice. malloc_usable_size can be more useful in
	debugging and assertions, for example:

	p = malloc(n);
	assert(malloc_usable_size(p) >= 256);
	*/
	size_t rdlmalloc_usable_size(void*);


#if MSPACES

	/*
	mspace is an opaque type representing an independent
	region of space that supports rak_mspace_malloc, etc.
	*/
	typedef void* mspace;

	/*
	rak_create_mspace creates and returns a new independent space with the
	given initial capacity, or, if 0, the default granularity size.  It
	returns null if there is no system memory available to create the
	space.  If argument locked is non-zero, the space uses a separate
	lock to control access. The capacity of the space will grow
	dynamically as needed to service rak_mspace_malloc requests.  You can
	control the sizes of incremental increases of this space by
	compiling with a different DEFAULT_GRANULARITY or dynamically
	setting with mallopt(M_GRANULARITY, value).
	*/
	mspace rak_create_mspace(size_t capacity, int locked);
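
	/*
	For example (a sketch): create a private, unlocked space, allocate
	from it, and release everything at once by destroying the space.

	mspace msp = rak_create_mspace(0, 0);   // default capacity, no lock
	void* p = rak_mspace_malloc(msp, 128);
	// ... use p ...
	rak_destroy_mspace(msp);   // frees p and all other chunks in msp
	*/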

	/*
	rak_destroy_mspace destroys the given space, and attempts to return all
	of its memory back to the system, returning the total number of
	bytes freed. After destruction, the results of access to all memory
	used by the space become undefined.
	*/
	size_t rak_destroy_mspace(mspace msp);

	/*
	rak_create_mspace_with_base uses the memory supplied as the initial base
	of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
	space is used for bookkeeping, so the capacity must be at least this
	large. (Otherwise 0 is returned.) When this initial space is
	exhausted, additional memory will be obtained from the system.
	Destroying this space will deallocate all additionally allocated
	space (if possible) but not the initial base.
	*/
	mspace rak_create_mspace_with_base(void* base, size_t capacity, int locked);
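
	/*
	For example (a sketch): carve an mspace out of a caller-supplied
	buffer; part of it (less than 128*sizeof(size_t) bytes) is consumed
	by bookkeeping.

	static char arena[64*1024];
	mspace msp = rak_create_mspace_with_base(arena, sizeof(arena), 0);
	void* p = rak_mspace_malloc(msp, 256);
	*/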

	/*
	rak_mspace_track_large_chunks controls whether requests for large chunks
	are allocated in their own untracked mmapped regions, separate from
	others in this mspace. By default large chunks are not tracked,
	which reduces fragmentation. However, such chunks are not
	necessarily released to the system upon rak_destroy_mspace.  Enabling
	tracking by setting to true may increase fragmentation, but avoids
	leakage when relying on rak_destroy_mspace to release all memory
	allocated using this space.  The function returns the previous
	setting.
	*/
	int rak_mspace_track_large_chunks(mspace msp, int enable);


	/*
	rak_mspace_malloc behaves as malloc, but operates within
	the given space.
	*/
	void* rak_mspace_malloc(mspace msp, size_t bytes);

	/*
	rak_mspace_free behaves as free, but operates within
	the given space.

	If compiled with FOOTERS==1, rak_mspace_free is not actually needed.
	free may be called instead of rak_mspace_free because freed chunks from
	any space are handled by their originating spaces.
	*/
	void rak_mspace_free(mspace msp, void* mem);

	/*
	rak_mspace_realloc behaves as realloc, but operates within
	the given space.

	If compiled with FOOTERS==1, rak_mspace_realloc is not actually
	needed.  realloc may be called instead of rak_mspace_realloc because
	realloced chunks from any space are handled by their originating
	spaces.
	*/
	void* rak_mspace_realloc(mspace msp, void* mem, size_t newsize);

	/*
	rak_mspace_calloc behaves as calloc, but operates within
	the given space.
	*/
	void* rak_mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

	/*
	rak_mspace_memalign behaves as memalign, but operates within
	the given space.
	*/
	void* rak_mspace_memalign(mspace msp, size_t alignment, size_t bytes);

	/*
	rak_mspace_independent_calloc behaves as independent_calloc, but
	operates within the given space.
	*/
	void** rak_mspace_independent_calloc(mspace msp, size_t n_elements,
		size_t elem_size, void* chunks[]);

	/*
	rak_mspace_independent_comalloc behaves as independent_comalloc, but
	operates within the given space.
	*/
	void** rak_mspace_independent_comalloc(mspace msp, size_t n_elements,
		size_t sizes[], void* chunks[]);

	/*
	rak_mspace_footprint() returns the number of bytes obtained from the
	system for this space.
	*/
	size_t rak_mspace_footprint(mspace msp);

	/*
	rak_mspace_max_footprint() returns the peak number of bytes obtained
	from the system for this space.
	*/
	size_t rak_mspace_max_footprint(mspace msp);


#if !NO_MALLINFO
	/*
	rak_mspace_mallinfo behaves as mallinfo, but reports properties of
	the given space.
	*/
	struct mallinfo rak_mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

	/*
	rak_mspace_usable_size(void* p) behaves the same as malloc_usable_size.
	*/
	size_t rak_mspace_usable_size(void* mem);

	/*
	rak_mspace_malloc_stats behaves as malloc_stats, but reports
	properties of the given space.
	*/
	void rak_mspace_malloc_stats(mspace msp);

	/*
	rak_mspace_trim behaves as malloc_trim, but
	operates within the given space.
	*/
	int rak_mspace_trim(mspace msp, size_t pad);

	/*
	An alias for mallopt.
	*/
	int rak_mspace_mallopt(int, int);

#endif /* MSPACES */

#ifdef __cplusplus
};  /* end of extern "C" */
#endif /* __cplusplus */

/*
========================================================================
To make a fully customizable malloc.h header file, cut everything
above this line, put into file malloc.h, edit to suit, and #include it
on the next line, as well as in programs that use this malloc.
========================================================================
*/

/* #include "malloc.h" */

/*------------------------------ internal #includes ---------------------- */

#ifdef DL_PLATFORM_WIN32
#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
#endif /* DL_PLATFORM_WIN32 */

#include <stdio.h>       /* for printing in malloc_stats */

#ifndef LACKS_ERRNO_H
#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */

#if FOOTERS || DEBUG
#include <time.h>        /* for magic initialization */
#endif /* FOOTERS */

#ifndef LACKS_STDLIB_H
#include <stdlib.h>      /* for abort() */
#endif /* LACKS_STDLIB_H */

#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#undef assert
#define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else  /* DEBUG */
#ifndef assert
#define assert(x)
#endif
#define DEBUG 0
#endif /* DEBUG */

#ifndef LACKS_STRING_H
#include <string.h>      /* for memset etc */
#endif  /* LACKS_STRING_H */

#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h>     /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */

#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
#if (defined(linux) && !defined(__USE_GNU))
#define __USE_GNU 1
#include <sys/mman.h>    /* for mmap */
#undef __USE_GNU
#else
#include <sys/mman.h>    /* for mmap */
#endif /* linux */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */

#ifndef LACKS_UNISTD_H
#include <unistd.h>     /* for sbrk, sysconf */
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void*     sbrk(ptrdiff_t);
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */

/* Declarations for locking */
#if USE_LOCKS
#if defined(_XBOX) || defined(X360)
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif !defined(DL_PLATFORM_WIN32)
#include <pthread.h>
#if defined (__SVR4) && defined (__sun)  /* solaris */
#include <thread.h>
#endif /* solaris */
#else
#ifndef _M_AMD64
/* These are already defined on AMD64 builds */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
	LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
	LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _M_AMD64 */
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#endif /* Win32 */
#endif /* USE_LOCKS */

/* Declarations for bit scanning on win32 */
#if defined(_MSC_VER) && _MSC_VER>=1300 && defined(DL_PLATFORM_WIN32)
#ifndef BitScanForward	/* Try to avoid pulling in WinNT.h */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
	unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
	unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#ifdef __cplusplus
}
#endif /* __cplusplus */

#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif /* BitScanForward */
#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */

#ifndef DL_PLATFORM_WIN32
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef DL_PLATFORM_WIN32 /* use supplied emulation of getpagesize */
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else /* just guess */
#                define malloc_getpagesize ((size_t)4096U)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
#endif

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
	((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
	((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
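
/*
Worked example (assuming MALLOC_ALIGNMENT == 8, so CHUNK_ALIGN_MASK == 7):
for A == 0x1003, (size_t)A & 7 == 3, so align_offset(A) yields
(8 - 3) & 7 == 5, and A + 5 == 0x1008 is 8-byte aligned.
*/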

/* -------------------------- MMAP preliminaries ------------------------- */

/*
If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
checks to fail so compiler optimizer can delete code rather than
using so many "#if"s.
*/


/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

#if HAVE_MMAP

#if defined(_XBOX) || defined(X360)
	/* Win32 MMAP via VirtualAlloc */
	static void* win32mmap(size_t size) {
		void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
		return (ptr != 0)? ptr: MFAIL;
	}

	/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
	static void* win32direct_mmap(size_t size) {
		void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
			PAGE_READWRITE);
		return (ptr != 0)? ptr: MFAIL;
	}

	/* This function supports releasing coalesced segments */
	static int win32munmap(void* ptr, size_t size) {
		MEMORY_BASIC_INFORMATION minfo;
		char* cptr = (char*)ptr;
		while (size) {
			if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
				return -1;
			if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
				minfo.State != MEM_COMMIT || minfo.RegionSize > size)
				return -1;
			if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
				return -1;
			cptr += minfo.RegionSize;
			size -= minfo.RegionSize;
		}
		return 0;
	}

	#define RAK_MMAP_DEFAULT(s)             win32mmap(s)
	#define RAK_MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
	#define RAK_DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
#elif defined(_PS3) || defined(__PS3__) || defined(SN_TARGET_PS3)

inline int ___freeit_dlmalloc_default__(void* s) {free(s); return 0;}
#define RAK_MMAP_DEFAULT(s) malloc(s)
#define RAK_MUNMAP_DEFAULT(a, s) ___freeit_dlmalloc_default__(a)
#define RAK_DIRECT_MMAP_DEFAULT(s) malloc(s)

#elif !defined(DL_PLATFORM_WIN32)
	#define RAK_MUNMAP_DEFAULT(a, s)  munmap((a), (s))
	#define MMAP_PROT            (PROT_READ|PROT_WRITE)
	#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
	#define MAP_ANONYMOUS        MAP_ANON
	#endif /* MAP_ANON */
	#ifdef MAP_ANONYMOUS
	#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
	#define RAK_MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
	#else /* MAP_ANONYMOUS */
	/*
	Nearly all versions of mmap support MAP_ANONYMOUS, so the following
	is unlikely to be needed, but is supplied just in case.
	*/
	#define MMAP_FLAGS           (MAP_PRIVATE)
	static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
	#define RAK_MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
		(dev_zero_fd = open("/dev/zero", O_RDWR), \
		mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
		mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
	#endif /* MAP_ANONYMOUS */

	#define RAK_DIRECT_MMAP_DEFAULT(s) RAK_MMAP_DEFAULT(s)

#else /* DL_PLATFORM_WIN32 */

	/* Win32 MMAP via VirtualAlloc */
	static FORCEINLINE void* win32mmap(size_t size) {
		void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
		return (ptr != 0)? ptr: MFAIL;
	}

	/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
	static FORCEINLINE void* win32direct_mmap(size_t size) {
		void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
			PAGE_READWRITE);
		return (ptr != 0)? ptr: MFAIL;
	}

	/* This function supports releasing coalesced segments */
	static FORCEINLINE int win32munmap(void* ptr, size_t size) {
		MEMORY_BASIC_INFORMATION minfo;
		char* cptr = (char*)ptr;
		while (size) {
			if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
				return -1;
			if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
				minfo.State != MEM_COMMIT || minfo.RegionSize > size)
				return -1;
			if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
				return -1;
			cptr += minfo.RegionSize;
			size -= minfo.RegionSize;
		}
		return 0;
	}

	#define RAK_MMAP_DEFAULT(s)             win32mmap(s)
	#define RAK_MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
	#define RAK_DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
#endif /* DL_PLATFORM_WIN32 */
#endif /* HAVE_MMAP */

#if HAVE_MREMAP
#ifndef DL_PLATFORM_WIN32
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#endif /* DL_PLATFORM_WIN32 */
#endif /* HAVE_MREMAP */


/**
* Define CALL_MORECORE
*/
#if HAVE_MORECORE
#ifdef MORECORE
#define CALL_MORECORE(S)    MORECORE(S)
#else  /* MORECORE */
#define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
#endif /* MORECORE */
#else  /* HAVE_MORECORE */
#define CALL_MORECORE(S)        MFAIL
#endif /* HAVE_MORECORE */

/**
* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
*/
#if HAVE_MMAP
#define USE_MMAP_BIT            (SIZE_T_ONE)

#ifdef MMAP
#define CALL_MMAP(s)        MMAP(s)
#else /* MMAP */
#define CALL_MMAP(s)        RAK_MMAP_DEFAULT(s)
#endif /* MMAP */
#ifdef MUNMAP
#define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
#else /* MUNMAP */
#define CALL_MUNMAP(a, s)   RAK_MUNMAP_DEFAULT((a), (s))
#endif /* MUNMAP */
#ifdef DIRECT_MMAP
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#else /* DIRECT_MMAP */
#define CALL_DIRECT_MMAP(s) RAK_DIRECT_MMAP_DEFAULT(s)
#endif /* DIRECT_MMAP */
#else  /* HAVE_MMAP */
#define USE_MMAP_BIT            (SIZE_T_ZERO)

#define MMAP(s)                 MFAIL
#define MUNMAP(a, s)            (-1)
#define DIRECT_MMAP(s)          MFAIL
#define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
#define CALL_MMAP(s)            MMAP(s)
#define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
#endif /* HAVE_MMAP */

/**
* Define CALL_MREMAP
*/
#if HAVE_MMAP && HAVE_MREMAP
#ifdef MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#else /* MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#endif /* MREMAP */
#else  /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */

/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in rak_create_mspace_with_base */
#define EXTERN_BIT            (8U)


#endif /* MALLOC_280_H */

#endif // _RAKNET_SUPPORT_DL_MALLOC