// SPDX-License-Identifier: GPL-2.0+
/*
 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written
 * by Doug Lea and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 *
 * The original code is available at http://gee.cs.oswego.edu/pub/misc/
 * as file malloc-2.6.6.c.
 */

#include <common.h>
#include <log.h>
#include <asm/global_data.h>

#if CONFIG_IS_ENABLED(UNIT_TEST)
#define DEBUG
#endif

#include <malloc.h>
#include <asm/io.h>

#ifdef DEBUG
#if __STD_C
static void malloc_update_mallinfo (void);
void malloc_stats (void);
#else
static void malloc_update_mallinfo ();
void malloc_stats();
#endif
#endif	/* DEBUG */

DECLARE_GLOBAL_DATA_PTR;

/*
  Emulation of sbrk for WIN32
  All code within the ifdef WIN32 is untested by me.

  Thanks to Martin Fong and others for supplying this.
*/


#ifdef WIN32

#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
~(malloc_getpagesize-1))
#define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))

/* reserve 64MB to ensure large contiguous space */
#define RESERVED_SIZE (1024*1024*64)
#define NEXT_SIZE (2048*1024)
#define TOP_MEMORY ((unsigned long)2*1024*1024*1024)

struct GmListElement;
typedef struct GmListElement GmListElement;

struct GmListElement
{
	GmListElement* next;
	void* base;
};

static GmListElement* head = 0;
static unsigned int gNextAddress = 0;
static unsigned int gAddressBase = 0;
static unsigned int gAllocatedSize = 0;

static
GmListElement* makeGmListElement (void* bas)
{
	GmListElement* this;
	this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
	assert (this);
	if (this)
	{
		this->base = bas;
		this->next = head;
		head = this;
	}
	return this;
}

void gcleanup ()
{
	BOOL rval;
	assert ( (head == NULL) || (head->base == (void*)gAddressBase));
	if (gAddressBase && (gNextAddress - gAddressBase))
	{
		rval = VirtualFree ((void*)gAddressBase,
					gNextAddress - gAddressBase,
					MEM_DECOMMIT);
		assert (rval);
	}
	while (head)
	{
		GmListElement* next = head->next;
		rval = VirtualFree (head->base, 0, MEM_RELEASE);
		assert (rval);
		LocalFree (head);
		head = next;
	}
}

static
void* findRegion (void* start_address, unsigned long size)
{
	MEMORY_BASIC_INFORMATION info;
	if (size >= TOP_MEMORY) return NULL;

	while ((unsigned long)start_address + size < TOP_MEMORY)
	{
		VirtualQuery (start_address, &info, sizeof (info));
		if ((info.State == MEM_FREE) && (info.RegionSize >= size))
			return start_address;
		else
		{
			/* Requested region is not available so see if the */
			/* next region is available.  Set 'start_address' */
			/* to the next region and call 'VirtualQuery()' */
			/* again. */

			start_address = (char*)info.BaseAddress + info.RegionSize;

			/* Make sure we start looking for the next region */
			/* on the *next* 64K boundary.  Otherwise, even if */
			/* the new region is free according to */
			/* 'VirtualQuery()', the subsequent call to */
			/* 'VirtualAlloc()' (which follows the call to */
			/* this routine in 'wsbrk()') will round *down* */
			/* the requested address to a 64K boundary which */
			/* we already know is an address in the */
			/* unavailable region.  Thus, the subsequent call */
			/* to 'VirtualAlloc()' will fail and bring us back */
			/* here, causing us to go into an infinite loop. */

			start_address =
				(void *) AlignPage64K((unsigned long) start_address);
		}
	}
	return NULL;

}


void* wsbrk (long size)
{
	void* tmp;
	if (size > 0)
	{
		if (gAddressBase == 0)
		{
			gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
			gNextAddress = gAddressBase =
				(unsigned int)VirtualAlloc (NULL, gAllocatedSize,
							MEM_RESERVE, PAGE_NOACCESS);
		} else if (AlignPage (gNextAddress + size) > (gAddressBase +
				gAllocatedSize))
		{
			long new_size = max (NEXT_SIZE, AlignPage (size));
			void* new_address = (void*)(gAddressBase+gAllocatedSize);
			do
			{
				new_address = findRegion (new_address, new_size);

				if (!new_address)
					return (void*)-1;

				gAddressBase = gNextAddress =
					(unsigned int)VirtualAlloc (new_address, new_size,
								MEM_RESERVE, PAGE_NOACCESS);
				/* repeat in case of race condition */
				/* The region that we found has been snagged */
				/* by another thread */
			}
			while (gAddressBase == 0);

			assert (new_address == (void*)gAddressBase);

			gAllocatedSize = new_size;

			if (!makeGmListElement ((void*)gAddressBase))
				return (void*)-1;
		}
		if ((size + gNextAddress) > AlignPage (gNextAddress))
		{
			void* res;
			res = VirtualAlloc ((void*)AlignPage (gNextAddress),
						(size + gNextAddress -
						 AlignPage (gNextAddress)),
						MEM_COMMIT, PAGE_READWRITE);
			if (!res)
				return (void*)-1;
		}
		tmp = (void*)gNextAddress;
		gNextAddress = (unsigned int)tmp + size;
		return tmp;
	}
	else if (size < 0)
	{
		unsigned int alignedGoal = AlignPage (gNextAddress + size);
		/* Trim by releasing the virtual memory */
		if (alignedGoal >= gAddressBase)
		{
			VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
				     MEM_DECOMMIT);
			gNextAddress = gNextAddress + size;
			return (void*)gNextAddress;
		}
		else
		{
			VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
				     MEM_DECOMMIT);
			gNextAddress = gAddressBase;
			return (void*)-1;
		}
	}
	else
	{
		return (void*)gNextAddress;
	}
}

#endif



/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
} __attribute__((__may_alias__)) ;

typedef struct malloc_chunk* mchunkptr;

/*

   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   `head:'  |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   `foot:'  |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. (After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished via
        malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        never merged or traversed from any other chunk, they have no
        foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, is used only if no other chunk is
       available, and is released back to the system if it is very
       large (see M_TRIM_THRESHOLD).

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

    * Implicitly, through the host system's memory mapping tables.
       If supported, requests greater than a threshold are usually
       serviced via calls to mmap, and then later released via munmap.

*/

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
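
/*
 * Illustrative only (not compiled in): a sketch of how the conversion and
 * padding macros above behave, assuming a 32-bit INTERNAL_SIZE_T so that
 * SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16.  The chunk
 * address 0x1000 is made up for the example.
 */
#if 0
static void example_request2size(void)
{
  mchunkptr p = (mchunkptr)0x1000;      /* hypothetical chunk address */
  Void_t* mem = chunk2mem(p);           /* user pointer: 0x1008 */

  assert(mem2chunk(mem) == p);          /* inverse mapping */
  assert(aligned_OK(mem));              /* 0x1008 & 7 == 0 */

  assert(request2size(1)  == MINSIZE);  /* tiny requests round up to 16 */
  assert(request2size(13) == 24);       /* 13 + 4 overhead, rounded to 8 */
  assert(request2size(24) == 32);       /* 24 + 4 overhead -> 32 */
}
#endif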




/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))




/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))




/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
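
/*
 * Illustrative only (not compiled in): how the boundary-tag macros combine
 * when walking physically adjacent chunks.  Note that p's own inuse bit is
 * stored in the *next* chunk's size field, and a free chunk's foot is the
 * prev_size field of the chunk after it.
 */
#if 0
static void example_boundary_tags(mchunkptr p)
{
  mchunkptr nxt = next_chunk(p);        /* header just past p's size bytes */

  if (!inuse(p))                        /* free: foot must match head */
    assert(nxt->prev_size == chunksize(p));

  if (!prev_inuse(p))                   /* previous chunk free: can step back */
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
  }
}
#endif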




/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (av_[2])          /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define initial_top    ((mchunkptr)(bin_at(0)))

/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

static mbinptr av_[NAV * 2 + 2] = {
 NULL, NULL,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};

#ifdef CONFIG_NEEDS_MANUAL_RELOC
static void malloc_bin_reloc(void)
{
	mbinptr *p = &av_[2];
	size_t i;

	for (i = 2; i < ARRAY_SIZE(av_); ++i, ++p)
		*p = (mbinptr)((ulong)*p + gd->reloc_off);
}
#else
static inline void malloc_bin_reloc(void) {}
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void);
#endif

ulong mem_malloc_start = 0;
ulong mem_malloc_end = 0;
ulong mem_malloc_brk = 0;

void *sbrk(ptrdiff_t increment)
{
	ulong old = mem_malloc_brk;
	ulong new = old + increment;

	/*
	 * if we are giving memory back make sure we clear it out since
	 * we set MORECORE_CLEARS to 1
	 */
	if (increment < 0)
		memset((void *)new, 0, -increment);

	if ((new < mem_malloc_start) || (new > mem_malloc_end))
		return (void *)MORECORE_FAILURE;

	mem_malloc_brk = new;

	return (void *)old;
}

void mem_malloc_init(ulong start, ulong size)
{
	mem_malloc_start = start;
	mem_malloc_end = start + size;
	mem_malloc_brk = start;

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
	malloc_init();
#endif

	debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
	      mem_malloc_end);
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
	memset((void *)mem_malloc_start, 0x0, size);
#endif
	malloc_bin_reloc();
}
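
/*
 * Illustrative only (not compiled in): typical board-level use.  The start
 * address and size below are made up; real callers pass a region carved out
 * of RAM (commonly sized by CONFIG_SYS_MALLOC_LEN).  After this call, the
 * sbrk() above serves MORECORE requests from that region.
 */
#if 0
static void example_mem_malloc_init(void)
{
	mem_malloc_init(0x84000000, 1024 * 1024);  /* hypothetical 1 MiB arena */
	/* malloc()/free()/realloc() now operate within that arena. */
}
#endif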

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                         \
(((((unsigned long)(sz)) >> 9) ==    0) ?      (((unsigned long)(sz)) >>  3):\
 ((((unsigned long)(sz)) >> 9) <=    4) ? 56 + (((unsigned long)(sz)) >>  6):\
 ((((unsigned long)(sz)) >> 9) <=   20) ? 91 + (((unsigned long)(sz)) >>  9):\
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12):\
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15):\
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18):\
					  126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
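
/*
 * Illustrative only (not compiled in): where a few sizes land under the
 * indexing macros above.
 */
#if 0
static void example_bin_indexing(void)
{
  assert(smallbin_index(16)  ==  2);    /* 16 >> 3 */
  assert(bin_index(504)      == 63);    /* last 8-byte-spaced small bin */
  assert(bin_index(512)      == 64);    /* 56 + (512 >> 6) */
  assert(bin_index(4096)     == 99);    /* 91 + (4096 >> 9) */

  assert(is_small_request(400));        /* 400 < 512 - 8 */
  assert(!is_small_request(504));       /* needs the general bin search */
}
#endif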



/*
  To help compensate for the large number of bins, a one-level index
  structure is used for bin-by-bin searching.  `binblocks' is a
  one-word bitvector recording whether groups of BINBLOCKWIDTH bins
  have any (possibly) non-empty bins, so they can be skipped over
  all at once during traversals. The bits are NOT always
  cleared as soon as all bins in a block are empty, but instead only
  when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks_r     ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
#define binblocks_w     (av_[1])

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
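
/*
 * Illustrative only (not compiled in): with BINBLOCKWIDTH == 4, bin index
 * 57 lives in block 57 / 4 == 14, so its presence is recorded as bit 14 of
 * the binblocks bitvector kept in av_[1].
 */
#if 0
static void example_binblocks(void)
{
  assert(idx2binblock(57) == (1U << 14));

  mark_binblock(57);                    /* set bit 14 */
  assert(binblocks_r & (1U << 14));
  clear_binblock(57);                   /* clear it again */
  assert(!(binblocks_r & (1U << 14)));
}
#endif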




/* Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#ifdef DEBUG
static unsigned int n_mmaps = 0;
#endif	/* DEBUG */
static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif

#ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
static void malloc_init(void)
{
	int i, j;

	debug("bins (av_ array) are at %p\n", (void *)av_);

	av_[0] = NULL; av_[1] = NULL;
	for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
		av_[i] = bin_at(j - 2);
		av_[i + 1] = bin_at(j - 2);

		/* Just print the first few bins so that
		 * we can see they are all right.
		 */
		if (i < 10)
			debug("av_[%d]=%lx av_[%d]=%lx\n",
			      i, (ulong)av_[i],
			      i + 1, (ulong)av_[i + 1]);
	}

	/* Init the static bookkeeping as well */
	sbrk_base = (char *)(-1);
	max_sbrked_mem = 0;
	max_total_mem = 0;
#ifdef DEBUG
	memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
#endif
}
#endif

/*
  Debugging support
*/

#ifdef DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk = P;                                \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
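
/*
 * Illustrative only (not compiled in): why the note above says to pass
 * plain variables.  unlink() expands P more than once, so an argument with
 * side effects would be evaluated repeatedly inside the macro body.
 */
#if 0
static void example_unlink(mchunkptr p)
{
  mchunkptr bck, fwd;

  unlink(p, bck, fwd);                  /* safe: p is a plain variable */
  /* unlink(next_chunk(p), bck, fwd) would expand next_chunk(p) twice;
     harmless for next_chunk itself, but unsafe for anything with side
     effects, hence the warning. */
}
#endif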




/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */

/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*     brk;                  /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*     new_brk;              /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char*     old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */

  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
  /* Otherwise, we need to correct to a page size below anyway. */
  /* (We also correct below if an intervening foreign sbrk call.) */

  if (sbrk_base != (char*)(-1))
    sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

  brk = (char*)(MORECORE (sbrk_size));

  /* Fail if sbrk failed or if a foreign sbrk call killed our space */
  if (brk == (char*)(MORECORE_FAILURE) ||
      (brk < old_end && old_top != initial_top))
    return;

  sbrked_mem += sbrk_size;

  if (brk == old_end) /* can just add bytes to current top */
  {
    top_size = sbrk_size + old_top_size;
    set_head(top, top_size | PREV_INUSE);
  }
  else
  {
    if (sbrk_base == (char*)(-1)) /* First time through. Record base */
      sbrk_base = brk;
    else /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
      sbrked_mem += brk - (char*)old_end;

    /* Guarantee alignment of first new chunk made from this space */
    front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
    if (front_misalign > 0)
    {
      correction = (MALLOC_ALIGNMENT) - front_misalign;
      brk += correction;
    }
    else
      correction = 0;

    /* Guarantee the next brk will be at a page boundary */

    correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
		   ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));

    /* Allocate correction */
    new_brk = (char*)(MORECORE (correction));
    if (new_brk == (char*)(MORECORE_FAILURE)) return;

    sbrked_mem += correction;

    top = (mchunkptr)brk;
    top_size = new_brk - brk + correction;
    set_head(top, top_size | PREV_INUSE);

    if (old_top != initial_top)
    {

      /* There must have been an intervening foreign sbrk call. */
      /* A double fencepost is necessary to prevent consolidation */

      /* If not enough space to do this, then user did something very wrong */
      if (old_top_size < MINSIZE)
      {
	set_head(top, PREV_INUSE); /* will force null return from malloc */
	return;
      }

      /* Also keep size a multiple of MALLOC_ALIGNMENT */
      old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
      set_head_size(old_top, old_top_size);
      chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
      chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
      /* If possible, release the rest. */
      if (old_top_size >= MINSIZE)
	fREe(chunk2mem(old_top));
    }
  }

  if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
    max_sbrked_mem = sbrked_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}




/* Main public routines */


/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps to succeed is taken:

      1. The bin corresponding to the request size is scanned, and if
	 a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
	 enough. This is a form of (roving) first fit, used only in
	 the absence of exact fits. Runs of consecutive requests use
	 the remainder of the chunk used for the previous such request
	 whenever possible. This limited use of a first-fit style
	 allocation strategy tends to give contiguous chunks
	 coextensive lifetimes, which improves locality and can reduce
	 fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
	 chunk big enough to fulfill the request, and splitting off
	 any remainder. This search is strictly by best-fit; i.e.,
	 the smallest (with ties going to approximately the least
	 recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
	 (`top') is split off. (This use of `top' is in accord with
	 the best-fit search rule. In effect, `top' is treated as
	 larger (and thus less well fitting) than any other available
	 chunk since it can be extended to be as large as necessary
	 (up to system limitations).

      5. If the request size meets the mmap threshold and the
	 system supports mmap, and there are few enough currently
	 allocated mmapped regions, and a call to mmap succeeds,
	 the request is allocated via direct memory mapping.

      6. Otherwise, the top of memory is extended by
	 obtaining more space from the system (normally using sbrk,
	 but definable to anything else via the MORECORE macro).
	 Memory is gathered from the system (in system page-sized
	 units) in a way that allows chunks obtained across different
	 sbrk calls to be consolidated, but does not require
	 contiguous memory. Thus, it should be safe to intersperse
	 mallocs with other sbrk calls.


      All allocations are made from the `lowest' part of any found
      chunk. (The implementation invariant is that prev_inuse is
      always true of any allocated chunk; i.e., that each allocated
      chunk borders either a previously allocated and still in-use chunk,
      or the base of its memory arena.)

*/
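
/*
 * Illustrative only (not compiled in): how a run of small requests maps
 * onto the steps above, assuming SIZE_SZ == 4 so that a 20-byte request
 * pads to nb == 24 (smallbin 3).
 */
#if 0
static void example_malloc_paths(void)
{
  Void_t *a = mALLOc(20);       /* fresh arena: falls through to step 4,
				   carving 24 bytes off `top' */
  Void_t *b = mALLOc(20);       /* likewise, placed right after a */

  fREe(a);                      /* a is binned; b and the arena base keep
				   it from consolidating */

  Void_t *c = mALLOc(20);       /* step 1: exact small-bin fit, returns
				   a's old chunk */

  fREe(b);
  fREe(c);
}
#endif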

#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return malloc_simple(bytes);
#endif

  /* check if mem_malloc_init() was run */
  if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
    /* not initialized yet */
    return NULL;
  }

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size */

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins. */

    q = bin_at(idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
	--idx; /* adjust to rescan below after checking last remainder */
	break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
	unlink(victim, bck, fwd);
	set_inuse_bit_at_offset(victim, victim_size);
	check_malloced_chunk(victim, nb);
	return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks_r)
  {

    /* Get to the first marked block */

    if ( (block & binblocks_r) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks_r) == 0)
      {
	idx += BINBLOCKWIDTH;
	block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
	/* Find and use first big enough chunk ... */

	for (victim = last(bin); victim != bin; victim = victim->bk)
	{
	  victim_size = chunksize(victim);
	  remainder_size = victim_size - nb;

	  if (remainder_size >= (long)MINSIZE) /* split */
	  {
	    remainder = chunk_at_offset(victim, nb);
	    set_head(victim, nb | PREV_INUSE);
	    unlink(victim, bck, fwd);
	    link_last_remainder(remainder);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    set_foot(remainder, remainder_size);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	  else if (remainder_size >= 0)  /* take */
	  {
	    set_inuse_bit_at_offset(victim, victim_size);
	    unlink(victim, bck, fwd);
	    check_malloced_chunk(victim, nb);
	    return chunk2mem(victim);
	  }

	}

	bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
	if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
	{
	  av_[1] = (mbinptr)(binblocks_r & ~block);
	  break;
	}
	--startidx;
	q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks_r && (block != 0) )
      {
	while ((block & binblocks_r) == 0)
	{
	  idx += BINBLOCKWIDTH;
	  block <<= 1;
	}
      }
      else
	break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
	(victim = mmap_chunk(nb)))
      return chunk2mem(victim);
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  return chunk2mem(victim);

}




/*

  free() algorithm :

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
	  it is consolidated into the top, and if the total unused
	  topmost memory exceeds the trim threshold, malloc_trim is
	  called.

       4. Other chunks are consolidated as they arrive, and
	  placed in corresponding bins. (This includes the case of
	  consolidating with the current `last_remainder').

*/
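
/*
 * Illustrative only (not compiled in): the cases above in code form.
 */
#if 0
static void example_free_cases(void)
{
  Void_t *x = mALLOc(64);
  Void_t *y = mALLOc(64);
  Void_t *z = mALLOc(64);       /* keeps x and y away from top */

  fREe(NULL);                   /* case 1: no effect */
  fREe(x);                      /* case 4: binned; both neighbours in use */
  fREe(y);                      /* case 4 again, consolidating back with x */
  fREe(z);                      /* case 3: borders top, merged into top */
}
#endif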


#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	/* free() is a no-op - all the memory will be freed on relocation */
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
		return;
#endif

  if (mem == NULL)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}




/*

  Realloc algorithm:

    Chunks that were obtained via mmap cannot be extended or shrunk
    unless HAVE_MREMAP is defined, in which case mremap is used.
    Otherwise, if their reallocation is for additional space, they are
    copied.  If for less, they are just left alone.

    Otherwise, if the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into following adjacent free chunk.
       * Shifting backwards, joining preceding adjacent space
       * Both shifting backwards and extending forward.
       * Extending into newly sbrked space

    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
    size argument of zero (re)allocates a minimum-sized chunk.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-free'd chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.

*/
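
/*
 * Illustrative only (not compiled in): the common outcomes described above.
 */
#if 0
static void example_realloc(void)
{
  char *p = rEALLOc(NULL, 32);  /* realloc of NULL: same as mALLOc(32) */

  p = rEALLOc(p, 200);          /* grow: extended in place when the next
				   chunk (or top) is free and big enough,
				   else malloc-copy-free */
  p = rEALLOc(p, 40);           /* shrink: tail split off and freed once
				   the leftover is >= MINSIZE */
  fREe(p);
}
#endif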


#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;      /* padded request size */

  mchunkptr oldp;             /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize; /* its size */

  mchunkptr newp;             /* chunk to return */
  INTERNAL_SIZE_T    newsize; /* its size */
  Void_t*   newmem;           /* corresponding user mem */

  mchunkptr next;             /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T  nextsize;  /* its size */

  mchunkptr prev;             /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T  prevsize;  /* its size */

  mchunkptr remainder;        /* holds split off extra space from newp */
  INTERNAL_SIZE_T  remainder_size;   /* its size */

  mchunkptr bck;              /* misc temp for linking */
  mchunkptr fwd;              /* misc temp for linking */

#ifdef REALLOC_ZERO_BYTES_FREES
  if (!bytes) {
	fREe(oldmem);
	return NULL;
  }
#endif

  if ((long)bytes < 0) return NULL;

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == NULL) return mALLOc(bytes);

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
		/* This is harder to support and should not be needed */
		panic("pre-reloc realloc() is not supported");
	}
#endif

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);


  nb = request2size(bytes);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc(bytes);
    if (!newmem)
	return NULL; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
	if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
	{
	  newsize += nextsize;
	  top = chunk_at_offset(oldp, nb);
	  set_head(top, (newsize - nb) | PREV_INUSE);
	  set_head_size(oldp, nb);
	  return chunk2mem(oldp);
	}
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
	unlink(next, bck, fwd);
	newsize += nextsize;
	goto split;
      }
    }
    else
    {
      next = NULL;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != NULL)
      {
	/* into top */
	if (next == top)
	{
	  if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
	  {
	    unlink(prev, bck, fwd);
	    newp = prev;
	    newsize += prevsize + nextsize;
	    newmem = chunk2mem(newp);
	    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	    top = chunk_at_offset(newp, nb);
	    set_head(top, (newsize - nb) | PREV_INUSE);
	    set_head_size(newp, nb);
	    return newmem;
	  }
	}

	/* into next chunk */
	else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
	{
	  unlink(next, bck, fwd);
	  unlink(prev, bck, fwd);
	  newp = prev;
	  newsize += nextsize + prevsize;
	  newmem = chunk2mem(newp);
	  MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	  goto split;
	}
      }

      /* backward only */
      if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
      {
	unlink(prev, bck, fwd);
	newp = prev;
	newsize += prevsize;
	newmem = chunk2mem(newp);
	MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
	goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc (bytes);

    if (newmem == NULL)  /* propagate failure */
      return NULL;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(oldmem);
    return newmem;
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}




/*

  memalign algorithm:

  memalign requests more than enough space from malloc, finds a spot
  within that chunk that meets the alignment request, and then
  possibly frees the leading and trailing space.

  The alignment argument must be a power of two. This property is not
  checked by memalign, so misuse may result in random runtime errors.

  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.

*/
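
/*
  Illustrative usage (a sketch, not part of the allocator; it assumes
  the public names malloc/free/memalign map onto the mangled
  mALLOc/fREe/mEMALIGn entry points as usual, and 64 is an arbitrary
  power-of-two alignment):

      void *buf = memalign(64, 200);
      if (buf) {
          // ((unsigned long)buf % 64) == 0 here
          free(buf);   // aligned blocks are freed like any other
      }
*/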


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  INTERNAL_SIZE_T nb;        /* padded request size */
  char*           m;         /* memory returned by malloc call */
  mchunkptr       p;         /* corresponding chunk */
  char*           brk;       /* alignment point within p */
  mchunkptr       newp;      /* chunk to return */
  INTERNAL_SIZE_T newsize;   /* its size */
  INTERNAL_SIZE_T leadsize;  /* leading space before alignment point */
  mchunkptr       remainder; /* spare room at end to split off */
  long            remainder_size; /* its size */

  if ((long)bytes < 0) return NULL;

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
  if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
    return memalign_simple(alignment, bytes);
  }
#endif

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  /* Call malloc with worst case padding to hit alignment. */

  nb = request2size(bytes);
  m  = (char*)(mALLOc(nb + alignment + MINSIZE));

  /*
   * The attempt to over-allocate (with a size large enough to guarantee the
   * ability to find an aligned region within allocated memory) failed.
   *
   * Try again, this time only allocating exactly the size the user wants. If
   * the allocation now succeeds and just happens to be aligned, we can still
   * fulfill the user's request.
   */
  if (m == NULL) {
    size_t extra, extra2;
    /*
     * Use bytes not nb, since mALLOc internally calls request2size too, and
     * each call increases the size to allocate, to account for the header.
     */
    m = (char*)(mALLOc(bytes));
    /* Aligned -> return it */
    if ((((unsigned long)(m)) % alignment) == 0)
      return m;
    /*
     * Otherwise, try again, requesting enough extra space to be able to
     * acquire alignment.
     */
    fREe(m);
    /* Add in extra bytes to match misalignment of unexpanded allocation */
    extra = alignment - (((unsigned long)(m)) % alignment);
    m = (char*)(mALLOc(bytes + extra));
    /*
     * m might not be the same as before. Validate that the previous value of
     * extra still works for the current value of m.
     * If m is NULL, this check is skipped and we fall through to the
     * common NULL test below.
     */
    if (m) {
      extra2 = alignment - (((unsigned long)(m)) % alignment);
      if (extra2 > extra) {
        fREe(m);
        m = NULL;
      }
    }
    /* Fall through to original NULL check and chunk splitting logic */
  }
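
  /*
   * Worked example of the retry above (illustrative numbers only): with
   * alignment = 64, suppose the freed trial block was at m = 0x1030, so
   * m % 64 = 48 and extra = 64 - 48 = 16.  If mALLOc(bytes + extra)
   * again returns 0x1030, the aligned address 0x1040 now lies within
   * the block.  If it instead returns a block needing more padding
   * (extra2 > extra), the reserved space is insufficient, so the block
   * is freed and m is set to NULL to report failure.
   */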

  if (m == NULL) return NULL; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p))
      return chunk2mem(p); /* nothing more to do */
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
    if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    fREe(chunk2mem(p));
    p = newp;

    assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    fREe(chunk2mem(remainder));
  }

  check_inuse_chunk(p);
  return chunk2mem(p);

}




/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate the request
*/

#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
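
/*
  Worked example of the rounding above (assuming a 4096-byte page):
  a request for 5000 bytes gives (5000 + 4095) & ~4095 = 8192, i.e.
  two whole pages, and the result is page-aligned because mEMALIGn
  is called with the page size as the alignment.
*/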

/*

  calloc calls malloc, then zeroes out the allocated chunk.

*/

#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T csz;

  INTERNAL_SIZE_T sz = n * elem_size;


  /* check if expand_top called, in which case don't need to clear */
#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
  mchunkptr oldtop = top;
  INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
  Void_t* mem = mALLOc (sz);

  if ((long)n < 0) return NULL;

  if (mem == NULL)
    return NULL;
  else
  {
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
    if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
      memset(mem, 0, sz);
      return mem;
    }
#endif
    p = mem2chunk(mem);

    /* Two optional cases in which clearing not necessary */


#if HAVE_MMAP
    if (chunk_is_mmapped(p)) return mem;
#endif

    csz = chunksize(p);

#ifdef CONFIG_SYS_MALLOC_CLEAR_ON_INIT
#if MORECORE_CLEARS
    if (p == oldtop && csz > oldtopsize)
    {
      /* clear only the bytes from non-freshly-sbrked memory */
      csz = oldtopsize;
    }
#endif
#endif

    MALLOC_ZERO(mem, csz - SIZE_SZ);
    return mem;
  }
}
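
/*
  Note that, as in historical dlmalloc, n * elem_size above is not
  checked for overflow.  A caller-side guard is a minimal sketch of
  how to avoid handing calloc a wrapped-around size:

      if (elem_size != 0 && n > (size_t)-1 / elem_size)
          return NULL;    // n * elem_size would overflow size_t
*/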

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif



/*

  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory usage.
  Under some allocation patterns, some large free blocks of memory
  will be locked between two used chunks, so they cannot be given
  back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.

*/

#if __STD_C
int malloc_trim(size_t pad)
#else
int malloc_trim(pad) size_t pad;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_size = chunksize(top);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
        /* Try to figure out what we have */
        current_brk = (char*)(MORECORE (0));
        top_size = current_brk - (char*)top;
        if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
        {
          sbrked_mem = current_brk - sbrk_base;
          set_head(top, top_size | PREV_INUSE);
        }
        check_chunk(top);
        return 0;
      }

      else
      {
        /* Success. Adjust top accordingly. */
        set_head(top, (top_size - extra) | PREV_INUSE);
        sbrked_mem -= extra;
        check_chunk(top);
        return 1;
      }
    }
  }
}
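
/*
  Illustrative use of malloc_trim (a sketch; big_buffer is a
  hypothetical previously allocated block, and the 64 KiB pad is an
  arbitrary example value, not a recommendation):

      free(big_buffer);
      if (malloc_trim(64 * 1024))
          ;   // some memory was actually handed back via sbrk
*/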



/*
  malloc_usable_size:

  This routine tells you how many bytes you can actually use in an
  allocated chunk, which may be more than you requested (although
  often not). You can use this many bytes without worrying about
  overwriting other allocated objects. Not a particularly great
  programming practice, but still sometimes useful.

*/

#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
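
/*
  Illustrative use (a sketch, assuming the usual public names): because
  of alignment and binning, the usable size can exceed the request, and
  the caller may exploit the slack without reallocating:

      char *s = malloc(10);
      size_t cap = s ? malloc_usable_size(s) : 0;  // cap >= 10 on success
      // all cap bytes behind s may be written safely
*/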




/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo(void)
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
           q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
           q = next_chunk(q))
        check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);

}
#endif /* DEBUG */



/*

  malloc_stats:

  Prints the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), the maximum
  number of simultaneous mmap regions used, and the current number
  of bytes allocated via malloc (or realloc, etc) but not yet
  freed. (Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead.)

*/

#ifdef DEBUG
void malloc_stats(void)
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
         (unsigned int)(max_total_mem));
  printf("system bytes     = %10u\n",
         (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes     = %10u\n",
         (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
         (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */
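
/*
  Output shape produced by the printf calls above (the numbers below
  are invented, purely to show the %10u formatting):

      max system bytes =     266240
      system bytes     =     266240
      in use bytes     =     131072
*/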

/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo(void)
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */




/*
  mallopt:

  mallopt is the general SVID/XPG interface to tunable parameters.
  The format is to provide a (parameter-number, parameter-value) pair.
  mallopt then sets the corresponding parameter to the argument
  value if it can (i.e., so long as the value is meaningful),
  and returns 1 if successful else 0.

  See descriptions of tunable parameters above.

*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}
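
/*
  Illustrative use (a sketch, assuming the usual public name mapping;
  256 KiB is an arbitrary example value):

      if (!mallopt(M_TRIM_THRESHOLD, 256 * 1024))
          ;   // 0 means the parameter number was not recognized
*/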

int initf_malloc(void)
{
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	assert(gd->malloc_base);	/* Set up by crt0.S */
	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
	gd->malloc_ptr = 0;
#endif

	return 0;
}

/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added Several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
           (e.g. WIN32 platforms)
         * Cleanup header file inclusion for WIN32 platforms
         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
           memory allocation routines
         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
           usage of 'assert' in non-WIN32 code
         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
           avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/