/*
 * Copyright (c) 2012, 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Implementation of <<malloc>> <<free>> <<calloc>> <<realloc>>, optionally
 * reentrant.
 *
 * For interface documentation, refer to malloc.c.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <malloc.h>

#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif

#ifndef MAX
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
#endif

#include <sys/config.h>

#define MALLOC_LOCK __malloc_lock()
#define MALLOC_UNLOCK __malloc_unlock()

#define nano_malloc malloc
#define nano_free free
#define nano_realloc realloc
#define nano_memalign memalign
#define nano_valloc valloc
#define nano_pvalloc pvalloc
#define nano_calloc calloc
#define nano_cfree cfree
#define nano_malloc_usable_size malloc_usable_size
#define nano_malloc_stats malloc_stats
#define nano_mallinfo mallinfo
#define nano_mallopt mallopt

/* Redefine names to avoid conflict with user names */
#define free_list __malloc_free_list
#define sbrk_start __malloc_sbrk_start
#define current_mallinfo __malloc_current_mallinfo

#define ALIGN_TO(size, align) \
    (((size) + (align) -1L) & ~((align) -1L))
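
/* For illustration: with a power-of-two align, ALIGN_TO rounds size up
 * to the next multiple of align, e.g. ALIGN_TO(13, 8) == 16 and
 * ALIGN_TO(16, 8) == 16. */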

/* Alignment of allocated block */
#define MALLOC_ALIGN (8U)
#define CHUNK_ALIGN (sizeof(void*))
#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN)

/* The minimum allocation size, large enough
 * to hold a free-list pointer */
#define MALLOC_MINSIZE (sizeof(void *))
#define MALLOC_PAGE_ALIGN (0x1000)
#define MAX_ALLOC_SIZE (0x80000000U)

typedef size_t malloc_size_t;

typedef struct malloc_chunk
{
    /*          --------------------------------------
     *   chunk->| size                               |
     *          --------------------------------------
     *          | Padding for alignment              |
     *          | This includes padding inserted by  |
     *          | the compiler (to align fields) and |
     *          | explicit padding inserted by this  |
     *          | implementation. If any explicit    |
     *          | padding is being used then the     |
     *          | sizeof (size) bytes at             |
     *          | mem_ptr - CHUNK_OFFSET must be     |
     *          | initialized with the negative      |
     *          | offset to size.                    |
     *          --------------------------------------
     * mem_ptr->| When allocated: data               |
     *          | When freed: pointer to next free   |
     *          | chunk                              |
     *          --------------------------------------
     */
    /* size of the allocated payload area, including size before
       CHUNK_OFFSET */
    long size;

    /* From here on, the memory holds either the pointer to the next free
       chunk (when freed) or the data payload (when allocated) */
    struct malloc_chunk * next;
} chunk;


#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next)))

/* size of smallest possible chunk. A memory piece smaller than this size
 * won't be able to create a chunk */
#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE)
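
/* For illustration (target-dependent; assuming an ILP32 ABI with 4-byte
 * pointers and 4-byte long): CHUNK_ALIGN == 4, MALLOC_PADDING == 4 and
 * CHUNK_OFFSET == 4, so MALLOC_MINCHUNK == 4 + 4 + 4 == 12. On an LP64
 * ABI the padding vanishes and MALLOC_MINCHUNK == 8 + 0 + 8 == 16. */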

/* Forward data declarations */
extern chunk * free_list;
extern char * sbrk_start;
extern struct mallinfo current_mallinfo;

/* Forward function declarations */
extern void * nano_malloc(malloc_size_t);
extern void nano_free (void * free_p);
extern void nano_cfree(void * ptr);
extern void * nano_calloc(malloc_size_t n, malloc_size_t elem);
extern void nano_malloc_stats(void);
extern malloc_size_t nano_malloc_usable_size(void * ptr);
extern void * nano_realloc(void * ptr, malloc_size_t size);
extern void * nano_memalign(size_t align, size_t s);
extern int nano_mallopt(int parameter_number, int parameter_value);
extern void * nano_valloc(size_t s);
extern void * nano_pvalloc(size_t s);

static inline chunk * get_chunk_from_ptr(void * ptr)
{
    /* Assume that there is no explicit padding in the
       chunk, and that the chunk starts at ptr - CHUNK_OFFSET. */
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);

    /* c->size being negative indicates that there is explicit padding in
       the chunk, in which case c->size is currently the negative offset to
       the true size. */
    if (c->size < 0) c = (chunk *)((char *)c + c->size);
    return c;
}
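
/* Worked example (hypothetical addresses, assuming CHUNK_OFFSET == 4):
 * a chunk header at 0x1000 whose payload had to be shifted up 4 bytes
 * for 8-byte alignment stores -4 in the long at 0x1004. For a caller's
 * pointer of 0x1008, c starts as 0x1004; c->size == -4 then steps back
 * to the true header at 0x1000. */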

extern void *sbrk(intptr_t);

#ifdef DEFINE_MALLOC
/* List head of free blocks */
chunk * free_list = NULL;

/* Starting point of memory allocated from system */
char * sbrk_start = NULL;

/** Function sbrk_aligned
  * Algorithm:
  *   Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned.
  *   Optimise for the case that it is already aligned - only ask for extra
  *   padding after we know we need it.
  */
static void* sbrk_aligned(malloc_size_t s)
{
    char *p, *align_p;

    if (sbrk_start == NULL) sbrk_start = sbrk(0);

    p = sbrk(s);

    /* sbrk returns -1 if it fails to allocate */
    if (p == (void *)-1)
        return p;

    align_p = (char*)ALIGN_TO((unsigned long)p, CHUNK_ALIGN);
    if (align_p != p)
    {
        /* p is not aligned, ask for a few more bytes so that we have s
         * bytes reserved from align_p. */
        p = sbrk(align_p - p);
        if (p == (void *)-1)
            return p;
    }
    return align_p;
}
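
/* For illustration (hypothetical break values, CHUNK_ALIGN == 4): if
 * sbrk(s) returns 0x2002, align_p becomes 0x2004 and a second sbrk(2)
 * reserves the two-byte gap, so s bytes starting at 0x2004 belong to
 * the allocator. */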

/** Function nano_malloc
  * Algorithm:
  *   Walk through the free list to find the first match. If it fails to
  *   find one, call sbrk to allocate a new chunk.
  */
void * nano_malloc(malloc_size_t s)
{
    chunk *p, *r;
    char * ptr, * align_ptr;
    int offset;

    malloc_size_t alloc_size;

    alloc_size = ALIGN_TO(s, CHUNK_ALIGN); /* size of aligned data load */
    alloc_size += MALLOC_PADDING; /* padding */
    alloc_size += CHUNK_OFFSET; /* size of chunk head */
    alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);
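    /* Worked example (assuming a 32-bit target, so CHUNK_ALIGN == 4,
     * MALLOC_PADDING == 4 and CHUNK_OFFSET == 4): a request of s == 5
     * gives ALIGN_TO(5, 4) == 8, then 8 + 4 + 4 == 16 bytes, which
     * already exceeds MALLOC_MINCHUNK (12). */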

    if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
    {
        errno = ENOMEM;
        return NULL;
    }

    MALLOC_LOCK;

    p = free_list;
    r = p;

    while (r)
    {
        int rem = r->size - alloc_size;
        if (rem >= 0)
        {
            if (rem >= MALLOC_MINCHUNK)
            {
                /* Found a chunk much larger than the required size:
                 * break it into two chunks and return the second one */
                r->size = rem;
                r = (chunk *)((char *)r + rem);
                r->size = alloc_size;
            }
            /* Found a chunk that is exactly the size or slightly bigger
             * than the requested size: just return this chunk */
            else if (p == r)
            {
                /* Here p == r == free_list. Move the free_list head
                 * to the next chunk */
                free_list = r->next;
            }
            else
            {
                /* Normal case. Remove it from free_list */
                p->next = r->next;
            }
            break;
        }
        p = r;
        r = r->next;
    }

    /* Failed to find an appropriate chunk. Ask for more memory */
    if (r == NULL)
    {
        r = sbrk_aligned(alloc_size);

        /* sbrk returns -1 if it fails to allocate */
        if (r == (void *)-1)
        {
            errno = ENOMEM;
            MALLOC_UNLOCK;
            return NULL;
        }
        r->size = alloc_size;
    }
    MALLOC_UNLOCK;

    ptr = (char *)r + CHUNK_OFFSET;

    align_ptr = (char *)ALIGN_TO((unsigned long)ptr, MALLOC_ALIGN);
    offset = align_ptr - ptr;

    if (offset)
    {
        /* Initialize sizeof (malloc_chunk.size) bytes at
           align_ptr - CHUNK_OFFSET with the negative offset to the
           size field (at the start of the chunk).

           The negative offset to size from align_ptr - CHUNK_OFFSET is
           the size of any remaining padding minus CHUNK_OFFSET. This is
           equivalent to the total size of the padding, because the size of
           any remaining padding is the total size of the padding minus
           CHUNK_OFFSET.

           Note that the size of the padding must be at least CHUNK_OFFSET.

           The rest of the padding is not initialized. */
        *(long *)((char *)r + offset) = -offset;
    }

    assert(align_ptr + s <= (char *)r + alloc_size);
    return align_ptr;
}
#endif /* DEFINE_MALLOC */

#ifdef DEFINE_FREE
#define MALLOC_CHECK_DOUBLE_FREE

/** Function nano_free
  * Implementation of libc free.
  * Algorithm:
  *  Maintain a global free chunk singly linked list, headed by the global
  *  variable free_list.
  *  On free, insert the to-be-freed chunk into the free list at a position
  *  that keeps all chunks sorted by address from low to high, then merge
  *  with neighbouring chunks if adjacent.
  */
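/* For illustration (hypothetical addresses): with free chunks at 0x1000
 * and 0x1020, each 16 bytes, freeing a 16-byte chunk at 0x1010 first
 * merges it into its lower neighbour (0x1000 grows to 32 bytes), and the
 * merged chunk then absorbs 0x1020, leaving one 48-byte chunk on
 * free_list. */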
void nano_free (void * free_p)
{
    chunk * p_to_free;
    chunk * p, * q;

    if (free_p == NULL) return;

    p_to_free = get_chunk_from_ptr(free_p);

    MALLOC_LOCK;
    if (free_list == NULL)
    {
        /* Set first free list element */
        p_to_free->next = free_list;
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    if (p_to_free < free_list)
    {
        if ((char *)p_to_free + p_to_free->size == (char *)free_list)
        {
            /* Chunk to free is just before the first element of
             * free list */
            p_to_free->size += free_list->size;
            p_to_free->next = free_list->next;
        }
        else
        {
            /* Insert before current free_list */
            p_to_free->next = free_list;
        }
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    q = free_list;
    /* Walk through the free list to find the place to insert. */
    do
    {
        p = q;
        q = q->next;
    } while (q && q <= p_to_free);

    /* Now p <= p_to_free and either q == NULL or q > p_to_free
     * Try to merge with chunks immediately before/after it. */

    if ((char *)p + p->size == (char *)p_to_free)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk before it */
        p->size += p_to_free->size;
        /* If the merged chunk is also adjacent
         * to the chunk after it, merge again */
        if ((char *)p + p->size == (char *) q)
        {
            p->size += q->size;
            p->next = q->next;
        }
    }
#ifdef MALLOC_CHECK_DOUBLE_FREE
    else if ((char *)p + p->size > (char *)p_to_free)
    {
        /* Report a double free fault */
        errno = ENOMEM;
        MALLOC_UNLOCK;
        return;
    }
#endif
    else if ((char *)p_to_free + p_to_free->size == (char *) q)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk after it */
        p_to_free->size += q->size;
        p_to_free->next = q->next;
        p->next = p_to_free;
    }
    else
    {
        /* Not adjacent to any chunk. Just insert it, resulting
         * in a fragment. */
        p_to_free->next = q;
        p->next = p_to_free;
    }
    MALLOC_UNLOCK;
}
#endif /* DEFINE_FREE */

#ifdef DEFINE_CFREE
void nano_cfree(void * ptr)
{
    nano_free(ptr);
}
#endif /* DEFINE_CFREE */

#ifdef DEFINE_CALLOC
/* Function nano_calloc
 * Implement calloc by calling malloc and zeroing the result */
void * nano_calloc(malloc_size_t n, malloc_size_t elem)
{
    malloc_size_t bytes = n * elem;
    void * mem;

    /* Reject requests where n * elem overflows malloc_size_t and would
     * silently allocate a too-small block */
    if (elem != 0 && bytes / elem != n)
    {
        errno = ENOMEM;
        return NULL;
    }
    mem = nano_malloc(bytes);
    if (mem != NULL) memset(mem, 0, bytes);
    return mem;
}
#endif /* DEFINE_CALLOC */

#ifdef DEFINE_REALLOC
/* Function nano_realloc
 * Implement realloc by malloc + memcpy */
void * nano_realloc(void * ptr, malloc_size_t size)
{
    void * mem;
    chunk * p_chunk;
    malloc_size_t old_size;

    if (ptr == NULL) return nano_malloc(size);

    if (size == 0)
    {
        nano_free(ptr);
        return NULL;
    }

    /* Usable payload of the existing block: the chunk size minus the
     * header and any alignment padding in front of ptr */
    p_chunk = get_chunk_from_ptr(ptr);
    old_size = p_chunk->size - ((char *)ptr - (char *)p_chunk);

    /* TODO: There is a chance to shrink the chunk if the newly requested
     * size is much smaller */
    if (old_size >= size)
        return ptr;

    mem = nano_malloc(size);
    if (mem != NULL)
    {
        /* Copy only the old payload; copying `size` bytes would read
         * past the end of the old block when growing */
        memcpy(mem, ptr, old_size);
        nano_free(ptr);
    }
    return mem;
}
#endif /* DEFINE_REALLOC */

#ifdef DEFINE_MALLINFO
struct mallinfo current_mallinfo = {0,0,0,0,0,0,0,0,0,0};

struct mallinfo nano_mallinfo(void)
{
    char * sbrk_now;
    chunk * pf;
    size_t free_size = 0;
    size_t total_size;

    MALLOC_LOCK;

    if (sbrk_start == NULL) total_size = 0;
    else {
        sbrk_now = sbrk(0);

        if (sbrk_now == (void *)-1)
            total_size = (size_t)-1;
        else
            total_size = (size_t) (sbrk_now - sbrk_start);
    }

    for (pf = free_list; pf; pf = pf->next)
        free_size += pf->size;

    current_mallinfo.arena = total_size;
    current_mallinfo.fordblks = free_size;
    current_mallinfo.uordblks = total_size - free_size;

    MALLOC_UNLOCK;
    return current_mallinfo;
}
#endif /* DEFINE_MALLINFO */

#ifdef DEFINE_MALLOC_STATS
void nano_malloc_stats(void)
{
    nano_mallinfo();
    __i_fprintf(stderr, "max system bytes = %10u\n",
                current_mallinfo.arena);
    __i_fprintf(stderr, "system bytes     = %10u\n",
                current_mallinfo.arena);
    __i_fprintf(stderr, "in use bytes     = %10u\n",
                current_mallinfo.uordblks);
}
#endif /* DEFINE_MALLOC_STATS */

#ifdef DEFINE_MALLOC_USABLE_SIZE
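/* For illustration (assuming CHUNK_OFFSET == 4): a 16-byte chunk whose
 * data pointer needed no alignment padding reports 16 - 4 == 12 usable
 * bytes; the same chunk with 4 bytes of padding in front of the data
 * pointer reports 16 - 4 - 4 == 8. */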
malloc_size_t nano_malloc_usable_size(void * ptr)
{
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
    int size_or_offset = c->size;

    if (size_or_offset < 0)
    {
        /* Padding is used. Exclude the padding size */
        c = (chunk *)((char *)c + c->size);
        return c->size - CHUNK_OFFSET + size_or_offset;
    }
    return c->size - CHUNK_OFFSET;
}
#endif /* DEFINE_MALLOC_USABLE_SIZE */

#ifdef DEFINE_MEMALIGN
/* Function nano_memalign
 * Allocate a memory block aligned at a specific boundary.
 *   align: required alignment. Must be a power of 2. Returns NULL if
 *          align is not a power of 2. Behavior is undefined if align
 *          is bigger than the pointer value range.
 *   s: required size.
 * Return: allocated memory pointer aligned to align
 * Algorithm: Malloc a big enough block, pad the pointer up to the
 *            aligned address, then truncate and free the tail if too
 *            big. Record the offset between the aligned pointer and
 *            the original pointer in the padding area.
 */
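/* Usage sketch: memalign(64, 100) returns a pointer whose value is a
 * multiple of 64; any surplus in front of or behind the aligned block
 * is carved off and handed back to the free list via nano_free. */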
void * nano_memalign(size_t align, size_t s)
{
    chunk * chunk_p;
    malloc_size_t size_allocated, offset, ma_size, size_with_padding;
    char * allocated, * aligned_p;

    /* Return NULL if align isn't a power of 2 */
    if ((align & (align-1)) != 0) return NULL;

    align = MAX(align, MALLOC_ALIGN);
    ma_size = ALIGN_TO(MAX(s, MALLOC_MINSIZE), CHUNK_ALIGN);
    size_with_padding = ma_size + align - MALLOC_ALIGN;

    allocated = nano_malloc(size_with_padding);
    if (allocated == NULL) return NULL;

    chunk_p = get_chunk_from_ptr(allocated);
    aligned_p = (char *)ALIGN_TO(
                    (unsigned long)((char *)chunk_p + CHUNK_OFFSET),
                    (unsigned long)align);
    offset = aligned_p - ((char *)chunk_p + CHUNK_OFFSET);

    if (offset)
    {
        if (offset >= MALLOC_MINCHUNK)
        {
            /* Padding is too large, free it */
            chunk * front_chunk = chunk_p;
            chunk_p = (chunk *)((char *)chunk_p + offset);
            chunk_p->size = front_chunk->size - offset;
            front_chunk->size = offset;
            nano_free((char *)front_chunk + CHUNK_OFFSET);
        }
        else
        {
            /* Padding is used. Need to set a jump offset so the aligned
             * pointer can get back to the chunk head */
            assert(offset >= sizeof(int));
            *(long *)((char *)chunk_p + offset) = -offset;
        }
    }

    size_allocated = chunk_p->size;
    if ((char *)chunk_p + size_allocated >
        (aligned_p + ma_size + MALLOC_MINCHUNK))
    {
        /* Allocated much more than what's required for padding, free
         * the tail part */
        chunk * tail_chunk = (chunk *)(aligned_p + ma_size);
        chunk_p->size = aligned_p + ma_size - (char *)chunk_p;
        tail_chunk->size = size_allocated - chunk_p->size;
        nano_free((char *)tail_chunk + CHUNK_OFFSET);
    }
    return aligned_p;
}
#endif /* DEFINE_MEMALIGN */

#ifdef DEFINE_MALLOPT
int nano_mallopt(int parameter_number, int parameter_value)
{
    return 0;
}
#endif /* DEFINE_MALLOPT */

#ifdef DEFINE_VALLOC
void * nano_valloc(size_t s)
{
    return nano_memalign(MALLOC_PAGE_ALIGN, s);
}
#endif /* DEFINE_VALLOC */

#ifdef DEFINE_PVALLOC
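/* For illustration: valloc returns page-aligned memory of the requested
 * size, while pvalloc also rounds the size itself up to a whole page;
 * e.g. with MALLOC_PAGE_ALIGN == 0x1000, pvalloc(100) allocates 4096
 * page-aligned bytes. */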
void * nano_pvalloc(size_t s)
{
    return nano_valloc(ALIGN_TO(s, MALLOC_PAGE_ALIGN));
}
#endif /* DEFINE_PVALLOC */

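#ifdef NANO_MALLOC_EXAMPLE
/* Hypothetical usage sketch -- NANO_MALLOC_EXAMPLE is not a macro used by
 * this file; define it only in a standalone test build with all of the
 * DEFINE_* sections compiled in. The nano_* entry points above are
 * #defined to the standard libc names at the top of this file. */
static int nano_malloc_example(void)
{
    void *a = malloc(24);        /* first fit from free_list, or sbrk */
    void *b = calloc(4, 8);      /* 32 zero-filled bytes */
    void *c = memalign(64, 100); /* pointer value is a multiple of 64 */
    void *tmp;

    if (a == NULL || b == NULL || c == NULL)
    {
        free(a); free(b); free(c); /* free(NULL) is a no-op here */
        return -1;
    }

    tmp = realloc(a, 100);       /* may move; old payload is copied */
    if (tmp != NULL)
        a = tmp;

    free(c);                     /* chunks go back, address-sorted, */
    free(b);                     /* and are coalesced when adjacent */
    free(a);
    return 0;
}
#endif /* NANO_MALLOC_EXAMPLE */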