1 /*
2 * Copyright (C) 2000 Alan Robertson <alanr@unix.sh>
3 *
4 * This software licensed under the GNU LGPL.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #define HA_MALLOC_ORIGINAL
21 #include <lha_internal.h>
22 #include <unistd.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #ifdef HAVE_STDINT_H
26 #include <stdint.h>
27 #endif /* HAVE_STDINT_H */
28 #include <string.h>
29 #include <errno.h>
30 #ifndef BSD
31 #ifdef HAVE_MALLOC_H
32 # include <malloc.h>
33 #endif
34 #endif
35 #include <clplumbing/cl_malloc.h>
36 #include <clplumbing/cl_log.h>
37 #include <clplumbing/longclock.h>
38
39 #include <ltdl.h>
40
41 #ifndef _CLPLUMBING_CLMALLOC_NATIVE_H
42 static cl_mem_stats_t default_memstats;
43 static volatile cl_mem_stats_t * memstats = &default_memstats;
44
45 /*
46 * Compile time malloc debugging switches:
47 *
48 * MARK_PRISTINE - puts known byte pattern in freed memory
49 * Good at finding "use after free" cases
50 * Cheap in memory, but expensive in CPU
51 *
52 * MAKE_GUARD - puts a known pattern *after* allocated memory
53 * Good at finding overrun problems after the fact
54 * Cheap in CPU, adds a few bytes to each malloc item
55 *
56 */
57
58 #define MARK_PRISTINE 1 /* Expensive in CPU time */
59 #undef MARK_PRISTINE
60 #define MAKE_GUARD 1 /* Adds 'n' bytes memory - cheap in CPU*/
61 #define USE_ASSERTS 1
62 #define DUMPONERR 1
63 #define RETURN_TO_MALLOC 1
64 #undef RETURN_TO_MALLOC
65
66 #ifndef DUMPONERR
67 # define DUMPIFASKED() /* nothing */
68 #else
69 # define DUMPIFASKED() {abort();}
70 #endif
71
72
73 /*
74 *
75 * Malloc wrapper functions
76 *
77 * I wrote these so we can better track memory leaks, etc. and verify
78 * that the system is stable in terms of memory usage.
79 *
80 * For our purposes, these functions are a somewhat faster than using
81 * malloc directly (although they use a bit more memory)
82 *
83 * The general strategy is loosely related to the buddy system,
84 * except very simple, well-suited to our continuous running
85 * nature, and the constancy of the requests and messages.
86 *
87 * We keep an array of linked lists, each for a different size
88 * buffer. If we need a buffer larger than the largest one provided
89 * by the list, we go directly to malloc.
90 *
91 * Otherwise, we keep return them to the appropriate linked list
92 * when we're done with them, and reuse them from the list.
93 *
94 * We never coalesce buffers on our lists, and we never free them.
95 *
96 * It's very simple. We get usage stats. It makes me happy.
97 */
98
99 #define HA_MALLOC_MAGIC 0xFEEDBEEFUL
100 #define HA_FREE_MAGIC 0xDEADBEEFUL
101
102
103 /*
104 * We put a struct cl_mhdr in front of every malloc item.
105 * This means each malloc item is at least 12 bytes bigger than it theoretically
106 * needs to be. But, it allows this code to be fast and recognize
107 * multiple free attempts, and memory corruption *before* the object
108 *
109 * It's probably possible to combine these fields a bit,
110 * since bucket and reqsize are only needed for allocated items,
111 * both are bounded in value, and fairly strong integrity checks apply
112 * to them. But then we wouldn't be able to tell *quite* as reliably
113 * if someone gave us an item to free that we didn't allocate...
114 *
115 * Could even make the bucket and reqsize objects into 16-bit ints...
116 *
117 * The idea of getting it all down into 32-bits of overhead is
118 * an interesting thought...
119 *
120 * But some architectures have alignment constraints. For instance, sparc
121 * requires that double-word accesses be aligned on double-word boundaries.
122 * Thus if the requested space is bigger than a double-word, then cl_mhdr
123 * should, for safety, be a double-word multiple (minimum 8bytes, 64bits).
124
125 */
126
127 #ifdef HA_MALLOC_TRACK
128 # define HA_MALLOC_OWNER 64
129 struct cl_bucket;
130 #endif
131
/*
 * Hidden header placed immediately before every cl_malloc item
 * (see the BHDR()/CBHDR() macros above).
 */
struct cl_mhdr {
#	ifdef HA_MALLOC_MAGIC
	unsigned long	magic;	/* Must match HA_*_MAGIC */
#endif
#	ifdef HA_MALLOC_TRACK
	char			owner[HA_MALLOC_OWNER];	/* "file:function:line" tag */
	struct cl_bucket *	left;			/* live-allocation list links */
	struct cl_bucket *	right;
	int			dumped;			/* already reported by cl_malloc_dump_allocated? */
	longclock_t		mtime;			/* allocation timestamp */
#endif
	size_t		reqsize;	/* size the caller actually requested */
	int		bucket;		/* owning bucket index, or NOBUCKET */
};

/*
 * Free-list node: the hidden header plus the freelist link.  The 'next'
 * link sits at the start of the user-data area while an item is free
 * (pristoff in cl_malloc_init() is computed to skip past it).
 */
struct cl_bucket {
	struct cl_mhdr		hdr;
	struct cl_bucket *	next;
};
151
152 #define NUMBUCKS 12
153 #define NOBUCKET (NUMBUCKS)
154
155 static struct cl_bucket* cl_malloc_buckets[NUMBUCKS];
156 static size_t cl_bucket_sizes[NUMBUCKS];
157 static size_t buckminpow2 = 0L;
158
159 static int cl_malloc_inityet = 0;
160 static size_t cl_malloc_hdr_offset = sizeof(struct cl_mhdr);
161
162 static void* cl_new_mem(size_t size, int numbuck);
163 static void cl_malloc_init(void);
164 static void cl_dump_item(const struct cl_bucket*b);
165
166 #ifdef MARK_PRISTINE
167 # define PRISTVALUE 0xff
168 static int cl_check_is_pristine(const void* v, unsigned size);
169 static void cl_mark_pristine(void* v, unsigned size);
170 static int pristoff;
171 #endif
172
173 #define BHDR(p) ((struct cl_bucket*)(void*)(((char*)p)-cl_malloc_hdr_offset))
174 #define CBHDR(p) ((const struct cl_bucket*)(const void*)(((const char*)p)-cl_malloc_hdr_offset))
175 #define MEMORYSIZE(p)(CBHDR(p)->hdr.reqsize)
176
177 #define MALLOCSIZE(allocsize) ((allocsize) + cl_malloc_hdr_offset + GUARDSIZE)
178 #define MAXMALLOC (SIZE_MAX-(MALLOCSIZE(0)+1))
179
180 #ifdef MAKE_GUARD
181 # define GUARDLEN 4
182 static const unsigned char cl_malloc_guard[] =
183 #if GUARDLEN == 1
184 {0xA5};
185 #endif
186 #if GUARDLEN == 2
187 {0x5A, 0xA5};
188 #endif
189 #if GUARDLEN == 4
190 {0x5A, 0xA5, 0x5A, 0xA5};
191 #endif
192 # define GUARDSIZE sizeof(cl_malloc_guard)
193 # define ADD_GUARD(cp) (memcpy((((char*)cp)+MEMORYSIZE(cp)), cl_malloc_guard, sizeof(cl_malloc_guard)))
194 # define GUARD_IS_OK(cp) (memcmp((((const char*)cp)+MEMORYSIZE(cp)), \
195 cl_malloc_guard, sizeof(cl_malloc_guard)) == 0)
196 # define CHECK_GUARD_BYTES(cp, msg) { \
197 if (!GUARD_IS_OK(cp)) { \
198 cl_log(LOG_ERR, "%s: guard corrupted at 0x%lx", msg \
199 , (unsigned long)cp); \
200 cl_dump_item(CBHDR(cp)); \
201 DUMPIFASKED(); \
202 } \
203 }
204 #else
205 # define GUARDSIZE 0
206 # define ADD_GUARD(cp) /* */
207 # define GUARD_IS_OK(cp) (1)
208 # define CHECK_GUARD_BYTES(cp, msg) /* */
209 #endif
210
211 #define MALLOCROUND 4096 /* Round big mallocs up to a multiple of this size */
212
213
214 #ifdef HA_MALLOC_TRACK
215
216 static struct cl_bucket * cl_malloc_track_root = NULL;
217
/* Record "file:function:line" as the owner of the allocation at 'ptr'
 * in its hidden header; snprintf truncates to HA_MALLOC_OWNER bytes. */
static void
cl_ptr_tag(void *ptr, const char *file, const char *function, const int line)
{
	struct cl_bucket*	bhdr = BHDR(ptr);
	snprintf(bhdr->hdr.owner, HA_MALLOC_OWNER, "%s:%s:%d",
		file, function, line);
}
225
/* Push the item's hidden header onto the head of the global doubly-linked
 * list of live allocations, stamping its allocation time. */
static void
cl_ptr_track(void *ptr)
{
	struct cl_bucket*	bhdr = BHDR(ptr);

#if defined(USE_ASSERTS)
	/* Item must not already be on the list, and the head must be a head */
	g_assert(bhdr->hdr.left == NULL);
	g_assert(bhdr->hdr.right == NULL);
	g_assert((cl_malloc_track_root == NULL) || (cl_malloc_track_root->hdr.left == NULL));
#endif

	bhdr->hdr.dumped = 0;
	bhdr->hdr.mtime = time_longclock();

	if (cl_malloc_track_root == NULL) {
		cl_malloc_track_root = bhdr;
	} else {
		/* Insert at the head of the list */
		bhdr->hdr.right = cl_malloc_track_root;
		cl_malloc_track_root->hdr.left = bhdr;
		cl_malloc_track_root = bhdr;
	}
}
248
/* Unlink the item's hidden header from the live-allocation list and
 * clear its link fields so a later cl_ptr_track() passes its asserts. */
static void
cl_ptr_release(void *ptr)
{
	struct cl_bucket*	bhdr = BHDR(ptr);

	/* cl_log(LOG_DEBUG, "cl_free: Freeing memory belonging to %s"
	,	bhdr->hdr.owner); */

#if defined(USE_ASSERTS)
	g_assert(cl_malloc_track_root != NULL);
	g_assert(cl_malloc_track_root->hdr.left == NULL);
#endif

	if (bhdr->hdr.left != NULL) {
		bhdr->hdr.left->hdr.right=bhdr->hdr.right;
	}

	if (bhdr->hdr.right != NULL) {
		bhdr->hdr.right->hdr.left=bhdr->hdr.left;
	}

	/* Advance the head if we just removed it */
	if (cl_malloc_track_root == bhdr) {
		cl_malloc_track_root=bhdr->hdr.right;
	}

	bhdr->hdr.left = NULL;
	bhdr->hdr.right = NULL;
}
277
/* Start the allocation tracker with an empty live-allocation list */
static void
cl_ptr_init(void)
{
	cl_malloc_track_root = NULL;
}
283
284 int
cl_malloc_dump_allocated(int log_level,gboolean filter)285 cl_malloc_dump_allocated(int log_level, gboolean filter)
286 {
287 int lpc = 0;
288 struct cl_bucket* cursor = cl_malloc_track_root;
289 longclock_t time_diff;
290
291 cl_log(LOG_INFO, "Dumping allocated memory buffers:");
292
293 while (cursor != NULL) {
294 if(filter && cursor->hdr.dumped) {
295
296 } else if(log_level > LOG_DEBUG) {
297 } else if(filter) {
298 lpc++;
299 cl_log(log_level, "cl_malloc_dump: %p owner %s, size %d"
300 , cursor+cl_malloc_hdr_offset
301 , cursor->hdr.owner
302 , (int)cursor->hdr.reqsize);
303 } else {
304 lpc++;
305 time_diff = sub_longclock(time_longclock(), cursor->hdr.mtime);
306 cl_log(log_level, "cl_malloc_dump: %p owner %s, size %d, dumped %d, age %lu ms"
307 , cursor+cl_malloc_hdr_offset
308 , cursor->hdr.owner
309 , (int)cursor->hdr.reqsize
310 , cursor->hdr.dumped
311 , longclockto_long(time_diff));
312 }
313 cursor->hdr.dumped = 1;
314 cursor = cursor->hdr.right;
315 }
316
317 cl_log(LOG_INFO, "End dump.");
318 return lpc;
319 }
320 #endif
321 static const int LogTable256[] =
322 {
323 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
324 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
325 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
326 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
327 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
328 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
329 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
330 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
331 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
332 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
333 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
334 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
335 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
336 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
337 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
338 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
339 };
340 #define POW2BYTE(b) (LogTable256[b])
341 #define BYTE3(i) (((i)&0xFF000000)>>24)
342 #define BYTE2(i) (((i)&0x00FF0000)>>16)
343 #define BYTE1(i) (((i)&0x0000FF00)>>8)
344 #define BYTE0(i) ((i)&0x000000FF)
345
346 /* Works for malloc bucket sizes up to 2^8 */
347 #define POW21BYTE(i) (POW2BYTE(BYTE0(i)))
348
349 /* Works for malloc bucket sizes up to 2^16 */
350 #define POW22BYTE(i) ((BYTE1(i) != 0x00)? (POW2BYTE(BYTE1(i))+8) \
351 : (POW21BYTE(i)))
352
353 /* Works for malloc bucket sizes up to 2^24 */
354 #define POW23BYTE(i) ((BYTE2(i) != 0x00)? (POW2BYTE(BYTE2(i))+16) \
355 : POW22BYTE(i))
356
357 /* Works for malloc bucket sizes up to 2^32 */
358 #define POW24BYTE(i) ((BYTE3(i) != 0x00)? (POW2BYTE(BYTE3(i))+24) \
359 : POW23BYTE(i))
360
361 /* #define INT2POW2(i) POW24BYTE(i) / * This would allow 2G in our largest malloc chain */
362 /* which I don't think we need */
363 #define INT2POW2(i) POW23BYTE(i) /* This allows up to about 16 Mbytes in our largest malloc chain */
364 /* and it's a little faster than the one above */
365
366
/*
 * cl_malloc: malloc clone.
 *
 * Returns memory from the appropriate power-of-two free list when one has
 * an item available; otherwise falls through to cl_new_mem() (real malloc).
 * Returns NULL for zero-sized or over-large requests, or on failure.
 */

void *
cl_malloc(size_t size)
{
#if 0
	int			j;
#endif
	int			numbuck = NOBUCKET;
	struct cl_bucket*	buckptr = NULL;
	void*			ret;

	/* Unlike malloc(0), a zero-sized request is refused outright */
	if(!size) {
		cl_log(LOG_ERR
		,	"%s: refusing to allocate zero sized block"
		,	__FUNCTION__
		);
		return NULL;
	}
	if (size > MAXMALLOC) {
		return NULL;
	}
	if (!cl_malloc_inityet) {
		cl_malloc_init();
	}

#if 1
	/*
	 * NOTE: This restricts bucket sizes to be powers of two
	 * - which is OK with me - and how the code has always worked :-D
	 */
	/* INT2POW2(size-1) is ceil(log2(size)); subtracting buckminpow2
	 * maps the smallest bucket size to index 0 */
	numbuck = INT2POW2(size-1)-buckminpow2;
	numbuck = MAX(0, numbuck);
	if (numbuck < NUMBUCKS) {
		/* Sanity check on the size->bucket mapping; the second
		 * clause looks unreachable when size > bucket size --
		 * NOTE(review): confirm intent, it appears to be dead code */
		if (size <= cl_bucket_sizes[numbuck]
		||	(numbuck > 0 && size <= (cl_bucket_sizes[numbuck]/2))) {
			buckptr = cl_malloc_buckets[numbuck];
		}else{
			cl_log(LOG_ERR
			,	"%s: bucket size bug: %lu bytes in %lu byte bucket #%d"
			,	__FUNCTION__
			,	(unsigned long)size
			,	(unsigned long)cl_bucket_sizes[numbuck]
			,	numbuck);

		}
	}
#else
	/*
	 * Find which bucket would have buffers of the requested size
	 */
	for (j=0; j < NUMBUCKS; ++j) {
		if (size <= cl_bucket_sizes[j]) {
			numbuck = j;
			buckptr = cl_malloc_buckets[numbuck];
			break;
		}
	}
#endif

	/*
	 * Pull it out of the linked list of free buffers if we can...
	 */

	if (buckptr == NULL) {
		ret = cl_new_mem(size, numbuck);
	}else{
		/* Pop the head of this bucket's free list */
		cl_malloc_buckets[numbuck] = buckptr->next;
		buckptr->hdr.reqsize = size;
		ret = (((char*)buckptr)+cl_malloc_hdr_offset);

#ifdef MARK_PRISTINE
		/* Use-after-free detection: freed memory must still hold
		 * the pristine fill pattern when it comes back to us */
		{
			int	bucksize = cl_bucket_sizes[numbuck];
			if (!cl_check_is_pristine(ret, bucksize)) {
				cl_log(LOG_ERR
				,	"attempt to allocate memory"
				" which is not pristine.");
				cl_dump_item(buckptr);
				DUMPIFASKED();
			}
		}
#endif

#ifdef HA_MALLOC_MAGIC
		/* A free-list item must carry the "free" magic; anything
		 * else means double-allocation or header corruption */
		switch (buckptr->hdr.magic) {

			case HA_FREE_MAGIC:
				break;

			case HA_MALLOC_MAGIC:
				cl_log(LOG_ERR
				,	"attempt to allocate memory"
				" already allocated at 0x%lx"
				,	(unsigned long)ret);
				cl_dump_item(buckptr);
				DUMPIFASKED();
				ret=NULL;
				break;

			default:
				cl_log(LOG_ERR
				,	"corrupt malloc buffer at 0x%lx"
				,	(unsigned long)ret);
				cl_dump_item(buckptr);
				DUMPIFASKED();
				ret=NULL;
				break;
		}
		buckptr->hdr.magic = HA_MALLOC_MAGIC;
#endif /* HA_MALLOC_MAGIC */
		if (memstats) {
			memstats->nbytes_req += size;
			memstats->nbytes_alloc
			+=	MALLOCSIZE(cl_bucket_sizes[numbuck]);
		}

	}

	if (ret && memstats) {
#if 0 && defined(HAVE_MALLINFO)
		/* mallinfo is too expensive to use :-( */
		struct mallinfo	i = mallinfo();
		memstats->arena = i.arena;
#endif
		memstats->numalloc++;
	}
	if (ret) {
#ifdef HA_MALLOC_TRACK
		/* If we were _always_ called via the wrapper functions,
		 * this wouldn't be necessary, but we aren't, some use
		 * function pointers directly to cl_malloc() */
		cl_ptr_track(ret);
		cl_ptr_tag(ret, "cl_malloc.c", "cl_malloc", 0);
#endif
		ADD_GUARD(ret);
	}
	return(ret);
}
508
/*
 * cl_is_allocated: report whether 'ptr' looks like live cl_malloc storage.
 *
 * With magic numbers compiled in, the hidden header's magic must be the
 * "allocated" value and (when guards are enabled) the trailing guard bytes
 * must be intact; a good magic with a bad guard is logged as corruption.
 * Without magic numbers, any non-NULL pointer is accepted.
 */
int
cl_is_allocated(const void *ptr)
{
#ifdef HA_MALLOC_MAGIC
	if (ptr == NULL) {
		return FALSE;
	}
	if (CBHDR(ptr)->hdr.magic != HA_MALLOC_MAGIC) {
		return FALSE;
	}
	if (!GUARD_IS_OK(ptr)) {
		cl_log(LOG_ERR
		,	"cl_is_allocated: supplied storage is guard-corrupted at 0x%lx"
		,	(unsigned long)ptr);
		cl_dump_item(CBHDR(ptr));
		DUMPIFASKED();
		return FALSE;
	}
	return TRUE;
#else
	return (ptr != NULL);
#endif
}
528
/*
 * cl_free: "free" clone
 *
 * Validates the hidden header (magic number, guard bytes), then either
 * pushes the item back on its bucket free list for reuse, or hands it to
 * the real free() if it never came from a bucket (or RETURN_TO_MALLOC).
 */

void
cl_free(void *ptr)
{
	int			bucket;
	struct cl_bucket*	bhdr;

	if (!cl_malloc_inityet) {
		cl_malloc_init();
	}

	if (ptr == NULL) {
		cl_log(LOG_ERR, "attempt to free NULL pointer in cl_free()");
		DUMPIFASKED();
		return;
	}

	/* Find the beginning of our "hidden" structure */

	bhdr = BHDR(ptr);

#ifdef HA_MALLOC_MAGIC
	switch (bhdr->hdr.magic) {
		case HA_MALLOC_MAGIC:
			break;

		case HA_FREE_MAGIC:
			/* Double-free detection */
			cl_log(LOG_ERR
			,	"cl_free: attempt to free already-freed"
			" object at 0x%lx"
			,	(unsigned long)ptr);
			cl_dump_item(bhdr);
			DUMPIFASKED();
			return;
			break;
		default:
			/* Not something we allocated (or header smashed) */
			cl_log(LOG_ERR, "cl_free: Bad magic number"
			" in object at 0x%lx"
			,	(unsigned long)ptr);
			cl_dump_item(bhdr);
			DUMPIFASKED();
			return;
			break;
	}
#endif
	if (!GUARD_IS_OK(ptr)) {
		/* Overrun detection via the guard bytes after the user area */
		cl_log(LOG_ERR
		,	"cl_free: attempt to free guard-corrupted"
		" object at 0x%lx", (unsigned long)ptr);
		cl_dump_item(bhdr);
		DUMPIFASKED();
		return;
	}
#ifdef HA_MALLOC_TRACK
	cl_ptr_release(ptr);
#endif
	bucket = bhdr->hdr.bucket;
#ifdef HA_MALLOC_MAGIC
	bhdr->hdr.magic = HA_FREE_MAGIC;
#endif

	/*
	 * Return it to the appropriate bucket (linked list), or just free
	 * it if it didn't come from one of our lists...
	 */

#ifndef RETURN_TO_MALLOC
	if (bucket >= NUMBUCKS) {
#endif
#ifdef MARK_PRISTINE
		/* Is this size right? */
		cl_mark_pristine(ptr, bhdr->hdr.reqsize);
#endif
		if (memstats) {
			/* NOTE(review): cl_new_mem() rounds non-bucket
			 * allocations up to a MALLOCROUND multiple, so these
			 * MALLOCSIZE(reqsize) decrements can under-count what
			 * was added -- confirm whether the stat drift matters */
			memstats->nbytes_req -= bhdr->hdr.reqsize;
			memstats->nbytes_alloc -= MALLOCSIZE(bhdr->hdr.reqsize);
			memstats->mallocbytes -= MALLOCSIZE(bhdr->hdr.reqsize);
		}
		free(bhdr);
#ifndef RETURN_TO_MALLOC
	}else{
		/* Bucketed item: push it back on its free list for reuse */
		int	bucksize = cl_bucket_sizes[bucket];
#if defined(USE_ASSERTS)
		g_assert(bhdr->hdr.reqsize <= cl_bucket_sizes[bucket]);
#	endif
		if (memstats) {
			memstats->nbytes_req -= bhdr->hdr.reqsize;
			memstats->nbytes_alloc -= MALLOCSIZE(bucksize);
		}
		bhdr->next = cl_malloc_buckets[bucket];
		cl_malloc_buckets[bucket] = bhdr;
#ifdef MARK_PRISTINE
		cl_mark_pristine(ptr, bucksize);
#	endif
	}
#endif /* RETURN_TO_MALLOC */
	if (memstats) {
		memstats->numfree++;
	}
}
632
633 void*
cl_realloc(void * ptr,size_t newsize)634 cl_realloc(void *ptr, size_t newsize)
635 {
636 struct cl_bucket* bhdr;
637 int bucket;
638 size_t bucksize;
639
640 if (!cl_malloc_inityet) {
641 cl_malloc_init();
642 }
643
644 if (memstats) {
645 memstats->numrealloc++;
646 }
647 if (ptr == NULL) {
648 /* NULL is a legal 'ptr' value for realloc... */
649 return cl_malloc(newsize);
650 }
651 if (newsize == 0) {
652 /* realloc() is the most redundant interface ever */
653 cl_free(ptr);
654 return NULL;
655 }
656
657 /* Find the beginning of our "hidden" structure */
658
659 bhdr = BHDR(ptr);
660
661 #ifdef HA_MALLOC_MAGIC
662 switch (bhdr->hdr.magic) {
663 case HA_MALLOC_MAGIC:
664 break;
665
666 case HA_FREE_MAGIC:
667 cl_log(LOG_ERR
668 , "cl_realloc: attempt to realloc already-freed"
669 " object at 0x%lx"
670 , (unsigned long)ptr);
671 cl_dump_item(bhdr);
672 DUMPIFASKED();
673 return NULL;
674 break;
675 default:
676 cl_log(LOG_ERR, "cl_realloc: Bad magic number"
677 " in object at 0x%lx"
678 , (unsigned long)ptr);
679 cl_dump_item(bhdr);
680 DUMPIFASKED();
681 return NULL;
682 break;
683 }
684 #endif
685 CHECK_GUARD_BYTES(ptr, "cl_realloc");
686
687 bucket = bhdr->hdr.bucket;
688
689 /*
690 * Figure out which bucket it came from... If any...
691 */
692
693 if (bucket >= NUMBUCKS) {
694 /* Not from our bucket-area... Call realloc... */
695 if (memstats) {
696 memstats->nbytes_req -= bhdr->hdr.reqsize;
697 memstats->nbytes_alloc -= MALLOCSIZE(bhdr->hdr.reqsize);
698 memstats->mallocbytes -= MALLOCSIZE(bhdr->hdr.reqsize);
699 memstats->nbytes_req += newsize;
700 memstats->nbytes_alloc += MALLOCSIZE(newsize);
701 memstats->mallocbytes += MALLOCSIZE(newsize);
702 }
703 #ifdef HA_MALLOC_TRACK
704 cl_ptr_release(ptr);
705 #endif
706 bhdr = realloc(bhdr, newsize + cl_malloc_hdr_offset + GUARDSIZE);
707 if (!bhdr) {
708 return NULL;
709 }
710 #ifdef HA_MALLOC_TRACK
711 cl_ptr_track(ptr);
712 cl_ptr_tag(ptr, "cl_malloc.c", "realloc", 0);
713 #endif
714 bhdr->hdr.reqsize = newsize;
715 ptr = (((char*)bhdr)+cl_malloc_hdr_offset);
716 ADD_GUARD(ptr);
717 CHECK_GUARD_BYTES(ptr, "cl_realloc - real realloc return value");
718 /* Not really a memory leak... BEAM thinks so though... */
719 return ptr; /*memory leak*/
720 }
721 bucksize = cl_bucket_sizes[bucket];
722 #if defined(USE_ASSERTS)
723 g_assert(bhdr->hdr.reqsize <= bucksize);
724 #endif
725 if (newsize > bucksize) {
726 /* Need to allocate new space for it */
727 void* newret = cl_malloc(newsize);
728 if (newret != NULL) {
729 memcpy(newret, ptr, bhdr->hdr.reqsize);
730 CHECK_GUARD_BYTES(newret, "cl_realloc - cl_malloc case");
731 }
732 cl_free(ptr);
733 return newret;
734 }
735
736 /* Amazing! It fits into the space previously allocated for it! */
737 bhdr->hdr.reqsize = newsize;
738 if (memstats) {
739 memstats->nbytes_req -= bhdr->hdr.reqsize;
740 memstats->nbytes_req += newsize;
741 }
742 ADD_GUARD(ptr);
743 CHECK_GUARD_BYTES(ptr, "cl_realloc - fits in existing space");
744 return ptr;
745 }
746
/*
 * cl_new_mem: use the real malloc to allocate some new memory
 *
 * Allocates room for the hidden header + data area + guard bytes,
 * initializes the header, and returns the user-data address.
 */

static void*
cl_new_mem(size_t size, int numbuck)
{
	struct cl_bucket*	hdrret;
	size_t			allocsize;
	size_t			mallocsize;

	/* Bucketed requests get the full bucket size so the block can be
	 * reused later for any request mapping to this bucket */
	if (numbuck < NUMBUCKS) {
		allocsize = cl_bucket_sizes[numbuck];
	}else{
		allocsize = size;
	}

	mallocsize = MALLOCSIZE(allocsize);
	/* Non-bucket (big) allocations round up to a MALLOCROUND multiple */
	if (numbuck == NOBUCKET) {
		mallocsize = (((mallocsize + (MALLOCROUND-1))/MALLOCROUND)*MALLOCROUND);
	}

	if ((hdrret = malloc(mallocsize)) == NULL) {
		return NULL;
	}

	hdrret->hdr.reqsize = size;
	hdrret->hdr.bucket = numbuck;
#ifdef HA_MALLOC_MAGIC
	hdrret->hdr.magic = HA_MALLOC_MAGIC;
#endif
#ifdef HA_MALLOC_TRACK
	/* mtime/list links are finished off by cl_ptr_track() */
	hdrret->hdr.left = NULL;
	hdrret->hdr.right = NULL;
	hdrret->hdr.owner[0] = '\0';
	hdrret->hdr.dumped = 0;
#endif

	if (memstats) {
		memstats->nbytes_alloc += mallocsize;
		memstats->nbytes_req += size;
		memstats->mallocbytes += mallocsize;
	}
	/* BEAM BUG -- this is NOT a leak */
	/* Caller's pointer is just past our hidden header */
	return(((char*)hdrret)+cl_malloc_hdr_offset);	/*memory leak*/
}
793
794
795 /*
796 * cl_calloc: calloc clone
797 */
798
799 void *
cl_calloc(size_t nmemb,size_t size)800 cl_calloc(size_t nmemb, size_t size)
801 {
802 void * ret = cl_malloc(nmemb*size);
803
804 if (ret != NULL) {
805 memset(ret, 0, nmemb*size);
806 #ifdef HA_MALLOC_TRACK
807 cl_ptr_tag(ret, "cl_malloc.c", "cl_calloc", 0);
808 #endif
809 }
810
811 return(ret);
812 }
813
814 #ifdef HA_MALLOC_TRACK
/* Tracked variant of cl_calloc: allocate zeroed memory, then tag it with
 * the caller's file/function/line as its owner. */
void *
cl_calloc_track(size_t nmemb, size_t size,
		const char *file, const char *function, const int line)
{
	void*	mem = cl_calloc(nmemb, size);

	if (mem != NULL) {
		cl_ptr_tag(mem, file, function, line);
	}
	return mem;
}
829
/* Tracked variant of cl_realloc: resize, then re-tag the (possibly new)
 * block with the caller's file/function/line as its owner. */
void*
cl_realloc_track(void *ptr, size_t newsize,
		const char *file, const char *function, const int line)
{
	void*	mem = cl_realloc(ptr, newsize);

	if (mem != NULL) {
		cl_ptr_tag(mem, file, function, line);
	}
	return mem;
}
844
/* Tracked variant of cl_malloc: allocate, then replace the generic
 * "cl_malloc.c:cl_malloc:0" tag with the real caller's location. */
void *
cl_malloc_track(size_t size,
		const char *file, const char *function, const int line)
{
	void*	mem = cl_malloc(size);

	if (mem != NULL) {
		/* Retag with the proper owner. */
		cl_ptr_tag(mem, file, function, line);
	}
	return mem;
}
859
860 #endif
861
862 /*
863 * cl_strdup: strdup clone
864 */
865
866 char *
cl_strdup(const char * s)867 cl_strdup(const char *s)
868 {
869 void * ret;
870
871 if (!s) {
872 cl_log(LOG_ERR, "cl_strdup(NULL)");
873 return(NULL);
874 }
875 ret = cl_malloc((strlen(s) + 1) * sizeof(char));
876
877 if (ret) {
878 strcpy(ret, s);
879 }
880
881 return(ret);
882 }
883
884
/*
 * cl_malloc_init(): initialize our malloc wrapper things
 *
 * Sets up the header offset, the bucket size table, and (when enabled)
 * the pristine-fill offset and the allocation tracker.
 */

static void
cl_malloc_init()
{
	int	j;
	size_t	cursize = 32;	/* smallest bucket size, in bytes */
	int	llcount = 1;

	cl_malloc_inityet = 1;

	/* cl_malloc_hdr_offset should be a double-word multiple */
	/* (keeps user data aligned on strict-alignment machines like sparc;
	 * see the alignment discussion above struct cl_mhdr) */
	while (cl_malloc_hdr_offset > (llcount * sizeof(long long))) {
		llcount++;
	}
	cl_malloc_hdr_offset = llcount * sizeof(long long);


	/* Bucket sizes are successive powers of two: 32, 64, 128, ... */
	for (j=0; j < NUMBUCKS; ++j) {
		cl_malloc_buckets[j] = NULL;

		cl_bucket_sizes[j] = cursize;
		cursize <<= 1;
	}
	/* log2 of the smallest bucket size: used by cl_malloc() to turn a
	 * request size into a bucket index */
	buckminpow2 = INT2POW2(cl_bucket_sizes[0]-1);
#ifdef MARK_PRISTINE
	{
		/* The 'next' freelist link stays live while an item is on a
		 * free list, so the pristine fill starts just past it */
		struct cl_bucket	b;
		pristoff = (unsigned char*)&(b.next)-(unsigned char*)&b;
		pristoff += sizeof(b.next);
	}
#endif
#ifdef HA_MALLOC_TRACK
	cl_ptr_init();
#endif
}
923
924 void
cl_malloc_setstats(volatile cl_mem_stats_t * stats)925 cl_malloc_setstats(volatile cl_mem_stats_t *stats)
926 {
927 if (memstats && stats) {
928 *stats = *memstats;
929 }
930 memstats = stats;
931 }
932
/* Return the statistics structure currently in use (may be NULL) */
volatile cl_mem_stats_t *
cl_malloc_getstats(void)
{
	return memstats;
}
938
/*
 * cl_dump_item: log one malloc item's header fields plus a hex/ASCII dump
 * of its user data and guard bytes (diagnostic aid for the corruption
 * checks elsewhere in this file).
 */
static void
cl_dump_item(const struct cl_bucket*b)
{
	const unsigned char *	cbeg;
	const unsigned char *	cend;
	const unsigned char *	cp;
	cl_log(LOG_INFO, "Dumping cl_malloc item @ 0x%lx, bucket address: 0x%lx"
	,	((unsigned long)b)+cl_malloc_hdr_offset, (unsigned long)b);
#ifdef HA_MALLOC_TRACK
	cl_log(LOG_INFO, "Owner: %s"
	,	b->hdr.owner);
#endif
#ifdef HA_MALLOC_MAGIC
	cl_log(LOG_INFO, "Magic number: 0x%lx reqsize=%ld"
	", bucket=%d, bucksize=%ld"
	,	b->hdr.magic
	,	(long)b->hdr.reqsize, b->hdr.bucket
	,	(long)(b->hdr.bucket >= NUMBUCKS ? 0
	:	cl_bucket_sizes[b->hdr.bucket]));
#else
	cl_log(LOG_INFO, "reqsize=%ld"
	", bucket=%d, bucksize=%ld"
	,	(long)b->hdr.reqsize, b->hdr.bucket
	,	(long)(b->hdr.bucket >= NUMBUCKS ? 0
	:	cl_bucket_sizes[b->hdr.bucket]));
#endif
	cbeg = ((const unsigned char *)b)+cl_malloc_hdr_offset;
	cend = cbeg+b->hdr.reqsize+GUARDSIZE;

	/* NOTE(review): each iteration reads 4 bytes, so when
	 * reqsize+GUARDSIZE is not a multiple of sizeof(unsigned) this can
	 * read up to 3 bytes past 'cend' -- confirm this is acceptable for
	 * a debug-only dump */
	for (cp=cbeg; cp < cend; cp+= sizeof(unsigned)) {
		cl_log(LOG_INFO, "%02x %02x %02x %02x \"%c%c%c%c\""
		,	(unsigned)cp[0], (unsigned)cp[1]
		,	(unsigned)cp[2], (unsigned)cp[3]
		,	cp[0], cp[1], cp[2], cp[3]);
	}
}
975
976 /* The only reason these functions exist is because glib uses non-standard
977 * types (gsize)in place of size_t. Since size_t is 64-bits on some
978 * machines where gsize (unsigned int) is 32-bits, this is annoying.
979 */
980
981 static gpointer
cl_malloc_glib(gsize n_bytes)982 cl_malloc_glib(gsize n_bytes)
983 {
984 return (gpointer)cl_malloc((size_t)n_bytes);
985 }
986
987 static void
cl_free_glib(gpointer mem)988 cl_free_glib(gpointer mem)
989 {
990 cl_free((void*)mem);
991 }
992
993 static void *
cl_realloc_glib(gpointer mem,gsize n_bytes)994 cl_realloc_glib(gpointer mem, gsize n_bytes)
995 {
996 return cl_realloc((void*)mem, (size_t)n_bytes);
997 }
998
999
/* Call before using any glib functions(!) */
/* See also: g_mem_set_vtable() */
void
cl_malloc_forced_for_glib(void)
{
	/* Positional initializers: per glib's GMemVTable the field order is
	 * {malloc, realloc, free, calloc, try_malloc, try_realloc}; NULL
	 * entries fall back to glib's defaults */
	static GMemVTable vt = {
		cl_malloc_glib,
		cl_realloc_glib,
		cl_free_glib,
		NULL,
		NULL,
		NULL,
	};
	if (!cl_malloc_inityet) {
		cl_malloc_init();
	}
	g_mem_set_vtable(&vt);
}
1018
1019 #ifdef MARK_PRISTINE
1020 static int
cl_check_is_pristine(const void * v,unsigned size)1021 cl_check_is_pristine(const void* v, unsigned size)
1022 {
1023 const unsigned char * cp;
1024 const unsigned char * last;
1025 cp = v;
1026 last = cp + size;
1027 cp += pristoff;
1028
1029 for (;cp < last; ++cp) {
1030 if (*cp != PRISTVALUE) {
1031 return FALSE;
1032 }
1033 }
1034 return TRUE;
1035 }
1036 static void
cl_mark_pristine(void * v,unsigned size)1037 cl_mark_pristine(void* v, unsigned size)
1038 {
1039 unsigned char * cp = v;
1040 memset(cp+pristoff, PRISTVALUE, size-pristoff);
1041 }
1042 #endif
1043
1044 #endif /* _CLPLUMBING_CLMALLOC_NATIVE_H */
1045