1 /*
2 * Electric Fence - Red-Zone memory allocator.
3 * Bruce Perens, 1988, 1993
4 *
5 * This is a special version of malloc() and company for debugging software
6 * that is suspected of overrunning or underrunning the boundaries of a
7 * malloc buffer, or touching free memory.
8 *
 * It arranges for each malloc buffer to be followed (or preceded)
 * in the address space by an inaccessible virtual memory page,
 * and for free memory to be inaccessible. If software touches the
 * inaccessible page, it will get an immediate segmentation
 * fault. It is then trivial to uncover the offending code using a debugger.
14 *
15 * An advantage of this product over most malloc debuggers is that this one
16 * detects reading out of bounds as well as writing, and this one stops on
17 * the exact instruction that causes the error, rather than waiting until the
18 * next boundary check.
19 *
20 * There is one product that debugs malloc buffer overruns
21 * better than Electric Fence: "Purify" from Purify Systems, and that's only
22 * a small part of what Purify does. I'm not affiliated with Purify, I just
23 * respect a job well done.
24 *
25 * This version of malloc() should not be linked into production software,
26 * since it tremendously increases the time and memory overhead of malloc().
27 * Each malloc buffer will consume a minimum of two virtual memory pages,
28 * this is 16 kilobytes on many systems. On some systems it will be necessary
29 * to increase the amount of swap space in order to debug large programs that
30 * perform lots of allocation, because of the per-buffer overhead.
31 */
32
33 #include "efence.h"
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <memory.h>
37 #include <string.h>
38 #include <fcntl.h>
39 #include <sys/mman.h>
40 #include <stdio.h>
41 #include <errno.h>
42 #include <stdarg.h>
43 #include <signal.h>
44
45 static const char version[] = "\n Electric Fence 2.0.1"
46 " Copyright (C) 1987-1993 Bruce Perens.\n";
47
48 /*
49 * MEMORY_CREATION_SIZE is the amount of memory to get from the operating
50 * system at one time. We'll break that memory down into smaller pieces for
51 * malloc buffers. One megabyte is probably a good value.
52 */
53 #define MEMORY_CREATION_SIZE 1024 * 1024
54
55 /*
56 * Enum Mode indicates the status of a malloc buffer.
57 */
58 enum _Mode {
59 NOT_IN_USE = 0, /* Available to represent a malloc buffer. */
60 FREE, /* A free buffer. */
61 ALLOCATED, /* A buffer that is in use. */
62 PROTECTED, /* A freed buffer that can not be allocated again. */
63 INTERNAL_USE /* A buffer used internally by malloc(). */
64 };
65 typedef enum _Mode Mode;
66
67 /*
68 * Struct Slot contains all of the information about a malloc buffer except
69 * for the contents of its memory.
70 */
71 struct _Slot {
72 void * userAddress;
73 void * internalAddress;
74 size_t userSize;
75 size_t internalSize;
76 Mode mode;
77 };
78 typedef struct _Slot Slot;
79
80 /*
81 * EF_ALIGNMENT is a global variable used to control the default alignment
82 * of buffers returned by malloc(), calloc(), and realloc(). It is all-caps
83 * so that its name matches the name of the environment variable that is used
84 * to set it. This gives the programmer one less name to remember.
85 * If the value is -1, it will be set from the environment or sizeof(int)
86 * at run time.
87 */
88 int EF_ALIGNMENT = -1;
89
/*
 * EF_PROTECT_FREE is a global variable used to control the disposition of
 * memory that is released using free(). It is all-caps so that its name
 * matches the name of the environment variable that is used to set it.
 * If its value is non-zero, memory released by free is made
 * inaccessible and never allocated again. Any software that touches free
 * memory will then get a segmentation fault. If its value is zero, freed
 * memory will be available for reallocation, but will still be inaccessible
 * until it is reallocated.
 * If the value is -1, it will be set from the environment or to 0 at run-time.
 */
101 int EF_PROTECT_FREE = -1;
102
/*
 * EF_PROTECT_BELOW is used to modify the behavior of the allocator. When
 * its value is non-zero, the allocator will place an inaccessible page
 * immediately _before_ the malloc buffer in the address space, instead
 * of _after_ it. Use this to detect malloc buffer under-runs, rather than
 * over-runs. It won't detect both at the same time, so you should test your
 * software twice, once with this value clear, and once with it set.
 * If the value is -1, it will be set from the environment or to zero at
 * run-time.
 */
113 int EF_PROTECT_BELOW = -1;
114
115 /*
116 * EF_ALLOW_MALLOC_0 is set if Electric Fence is to allow malloc(0). I
117 * trap malloc(0) by default because it is a common source of bugs.
118 */
119 int EF_ALLOW_MALLOC_0 = -1;
120
121 /*
122 * allocationList points to the array of slot structures used to manage the
123 * malloc arena.
124 */
125 static Slot * allocationList = 0;
126
127 /*
128 * allocationListSize is the size of the allocation list. This will always
129 * be a multiple of the page size.
130 */
131 static size_t allocationListSize = 0;
132
133 /*
134 * slotCount is the number of Slot structures in allocationList.
135 */
136 static size_t slotCount = 0;
137
138 /*
139 * unUsedSlots is the number of Slot structures that are currently available
140 * to represent new malloc buffers. When this number gets too low, we will
141 * create new slots.
142 */
143 static size_t unUsedSlots = 0;
144
145 /*
146 * slotsPerPage is the number of slot structures that fit in a virtual
147 * memory page.
148 */
149 static size_t slotsPerPage = 0;
150
/*
 * internalUse is set when allocating and freeing the allocator-internal
 * data structures.
 */
155 static int internalUse = 0;
156
157 /*
158 * noAllocationListProtection is set to tell malloc() and free() not to
159 * manipulate the protection of the allocation list. This is only set in
160 * realloc(), which does it to save on slow system calls, and in
161 * allocateMoreSlots(), which does it because it changes the allocation list.
162 */
163 static int noAllocationListProtection = 0;
164
165 /*
166 * bytesPerPage is set at run-time to the number of bytes per virtual-memory
167 * page, as returned by Page_Size().
168 */
169 static size_t bytesPerPage = 0;
170
171 /*
172 * internalError is called for those "shouldn't happen" errors in the
173 * allocator.
174 */
static void
internalError(void)
{
	/* Fatal "shouldn't happen" failure in the allocator's bookkeeping. */
	EF_Abort("Internal error in allocator.");
}
180
181 /*
182 * initialize sets up the memory allocation arena and the run-time
183 * configuration information.
184 */
185 static void
initialize(void)186 initialize(void)
187 {
188 size_t size = MEMORY_CREATION_SIZE;
189 size_t slack;
190 char * string;
191 Slot * slot;
192
193 EF_Print(version);
194
195 /*
196 * Import the user's environment specification of the default
197 * alignment for malloc(). We want that alignment to be under
198 * user control, since smaller alignment lets us catch more bugs,
199 * however some software will break if malloc() returns a buffer
200 * that is not word-aligned.
201 *
202 * I would like
203 * alignment to be zero so that we could catch all one-byte
204 * overruns, however if malloc() is asked to allocate an odd-size
205 * buffer and returns an address that is not word-aligned, or whose
206 * size is not a multiple of the word size, software breaks.
207 * This was the case with the Sun string-handling routines,
208 * which can do word fetches up to three bytes beyond the end of a
209 * string. I handle this problem in part by providing
210 * byte-reference-only versions of the string library functions, but
211 * there are other functions that break, too. Some in X Windows, one
212 * in Sam Leffler's TIFF library, and doubtless many others.
213 */
214 if ( EF_ALIGNMENT == -1 ) {
215 if ( (string = getenv("EF_ALIGNMENT")) != 0 )
216 EF_ALIGNMENT = (size_t)atoi(string);
217 else
218 EF_ALIGNMENT = sizeof(int);
219 }
220
221 /*
222 * See if the user wants to protect the address space below a buffer,
223 * rather than that above a buffer.
224 */
225 if ( EF_PROTECT_BELOW == -1 ) {
226 if ( (string = getenv("EF_PROTECT_BELOW")) != 0 )
227 EF_PROTECT_BELOW = (atoi(string) != 0);
228 else
229 EF_PROTECT_BELOW = 0;
230 }
231
232 /*
233 * See if the user wants to protect memory that has been freed until
234 * the program exits, rather than until it is re-allocated.
235 */
236 if ( EF_PROTECT_FREE == -1 ) {
237 if ( (string = getenv("EF_PROTECT_FREE")) != 0 )
238 EF_PROTECT_FREE = (atoi(string) != 0);
239 else
240 EF_PROTECT_FREE = 0;
241 }
242
243 /*
244 * See if the user wants to allow malloc(0).
245 */
246 if ( EF_ALLOW_MALLOC_0 == -1 ) {
247 if ( (string = getenv("EF_ALLOW_MALLOC_0")) != 0 )
248 EF_ALLOW_MALLOC_0 = (atoi(string) != 0);
249 else
250 EF_ALLOW_MALLOC_0 = 0;
251 }
252
253 /*
254 * Get the run-time configuration of the virtual memory page size.
255 */
256 bytesPerPage = Page_Size();
257
258 /*
259 * Figure out how many Slot structures to allocate at one time.
260 */
261 slotCount = slotsPerPage = bytesPerPage / sizeof(Slot);
262 allocationListSize = bytesPerPage;
263
264 if ( allocationListSize > size )
265 size = allocationListSize;
266
267 if ( (slack = size % bytesPerPage) != 0 )
268 size += bytesPerPage - slack;
269
270 /*
271 * Allocate memory, and break it up into two malloc buffers. The
272 * first buffer will be used for Slot structures, the second will
273 * be marked free.
274 */
275 slot = allocationList = (Slot *)Page_Create(size);
276 memset((char *)allocationList, 0, allocationListSize);
277
278 slot[0].internalSize = slot[0].userSize = allocationListSize;
279 slot[0].internalAddress = slot[0].userAddress = allocationList;
280 slot[0].mode = INTERNAL_USE;
281 if ( size > allocationListSize ) {
282 slot[1].internalAddress = slot[1].userAddress
283 = ((char *)slot[0].internalAddress) + slot[0].internalSize;
284 slot[1].internalSize
285 = slot[1].userSize = size - slot[0].internalSize;
286 slot[1].mode = FREE;
287 }
288
289 /*
290 * Deny access to the free page, so that we will detect any software
291 * that treads upon free memory.
292 */
293 Page_DenyAccess(slot[1].internalAddress, slot[1].internalSize);
294
295 /*
296 * Account for the two slot structures that we've used.
297 */
298 unUsedSlots = slotCount - 2;
299 }
300
301 /*
302 * allocateMoreSlots is called when there are only enough slot structures
303 * left to support the allocation of a single malloc buffer.
304 */
static void
allocateMoreSlots(void)
{
	size_t	newSize = allocationListSize + bytesPerPage;
	void *	newAllocation;
	void *	oldAllocation = allocationList;

	/*
	 * Unprotect the allocation list so it can be copied, and flag the
	 * re-entrant malloc()/free() calls below as internal use so they
	 * neither re-protect the list nor trip the INTERNAL_USE checks.
	 */
	Page_AllowAccess(allocationList, allocationListSize);
	noAllocationListProtection = 1;
	internalUse = 1;

	/* Grow by one page of slots: copy the old list, zero the new tail. */
	newAllocation = malloc(newSize);
	memcpy(newAllocation, allocationList, allocationListSize);
	memset(&(((char *)newAllocation)[allocationListSize]), 0, bytesPerPage);

	allocationList = (Slot *)newAllocation;
	allocationListSize = newSize;
	slotCount += slotsPerPage;
	unUsedSlots += slotsPerPage;

	free(oldAllocation);

	/*
	 * Keep access to the allocation list open at this point, because
	 * I am returning to memalign(), which needs that access.
	 */
	noAllocationListProtection = 0;
	internalUse = 0;
}
334
335 /*
336 * This is the memory allocator. When asked to allocate a buffer, allocate
337 * it in such a way that the end of the buffer is followed by an inaccessable
338 * memory page. If software overruns that buffer, it will touch the bad page
339 * and get an immediate segmentation fault. It's then easy to zero in on the
340 * offending code with a debugger.
341 *
342 * There are a few complications. If the user asks for an odd-sized buffer,
343 * we would have to have that buffer start on an odd address if the byte after
344 * the end of the buffer was to be on the inaccessable page. Unfortunately,
345 * there is lots of software that asks for odd-sized buffers and then
346 * requires that the returned address be word-aligned, or the size of the
347 * buffer be a multiple of the word size. An example are the string-processing
348 * functions on Sun systems, which do word references to the string memory
349 * and may refer to memory up to three bytes beyond the end of the string.
350 * For this reason, I take the alignment requests to memalign() and valloc()
351 * seriously, and
352 *
353 * Electric Fence wastes lots of memory. I do a best-fit allocator here
354 * so that it won't waste even more. It's slow, but thrashing because your
355 * working set is too big for a system's RAM is even slower.
356 */
extern C_LINKAGE void *
memalign(size_t alignment, size_t userSize)
{
	register Slot *	slot;
	register size_t	count;
	Slot *	fullSlot = 0;
	Slot *	emptySlots[2];
	size_t	internalSize;
	size_t	slack;
	char *	address;


	/* First call: build the arena and read the EF_* configuration. */
	if ( allocationList == 0 )
		initialize();

	if ( userSize == 0 && !EF_ALLOW_MALLOC_0 )
		EF_Abort("Allocating 0 bytes, probably a bug.");

	/*
	 * If EF_PROTECT_BELOW is set, all addresses returned by malloc()
	 * and company will be page-aligned. Otherwise, round the user size
	 * up so the buffer can end flush against the dead page while its
	 * start still honors the requested alignment.
	 */
	if ( !EF_PROTECT_BELOW && alignment > 1 ) {
		if ( (slack = userSize % alignment) != 0 )
			userSize += alignment - slack;
	}

	/*
	 * The internal size of the buffer is rounded up to the next
	 * page-size boundary, and then we add another page's worth of
	 * memory for the dead page.
	 */
	internalSize = userSize + bytesPerPage;
	if ( (slack = internalSize % bytesPerPage) != 0 )
		internalSize += bytesPerPage - slack;

	/*
	 * These will hold the addresses of two empty Slot structures, that
	 * can be used to hold information for any memory I create, and any
	 * memory that I mark free.
	 */
	emptySlots[0] = 0;
	emptySlots[1] = 0;

	/*
	 * The internal memory used by the allocator is currently
	 * inaccessible, so that errant programs won't scrawl on the
	 * allocator's arena. I'll un-protect it here so that I can make
	 * a new allocation. I'll re-protect it before I return.
	 */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	/*
	 * If I'm running out of empty slots, create some more before
	 * I don't have enough slots left to make an allocation.
	 */
	if ( !internalUse && unUsedSlots < 7 ) {
		allocateMoreSlots();
	}

	/*
	 * Iterate through all of the slot structures. Attempt to find a slot
	 * containing free memory of the exact right size (best fit). Accept
	 * a slot with more memory than we want, if the exact right size is
	 * not available. Find two slot structures that are not in use. We
	 * will need one if we split a buffer into free and allocated parts,
	 * and the second if we have to create new memory and mark it as free.
	 */
	for ( slot = allocationList, count = slotCount ; count > 0; count-- ) {
		if ( slot->mode == FREE
		 && slot->internalSize >= internalSize ) {
			if ( !fullSlot
			 ||slot->internalSize < fullSlot->internalSize){
				fullSlot = slot;
				if ( slot->internalSize == internalSize
				 && emptySlots[0] )
					break;	/* All done, */
			}
		}
		else if ( slot->mode == NOT_IN_USE ) {
			if ( !emptySlots[0] )
				emptySlots[0] = slot;
			else if ( !emptySlots[1] )
				emptySlots[1] = slot;
			else if ( fullSlot
			 && fullSlot->internalSize == internalSize )
				break;	/* All done. */
		}
		slot++;
	}
	if ( !emptySlots[0] )
		internalError();

	if ( !fullSlot ) {
		/*
		 * I get here if I haven't been able to find a free buffer
		 * with all of the memory I need. I'll have to create more
		 * memory. I'll mark it all as free, and then split it into
		 * free and allocated portions later.
		 */
		size_t	chunkSize = MEMORY_CREATION_SIZE;

		if ( !emptySlots[1] )
			internalError();

		if ( chunkSize < internalSize )
			chunkSize = internalSize;

		/* Round the new chunk up to a whole number of pages. */
		if ( (slack = chunkSize % bytesPerPage) != 0 )
			chunkSize += bytesPerPage - slack;

		/* Use up one of the empty slots to make the full slot. */
		fullSlot = emptySlots[0];
		emptySlots[0] = emptySlots[1];
		fullSlot->internalAddress = Page_Create(chunkSize);
		fullSlot->internalSize = chunkSize;
		fullSlot->mode = FREE;
		unUsedSlots--;
	}

	/*
	 * If I'm allocating memory for the allocator's own data structures,
	 * mark it INTERNAL_USE so that no errant software will be able to
	 * free it.
	 */
	if ( internalUse )
		fullSlot->mode = INTERNAL_USE;
	else
		fullSlot->mode = ALLOCATED;

	/*
	 * If the buffer I've found is larger than I need, split it into
	 * an allocated buffer with the exact amount of memory I need, and
	 * a free buffer containing the surplus memory.
	 */
	if ( fullSlot->internalSize > internalSize ) {
		emptySlots[0]->internalSize
		 = fullSlot->internalSize - internalSize;
		emptySlots[0]->internalAddress
		 = ((char *)fullSlot->internalAddress) + internalSize;
		emptySlots[0]->mode = FREE;
		fullSlot->internalSize = internalSize;
		unUsedSlots--;
	}

	if ( !EF_PROTECT_BELOW ) {
		/*
		 * Arrange the buffer so that it is followed by an inaccessible
		 * memory page. A buffer overrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "live" page. */
		Page_AllowAccess(
			 fullSlot->internalAddress
			,internalSize - bytesPerPage);

		address += internalSize - bytesPerPage;

		/* Set up the "dead" page. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		/* Figure out what address to give the user. */
		address -= userSize;
	}
	else {	/* EF_PROTECT_BELOW != 0 */
		/*
		 * Arrange the buffer so that it is preceded by an inaccessible
		 * memory page. A buffer underrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "dead" page. */
		if ( EF_PROTECT_FREE )
			Page_Delete(address, bytesPerPage);
		else
			Page_DenyAccess(address, bytesPerPage);

		address += bytesPerPage;

		/* Set up the "live" page. */
		Page_AllowAccess(address, internalSize - bytesPerPage);
	}

	fullSlot->userAddress = address;
	fullSlot->userSize = userSize;

	/*
	 * Make the pool's internal memory inaccessible, so that the program
	 * being debugged can't stomp on it.
	 */
	if ( !internalUse )
		Page_DenyAccess(allocationList, allocationListSize);

	return address;
}
561
562 /*
563 * Find the slot structure for a user address.
564 */
565 static Slot *
slotForUserAddress(void * address)566 slotForUserAddress(void * address)
567 {
568 register Slot * slot = allocationList;
569 register size_t count = slotCount;
570
571 for ( ; count > 0; count-- ) {
572 if ( slot->userAddress == address )
573 return slot;
574 slot++;
575 }
576
577 return 0;
578 }
579
580 /*
581 * Find the slot structure for an internal address.
582 */
583 static Slot *
slotForInternalAddress(void * address)584 slotForInternalAddress(void * address)
585 {
586 register Slot * slot = allocationList;
587 register size_t count = slotCount;
588
589 for ( ; count > 0; count-- ) {
590 if ( slot->internalAddress == address )
591 return slot;
592 slot++;
593 }
594 return 0;
595 }
596
597 /*
598 * Given the internal address of a buffer, find the buffer immediately
599 * before that buffer in the address space. This is used by free() to
600 * coalesce two free buffers into one.
601 */
602 static Slot *
slotForInternalAddressPreviousTo(void * address)603 slotForInternalAddressPreviousTo(void * address)
604 {
605 register Slot * slot = allocationList;
606 register size_t count = slotCount;
607
608 for ( ; count > 0; count-- ) {
609 if ( ((char *)slot->internalAddress)
610 + slot->internalSize == address )
611 return slot;
612 slot++;
613 }
614 return 0;
615 }
616
extern C_LINKAGE void
free(void * address)
{
	Slot *	slot;
	Slot *	previousSlot = 0;
	Slot *	nextSlot = 0;

	/*
	 * NOTE(review): ANSI C permits free(0) as a no-op; aborting here is
	 * a deliberate Electric Fence debugging policy.
	 */
	if ( address == 0 )
		EF_Abort("free() called for address zero.");

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	/* Open the (normally protected) allocation list for bookkeeping. */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	slot = slotForUserAddress(address);

	if ( !slot )
		EF_Abort("free(%x): address not from malloc().", address);

	/* Catch double-free and freeing of the allocator's own memory. */
	if ( slot->mode != ALLOCATED ) {
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */;
		else {
			EF_Abort(
			 "free(%x): freeing free memory."
			,address);
		}
	}

	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;	/* never to be re-allocated */
	else
		slot->mode = FREE;

	/* Look up both neighbors so adjacent free buffers can be merged. */
	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);

	if ( previousSlot
	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
		/* Coalesce previous slot with this one. */
		previousSlot->internalSize += slot->internalSize;
		if ( EF_PROTECT_FREE )
			previousSlot->mode = PROTECTED;

		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		slot = previousSlot;
		unUsedSlots++;
	}
	if ( nextSlot
	 && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}

	/* A free buffer spans its entire internal region. */
	slot->userAddress = slot->internalAddress;
	slot->userSize = slot->internalSize;

	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 *
	 * Some operating systems allow munmap() with single-page resolution,
	 * and allow you to un-map portions of a region, rather than the
	 * entire region that was mapped with mmap(). On those operating
	 * systems, we can release protected free pages with Page_Delete(),
	 * in the hope that the swap space attached to those pages will be
	 * released as well.
	 */
	if ( EF_PROTECT_FREE )
		Page_Delete(slot->internalAddress, slot->internalSize);
	else
		Page_DenyAccess(slot->internalAddress, slot->internalSize);

	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);
}
705
706 extern C_LINKAGE void *
realloc(void * oldBuffer,size_t newSize)707 realloc(void * oldBuffer, size_t newSize)
708 {
709 size_t size;
710 Slot * slot;
711 void * newBuffer = malloc(newSize);
712
713 if ( allocationList == 0 )
714 EF_Abort("realloc() called before first malloc().");
715
716 Page_AllowAccess(allocationList, allocationListSize);
717 noAllocationListProtection = 1;
718
719 slot = slotForUserAddress(oldBuffer);
720
721 if ( slot == 0 )
722 EF_Abort("free(%x): not from malloc().", oldBuffer);
723
724 if ( newSize < (size = slot->userSize) )
725 size = newSize;
726
727 if ( size > 0 )
728 memcpy(newBuffer, oldBuffer, size);
729
730 free(oldBuffer);
731 noAllocationListProtection = 0;
732 Page_DenyAccess(allocationList, allocationListSize);
733
734 if ( size < newSize )
735 memset(&(((char *)newBuffer)[size]), 0, newSize - size);
736
737 /* Internal memory was re-protected in free() */
738 return newBuffer;
739 }
740
extern C_LINKAGE void *
malloc(size_t size)
{
	/*
	 * initialize() must run before memalign() reads EF_ALIGNMENT: it
	 * replaces the -1 sentinel with the configured alignment value.
	 */
	if ( allocationList == 0 )
		initialize();	/* This sets EF_ALIGNMENT */

	return memalign(EF_ALIGNMENT, size);
}
749
750 extern C_LINKAGE void *
calloc(size_t nelem,size_t elsize)751 calloc(size_t nelem, size_t elsize)
752 {
753 size_t size = nelem * elsize;
754 void * allocation = malloc(size);
755
756 memset(allocation, 0, size);
757 return allocation;
758 }
759
760 /*
761 * This will catch more bugs if you remove the page alignment, but it
762 * will break some software.
763 */
764 extern C_LINKAGE void *
valloc(size_t size)765 valloc (size_t size)
766 {
767 return memalign(bytesPerPage, size);
768 }
769
770 /**********************************************************************
771 Below here is the original page.c
772 ***********************************************************************/
773
774 /*
775 * For some reason, I can't find mprotect() in any of the headers on
776 * IRIX or SunOS 4.1.2
777 */
778
779 /**
780 extern C_LINKAGE int mprotect(caddr_t addr, size_t len, int prot);
781 **/
782
783 static caddr_t startAddr = (caddr_t) 0;
784
785 #if ( !defined(sgi) && !defined(_AIX) )
786 extern int sys_nerr;
787 extern char * sys_errlist[];
788 #endif
789
/*
 * stringErrorReport returns a human-readable description of the current
 * error (errno, or oserror() on IRIX) for use in fatal diagnostics.
 */
static const char *
stringErrorReport(void)
{
#if ( defined(sgi) )
	return strerror(oserror());
#elif ( defined(_AIX) )
	return strerror(errno);
#else
	/* Pre-strerror() systems: index the global error table directly. */
	if ( errno > 0 && errno < sys_nerr )
		return sys_errlist[errno];
	else
		return "Unknown error.\n";
#endif
}
804
805 /*
806 * Create memory.
807 */
808 #if defined(MAP_ANONYMOUS)
809 void *
Page_Create(size_t size)810 Page_Create(size_t size)
811 {
812 caddr_t allocation;
813
814 /*
815 * In this version, "startAddr" is a _hint_, not a demand.
816 * When the memory I map here is contiguous with other
817 * mappings, the allocator can coalesce the memory from two
818 * or more mappings into one large contiguous chunk, and thus
819 * might be able to find a fit that would not otherwise have
820 * been possible. I could _force_ it to be contiguous by using
821 * the MMAP_FIXED flag, but I don't want to stomp on memory mappings
822 * generated by other software, etc.
823 */
824 allocation = mmap(
825 startAddr
826 ,(int)size
827 ,PROT_READ|PROT_WRITE
828 ,MAP_PRIVATE|MAP_ANONYMOUS
829 ,-1
830 ,0);
831
832 startAddr = allocation + size;
833
834 if ( allocation == (caddr_t)-1 )
835 EF_Exit("mmap() failed: %s", stringErrorReport());
836
837 return (void *)allocation;
838 }
839 #else
840 void *
Page_Create(size_t size)841 Page_Create(size_t size)
842 {
843 static int devZeroFd = -1;
844 caddr_t allocation;
845
846 if ( devZeroFd == -1 ) {
847 devZeroFd = open("/dev/zero", O_RDWR);
848 if ( devZeroFd < 0 )
849 EF_Exit(
850 "open() on /dev/zero failed: %s"
851 ,stringErrorReport());
852 }
853
854 /*
855 * In this version, "startAddr" is a _hint_, not a demand.
856 * When the memory I map here is contiguous with other
857 * mappings, the allocator can coalesce the memory from two
858 * or more mappings into one large contiguous chunk, and thus
859 * might be able to find a fit that would not otherwise have
860 * been possible. I could _force_ it to be contiguous by using
861 * the MMAP_FIXED flag, but I don't want to stomp on memory mappings
862 * generated by other software, etc.
863 */
864 allocation = mmap(
865 startAddr
866 ,(int)size
867 ,PROT_READ|PROT_WRITE
868 ,MAP_PRIVATE
869 ,devZeroFd
870 ,0);
871
872 startAddr = allocation + size;
873
874 if ( allocation == (caddr_t)-1 )
875 EF_Exit("mmap() failed: %s", stringErrorReport());
876
877 return (void *)allocation;
878 }
879 #endif
880
/* Report a fatal mprotect() failure and terminate the process. */
static void
mprotectFailed(void)
{
	EF_Exit("mprotect() failed: %s", stringErrorReport());
}
886
887 void
Page_AllowAccess(void * address,size_t size)888 Page_AllowAccess(void * address, size_t size)
889 {
890 if ( mprotect((caddr_t)address, size, PROT_READ|PROT_WRITE) < 0 )
891 mprotectFailed();
892 }
893
894 void
Page_DenyAccess(void * address,size_t size)895 Page_DenyAccess(void * address, size_t size)
896 {
897 if ( mprotect((caddr_t)address, size, PROT_NONE) < 0 )
898 mprotectFailed();
899 }
900
/*
 * Page_Delete releases a region entirely. Where partial munmap() is
 * known to work (AIX), un-map it so the backing swap can be reclaimed;
 * elsewhere, fall back to simply denying access.
 */
void
Page_Delete(void * address, size_t size)
{
	/*
	 * My SGI ONYX running IRIX 5.0 crashes reliably when "tstheap 3072" is
	 * run with the munmap call below compiled in. I'd like to hear how well
	 * other operating systems handle it, so that I can enable it on those
	 * systems.
	 */
#if ( defined(_AIX) )
	if ( munmap((caddr_t)address, size) < 0 )
		EF_Exit("munmap() failed: %s", stringErrorReport());
#else
	Page_DenyAccess(address, size);
#endif
}
917
/*
 * Page_Size returns the system's virtual-memory page size, using
 * whichever interface this platform provides: sysconf(_SC_PAGESIZE),
 * sysconf(_SC_PAGE_SIZE), or the older BSD getpagesize().
 */
#if defined(_SC_PAGESIZE)
size_t
Page_Size(void)
{
	return (size_t)sysconf(_SC_PAGESIZE);
}
#elif defined(_SC_PAGE_SIZE)
size_t
Page_Size(void)
{
	return (size_t)sysconf(_SC_PAGE_SIZE);
}
#else
/* extern int getpagesize(); */
extern int	getpagesize();
size_t
Page_Size(void)
{
	return getpagesize();
}
#endif
938
939 /***********************************************************************
940 Below here is the original print.c
941 ************************************************************************/
942
943 /*
944 * These routines do their printing without using stdio. Stdio can't
945 * be used because it calls malloc(). Internal routines of a malloc()
946 * debugger should not re-enter malloc(), so stdio is out.
947 */
948
949 /*
950 * NUMBER_BUFFER_SIZE is the longest character string that could be needed
951 * to represent an unsigned integer. Assuming unsigned integers might be as
952 * large as 64 bits, and we might print in base 2, let's set it to 64 bytes.
953 */
954 #define NUMBER_BUFFER_SIZE 64
955
956 static void
printNumber(unsigned int number,unsigned int base)957 printNumber(unsigned int number, unsigned int base)
958 {
959 char buffer[NUMBER_BUFFER_SIZE];
960 char * s = &buffer[NUMBER_BUFFER_SIZE];
961 int size;
962
963 do {
964 unsigned int digit;
965
966 if ( --s == buffer )
967 EF_Abort("Internal error printing number.");
968
969 digit = number % base;
970
971 if ( digit < 10 )
972 *s = '0' + digit;
973 else
974 *s = 'a' + digit - 10;
975
976 } while ( (number /= base) > 0 );
977
978 size = &buffer[NUMBER_BUFFER_SIZE] - s;
979
980 if ( size > 0 )
981 write(2, s, size);
982 }
983
/*
 * vprint is the formatting engine behind EF_Print/EF_Abort/EF_Exit.
 * It writes directly to file descriptor 2, supporting only %%, %s, %d,
 * %x, and %c; anything else prints a "bad pattern" diagnostic.
 */
static void
vprint(const char * pattern, va_list args)
{
	static const char	bad_pattern[] =
	 "\nBad pattern specifier %%%c in EF_Print().\n";
	const char *	s = pattern;
	char		c;

	while ( (c = *s++) != '\0' ) {
		if ( c == '%' ) {
			c = *s++;
			switch ( c ) {
			case '%':
				(void) write(2, &c, 1);
				break;
			case 's':
				{
					const char *	string;
					size_t		length;

					string = va_arg(args, char *);
					length = strlen(string);

					(void) write(2, string, length);
				}
				break;
			case 'd':
				{
					int	n = va_arg(args, int);

					if ( n < 0 ) {
						char	c = '-';
						write(2, &c, 1);
						n = -n;
					}
					printNumber(n, 10);
				}
				break;
			case 'x':
				/* u_int is non-standard; unsigned int is the same type. */
				printNumber(va_arg(args, unsigned int), 0x10);
				break;
			case 'c':
				{
					/*
					 * char promotes to int when passed through
					 * "...", so va_arg(args, char) is undefined
					 * behavior; fetch the promoted int instead.
					 */
					char	c = (char)va_arg(args, int);

					(void) write(2, &c, 1);
				}
				break;
			default:
				{
					EF_Print(bad_pattern, c);
				}

			}
		}
		else
			(void) write(2, &c, 1);
	}
}
1043
/*
 * EF_Abort prints a formatted message to standard error and terminates
 * the process with SIGILL so a debugger catches the fault in place.
 */
void
EF_Abort(const char * pattern, ...)
{
	va_list	args;

	va_start(args, pattern);

	EF_Print("\nElectricFence Aborting: ");
	vprint(pattern, args);
	EF_Print("\n");

	va_end(args);

	/*
	 * I use kill(getpid(), SIGILL) instead of abort() because some
	 * mis-guided implementations of abort() flush stdio, which can
	 * cause malloc() or free() to be called.
	 */
	kill(getpid(), SIGILL);
	/* Just in case something handles SIGILL and returns, exit here. */
	_exit(-1);
}
1066
/*
 * EF_Exit prints a formatted message to standard error and terminates
 * the process immediately, without running atexit handlers or stdio
 * flushing.
 */
void
EF_Exit(const char * pattern, ...)
{
	va_list	args;

	va_start(args, pattern);

	EF_Print("\nElectricFence Exiting: ");
	vprint(pattern, args);
	EF_Print("\n");

	va_end(args);

	/*
	 * I use _exit() because the regular exit() flushes stdio,
	 * which may cause malloc() or free() to be called.
	 */
	_exit(-1);
}
1086
/*
 * EF_Print formats a message to standard error using vprint()'s minimal
 * pattern language (%%, %s, %d, %x, %c). Stdio is deliberately avoided
 * because it may call malloc().
 */
void
EF_Print(const char * pattern, ...)
{
	va_list	args;

	va_start(args, pattern);
	vprint(pattern, args);
	va_end(args);
}
1096