/* $Header: /p/tcsh/cvsroot/tcsh/tc.alloc.c,v 3.53 2015/02/22 16:31:54 christos Exp $ */
/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"
#ifdef HAVE_MALLINFO
#include <malloc.h>
#endif
/* sbrk() exists on Apple but is deprecated/unreliable there, so avoid it. */
#if defined(HAVE_SBRK) && !defined(__APPLE__)
#define USE_SBRK
#endif

RCSID("$tcsh: tc.alloc.c,v 3.53 2015/02/22 16:31:54 christos Exp $")

#define RCHECK
#define DEBUG

static char   *memtop = NULL;	/* PWP: top of current memory */
static char   *membot = NULL;	/* PWP: bottom of allocatable memory */

/* When non-zero, free()/sfree() become no-ops (deliberate leak; see below). */
int     dont_free = 0;

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#if !defined(DEBUG) || defined(SYSMALLOC)
/*
 * Report exhaustion on the diagnostic fd and terminate immediately.
 * Uses raw write() and _exit() because the allocator itself is broken
 * at this point; no stdio, no atexit handlers.
 */
static void
out_of_memory (void)
{
    static const char msg[] = "Out of memory\n";

    TCSH_IGNORE(write(didfds ? 2 : SHDIAG, msg, strlen(msg)));
    _exit(1);
}
#endif

#ifndef SYSMALLOC

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of os routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough and it will pick bad
 * pointers most of the time, in cases where we know we are going to get
 * a bad pointer, we'd rather leak.
 */

#ifndef NULL
#define	  NULL 0
#endif

typedef unsigned char U_char;	/* we don't really have signed chars */
typedef unsigned int U_int;
typedef unsigned short U_short;
typedef unsigned long U_long;


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */


/* Round a up to the next multiple of ROUNDUP+1 (i.e. 8-byte alignment). */
#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)

union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char  ovu_magic;	/* magic number */
	U_char  ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short ovu_size;	/* actual block size */
	U_int   ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};

#define	MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define	RSLOP		sizeof (U_int)
#else
#define	RSLOP		0
#endif


#define ROUNDUP	7

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static	int	findbucket	(union overhead *, int);
static	void	morecore	(int);
#endif


/*
 * Consistency check used by free(): under DEBUG a violation aborts so the
 * corruption is caught at the fault; otherwise it prints and simply
 * returns, leaking the block rather than corrupting the free lists.
 */
#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p); \
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p); \
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot); \
	return; \
    }
#endif

/*
 * Allocate nbytes of storage from the bucketed free lists, growing the
 * arena via morecore() when a bucket is empty.  Returns an aligned
 * pointer just past the overhead header; on exhaustion either calls
 * out_of_memory() or (under DEBUG) dumps statistics and aborts.
 */
memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int     bucket = 0;
    unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    /* Find the bucket: smallest i with 2^(i+3) >= nbytes. */
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     * ov_size holds size-1 (only if it fits in a U_short, i.e. bucket <= 13);
     * free() adds the 1 back when locating the trailing RMAGIC word.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 * Grabs at least 2KB from sbrk(), carves it into 2^(bucket+3)-byte
 * blocks, and threads them onto nextf[bucket].  Silently returns on
 * sbrk() failure, leaving the bucket empty for malloc() to detect.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int     rnu;		/* 2^rnu bytes will be requested */
    int     nblks;		/* become nblks blocks of the desired size */
    int     siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should make getpagesize
     * call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    if ((long) op & 0x3ff) {
	/* advance the break to the next 1KB boundary */
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room!  (sbrk returns (void *)-1 on failure) */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to minimum allocation size boundary and deduct from block count
     * to reflect.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}

#endif

/*
 * Return a block to its bucket's free list.  Pointers outside the
 * [membot, memtop] arena, or blocks without the MAGIC header, are
 * rejected via CHECK (abort under DEBUG, leak otherwise).
 */
void
free(ptr_t cp)
{
#ifndef lint
    int     size;
    union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%p) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%p) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%p) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%p) bad block."), cp);

#ifdef RCHECK
    /* ov_size stores size-1 (see malloc), hence the +1 to find the slop word */
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%p) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%p) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}

/*
 * calloc replacement: xmalloc + explicit zeroing.
 * NOTE(review): i * j is not checked for overflow here — presumably callers
 * pass small sizes; confirm before relying on it with untrusted counts.
 */
memalign_t
calloc(size_t i, size_t j)
{
#ifndef lint
    char   *cp;

    i *= j;
    cp = xmalloc(i);
    /*
     * DO NOT USE memset(), it will cause gcc-5 to mis-optimize the
     * malloc+memset sequence into a call to calloc, which will implode
     * tcsh.  This is really a GCC bug honestly.
     */
    bzero(cp, i);

    return ((memalign_t) cp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
#endif /* lint */

/*
 * Resize cp to nbytes.  Reuses the block in place when the new size
 * still fits the same bucket (and exceeds the next smaller one);
 * otherwise allocates, copies min(old, new) bytes, and frees the old
 * block if it was actually allocated.
 */
memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd.  If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

/*
 * On linux, _nss_nis_setnetgrent() calls this function to determine
 * the usable size of the pointer passed, but this is not a portable
 * API, so we cannot use our malloc replacement without providing one.
 * Thanks a lot glibc!
 */
#ifdef __linux__
#define M_U_S_CONST
#else
#define M_U_S_CONST
#endif
size_t malloc_usable_size(M_U_S_CONST void *);
size_t
malloc_usable_size(M_U_S_CONST void *ptr)
{
    const union overhead *op = (const union overhead *)
	(((const char *) ptr) - MEMALIGN(sizeof(*op)));
    /* Only blocks carrying our MAGIC header have a known size. */
    if (op->ov_magic == MAGIC)
	    return 1 << (op->ov_index + 3);
    else
	    return 0;
}


#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
    union overhead *p;
    size_t i;
    int j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	/* j != srchlen never holds for srchlen == -1, so -1 scans all */
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#endif


#else				/* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/

/* malloc wrapper: never requests 0 bytes, never returns NULL. */
memalign_t
smalloc(size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();
#ifndef USE_SBRK
    /* without sbrk, track the arena bounds from returned pointers */
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */
    return ((memalign_t) ptr);
}

/* realloc wrapper: maps NULL p to malloc, never 0 bytes, never NULL. */
memalign_t
srealloc(ptr_t p, size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == NULL)
	out_of_memory();
#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */
    return ((memalign_t) ptr);
}

/*
 * calloc wrapper: zero-fills, never 0 bytes, never NULL.
 * NOTE(review): n * s is not overflow-checked — same caveat as calloc above.
 */
memalign_t
scalloc(size_t s, size_t n)
{
    ptr_t   ptr;

    n *= s;
    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();

    memset (ptr, 0, n);

#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */

    return ((memalign_t) ptr);
}

/* free wrapper: tolerates NULL and honors the dont_free leak switch. */
void
sfree(ptr_t p)
{
    if (p && !dont_free)
	free(p);
}

#endif /* SYSMALLOC */

/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
/*ARGSUSED*/
void
showall(Char **v, struct command *c)
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	/* walk the free list just to count it */
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4zd", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf("\n%s:\t", CGETS(19, 9, "used"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef HAVE_MALLINFO
#ifdef USE_SBRK
    memtop = sbrk(0);
#endif /* USE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#else /* HAVE_MALLINFO */
    struct mallinfo mi;

    mi = mallinfo();
    xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname);
    xprintf(CGETS(19, 14, "Total space allocated from system: %d\n"), mi.arena);
    xprintf(CGETS(19, 15, "Number of non-inuse chunks: %d\n"), mi.ordblks);
    xprintf(CGETS(19, 16, "Number of mmapped regions: %d\n"), mi.hblks);
    xprintf(CGETS(19, 17, "Total space in mmapped regions: %d\n"), mi.hblkhd);
    xprintf(CGETS(19, 18, "Total allocated space: %d\n"), mi.uordblks);
    xprintf(CGETS(19, 19, "Total non-inuse space: %d\n"), mi.fordblks);
    xprintf(CGETS(19, 20, "Top-most, releasable space: %d\n"), mi.keepcost);
#endif /* HAVE_MALLINFO */
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}