1 /*- 2 * Copyright (c) 1983 Regents of the University of California. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 * 33 * @(#)malloc.c 5.11 (Berkeley) 2/23/91 34 * $FreeBSD: src/libexec/rtld-elf/malloc.c,v 1.3.2.3 2003/02/20 20:42:46 kan Exp $ 35 * $DragonFly: src/libexec/rtld-elf/malloc.c,v 1.3 2008/06/05 18:01:49 swildner Exp $ 36 */ 37 38 /* 39 * malloc.c (Caltech) 2/21/82 40 * Chris Kingsley, kingsley@cit-20. 41 * 42 * This is a very fast storage allocator. It allocates blocks of a small 43 * number of different sizes, and keeps free lists of each size. Blocks that 44 * don't exactly fit are passed up to the next larger size. In this 45 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. 46 * This is designed for use in a virtual memory environment. 47 */ 48 49 #include <sys/types.h> 50 #include <err.h> 51 #include <paths.h> 52 #include <stdarg.h> 53 #include <stdio.h> 54 #include <stdlib.h> 55 #include <string.h> 56 #include <unistd.h> 57 #include <sys/param.h> 58 #include <sys/mman.h> 59 #ifndef BSD 60 #define MAP_COPY MAP_PRIVATE 61 #define MAP_FILE 0 62 #define MAP_ANON 0 63 #endif 64 65 #ifndef BSD /* Need do better than this */ 66 #define NEED_DEV_ZERO 1 67 #endif 68 69 static void morecore(); 70 static int findbucket(); 71 static void xprintf(const char *, ...) __printflike(1, 2); 72 73 /* 74 * Pre-allocate mmap'ed pages 75 */ 76 #define NPOOLPAGES (32*1024/pagesz) 77 static caddr_t pagepool_start, pagepool_end; 78 static int morepages(); 79 80 /* 81 * The overhead on a block is at least 4 bytes. When free, this space 82 * contains a pointer to the next free block, and the bottom two bits must 83 * be zero. When in use, the first byte is set to MAGIC, and the second 84 * byte is the size index. The remaining bytes are for alignment. 85 * If range checking is enabled then a second word holds the size of the 86 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). 87 * The order of elements is critical: ov_magic must overlay the low order 88 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. 
 */
union overhead {
	union overhead *ov_next;	/* when free: link in the bucket's free list */
	struct {
		u_char	ovu_magic;	/* magic number (MAGIC when block is in use) */
		u_char	ovu_index;	/* bucket # the block was carved for */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_int	ovu_size;	/* actual block size */
#endif
	} ovu;
/* Convenience accessors for the in-use view of the union. */
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};

#define	MAGIC		0xef		/* magic # on accounting info */
#define RMAGIC		0x5555		/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		sizeof (u_short)	/* trailing guard word when range checking */
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	int pagesz;			/* page size (set on first malloc()) */
static	int pagebucket;			/* bucket index whose block size == pagesz */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
130 */ 131 static u_int nmalloc[NBUCKETS]; 132 #include <stdio.h> 133 #endif 134 135 #if defined(MALLOC_DEBUG) || defined(RCHECK) 136 #define ASSERT(p) if (!(p)) botch("p") 137 #include <stdio.h> 138 static void 139 botch(s) 140 char *s; 141 { 142 fprintf(stderr, "\r\nassertion botched: %s\r\n", s); 143 (void) fflush(stderr); /* just in case user buffered it */ 144 abort(); 145 } 146 #else 147 #define ASSERT(p) 148 #endif 149 150 /* Debugging stuff */ 151 static void xprintf(const char *, ...); 152 #define TRACE() xprintf("TRACE %s:%d\n", __FILE__, __LINE__) 153 154 void * 155 malloc(nbytes) 156 size_t nbytes; 157 { 158 register union overhead *op; 159 register int bucket; 160 register long n; 161 register unsigned amt; 162 163 /* 164 * First time malloc is called, setup page size and 165 * align break pointer so all data will be page aligned. 166 */ 167 if (pagesz == 0) { 168 pagesz = n = getpagesize(); 169 if (morepages(NPOOLPAGES) == 0) 170 return NULL; 171 op = (union overhead *)(pagepool_start); 172 n = n - sizeof (*op) - ((long)op & (n - 1)); 173 if (n < 0) 174 n += pagesz; 175 if (n) { 176 pagepool_start += n; 177 } 178 bucket = 0; 179 amt = 8; 180 while (pagesz > amt) { 181 amt <<= 1; 182 bucket++; 183 } 184 pagebucket = bucket; 185 } 186 /* 187 * Convert amount of memory requested into closest block size 188 * stored in hash buckets which satisfies request. 189 * Account for space used per block for accounting. 190 */ 191 if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { 192 #ifndef RCHECK 193 amt = 8; /* size of first bucket */ 194 bucket = 0; 195 #else 196 amt = 16; /* size of first bucket */ 197 bucket = 1; 198 #endif 199 n = -(sizeof (*op) + RSLOP); 200 } else { 201 amt = pagesz; 202 bucket = pagebucket; 203 } 204 while (nbytes > amt + n) { 205 amt <<= 1; 206 if (amt == 0) 207 return (NULL); 208 bucket++; 209 } 210 /* 211 * If nothing in hash bucket right now, 212 * request more memory from the system. 
213 */ 214 if ((op = nextf[bucket]) == NULL) { 215 morecore(bucket); 216 if ((op = nextf[bucket]) == NULL) 217 return (NULL); 218 } 219 /* remove from linked list */ 220 nextf[bucket] = op->ov_next; 221 op->ov_magic = MAGIC; 222 op->ov_index = bucket; 223 #ifdef MSTATS 224 nmalloc[bucket]++; 225 #endif 226 #ifdef RCHECK 227 /* 228 * Record allocated size of block and 229 * bound space with magic numbers. 230 */ 231 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 232 op->ov_rmagic = RMAGIC; 233 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 234 #endif 235 return ((char *)(op + 1)); 236 } 237 238 /* 239 * Used by rtld.c, if we don't override it here the calloc from 240 * libc may try to pull in the malloc/realloc/free from libc too. 241 */ 242 void * 243 calloc(size_t num, size_t size) 244 { 245 void *p; 246 247 size *= num; 248 if ((p = malloc(size)) != NULL) 249 bzero(p, size); 250 return(p); 251 } 252 253 /* 254 * Allocate more memory to the indicated bucket. 255 */ 256 static void 257 morecore(bucket) 258 int bucket; 259 { 260 register union overhead *op; 261 register int sz; /* size of desired block */ 262 int amt; /* amount to allocate */ 263 int nblks; /* how many blocks we get */ 264 265 /* 266 * sbrk_size <= 0 only for big, FLUFFY, requests (about 267 * 2^30 bytes on a VAX, I think) or for a negative arg. 268 */ 269 sz = 1 << (bucket + 3); 270 #ifdef MALLOC_DEBUG 271 ASSERT(sz > 0); 272 #else 273 if (sz <= 0) 274 return; 275 #endif 276 if (sz < pagesz) { 277 amt = pagesz; 278 nblks = amt / sz; 279 } else { 280 amt = sz + pagesz; 281 nblks = 1; 282 } 283 if (amt > pagepool_end - pagepool_start) 284 if (morepages(amt/pagesz + NPOOLPAGES) == 0) 285 return; 286 op = (union overhead *)pagepool_start; 287 pagepool_start += amt; 288 289 /* 290 * Add new memory allocated to that on 291 * free list for this hash bucket. 
292 */ 293 nextf[bucket] = op; 294 while (--nblks > 0) { 295 op->ov_next = (union overhead *)((caddr_t)op + sz); 296 op = (union overhead *)((caddr_t)op + sz); 297 } 298 } 299 300 void 301 free(cp) 302 void *cp; 303 { 304 register int size; 305 register union overhead *op; 306 307 if (cp == NULL) 308 return; 309 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 310 #ifdef MALLOC_DEBUG 311 ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ 312 #else 313 if (op->ov_magic != MAGIC) 314 return; /* sanity */ 315 #endif 316 #ifdef RCHECK 317 ASSERT(op->ov_rmagic == RMAGIC); 318 ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); 319 #endif 320 size = op->ov_index; 321 ASSERT(size < NBUCKETS); 322 op->ov_next = nextf[size]; /* also clobbers ov_magic */ 323 nextf[size] = op; 324 #ifdef MSTATS 325 nmalloc[size]--; 326 #endif 327 } 328 329 /* 330 * When a program attempts "storage compaction" as mentioned in the 331 * old malloc man page, it realloc's an already freed block. Usually 332 * this is the last block it freed; occasionally it might be farther 333 * back. We have to search all the free lists for the block in order 334 * to determine its bucket: 1st we make one pass thru the lists 335 * checking only the first block in each; if that fails we search 336 * ``realloc_srchlen'' blocks in each list for a match (the variable 337 * is extern so the caller can modify it). If that fails we just copy 338 * however many bytes was given to realloc() and hope it's not huge. 
339 */ 340 int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ 341 342 void * 343 realloc(cp, nbytes) 344 void *cp; 345 size_t nbytes; 346 { 347 register u_int onb; 348 register int i; 349 union overhead *op; 350 char *res; 351 int was_alloced = 0; 352 353 if (cp == NULL) 354 return (malloc(nbytes)); 355 op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 356 if (op->ov_magic == MAGIC) { 357 was_alloced++; 358 i = op->ov_index; 359 } else { 360 /* 361 * Already free, doing "compaction". 362 * 363 * Search for the old block of memory on the 364 * free list. First, check the most common 365 * case (last element free'd), then (this failing) 366 * the last ``realloc_srchlen'' items free'd. 367 * If all lookups fail, then assume the size of 368 * the memory block being realloc'd is the 369 * largest possible (so that all "nbytes" of new 370 * memory are copied into). Note that this could cause 371 * a memory fault if the old area was tiny, and the moon 372 * is gibbous. However, that is very unlikely. 373 */ 374 if ((i = findbucket(op, 1)) < 0 && 375 (i = findbucket(op, realloc_srchlen)) < 0) 376 i = NBUCKETS; 377 } 378 onb = 1 << (i + 3); 379 if (onb < pagesz) 380 onb -= sizeof (*op) + RSLOP; 381 else 382 onb += pagesz - sizeof (*op) - RSLOP; 383 /* avoid the copy if same size block */ 384 if (was_alloced) { 385 if (i) { 386 i = 1 << (i + 2); 387 if (i < pagesz) 388 i -= sizeof (*op) + RSLOP; 389 else 390 i += pagesz - sizeof (*op) - RSLOP; 391 } 392 if (nbytes <= onb && nbytes > i) { 393 #ifdef RCHECK 394 op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 395 *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 396 #endif 397 return(cp); 398 } else 399 free(cp); 400 } 401 if ((res = malloc(nbytes)) == NULL) 402 return (NULL); 403 if (cp != res) /* common optimization if "compacting" */ 404 bcopy(cp, res, (nbytes < onb) ? 
nbytes : onb); 405 return (res); 406 } 407 408 /* 409 * Search ``srchlen'' elements of each free list for a block whose 410 * header starts at ``freep''. If srchlen is -1 search the whole list. 411 * Return bucket number, or -1 if not found. 412 */ 413 static int 414 findbucket(freep, srchlen) 415 union overhead *freep; 416 int srchlen; 417 { 418 register union overhead *p; 419 register int i, j; 420 421 for (i = 0; i < NBUCKETS; i++) { 422 j = 0; 423 for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { 424 if (p == freep) 425 return (i); 426 j++; 427 } 428 } 429 return (-1); 430 } 431 432 #ifdef MSTATS 433 /* 434 * mstats - print out statistics about malloc 435 * 436 * Prints two lines of numbers, one showing the length of the free list 437 * for each size category, the second showing the number of mallocs - 438 * frees for each size category. 439 */ 440 mstats(s) 441 char *s; 442 { 443 register int i, j; 444 register union overhead *p; 445 int totfree = 0, 446 totused = 0; 447 448 fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); 449 for (i = 0; i < NBUCKETS; i++) { 450 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) 451 ; 452 fprintf(stderr, " %d", j); 453 totfree += j * (1 << (i + 3)); 454 } 455 fprintf(stderr, "\nused:\t"); 456 for (i = 0; i < NBUCKETS; i++) { 457 fprintf(stderr, " %d", nmalloc[i]); 458 totused += nmalloc[i] * (1 << (i + 3)); 459 } 460 fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", 461 totused, totfree); 462 } 463 #endif 464 465 466 static int 467 morepages(n) 468 int n; 469 { 470 int fd = -1; 471 int offset; 472 473 #ifdef NEED_DEV_ZERO 474 fd = open(_PATH_DEVZERO, O_RDWR, 0); 475 if (fd == -1) 476 perror(_PATH_DEVZERO); 477 #endif 478 479 if (pagepool_end - pagepool_start > pagesz) { 480 caddr_t addr = (caddr_t) 481 (((long)pagepool_start + pagesz - 1) & ~(pagesz - 1)); 482 if (munmap(addr, pagepool_end - addr) != 0) 483 warn("morepages: munmap %p", addr); 484 } 485 486 offset = (long)pagepool_start - 
((long)pagepool_start & ~(pagesz - 1)); 487 488 if ((pagepool_start = mmap(0, n * pagesz, 489 PROT_READ|PROT_WRITE, 490 MAP_ANON|MAP_COPY, fd, 0)) == (caddr_t)-1) { 491 xprintf("Cannot map anonymous memory"); 492 return 0; 493 } 494 pagepool_end = pagepool_start + n * pagesz; 495 pagepool_start += offset; 496 497 #ifdef NEED_DEV_ZERO 498 close(fd); 499 #endif 500 return n; 501 } 502 503 /* 504 * Non-mallocing printf, for use by malloc itself. 505 */ 506 static void 507 xprintf(const char *fmt, ...) 508 { 509 char buf[256]; 510 va_list ap; 511 512 va_start(ap, fmt); 513 vsprintf(buf, fmt, ap); 514 (void)write(STDOUT_FILENO, buf, strlen(buf)); 515 va_end(ap); 516 } 517