1 /* 2 * This module derived from code donated to the FreeBSD Project by 3 * Matthew Dillon <dillon@backplane.com> 4 * 5 * Copyright (c) 1998 The FreeBSD Project 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: src/lib/libstand/zalloc.c,v 1.5.2.1 2002/12/28 18:04:15 dillon Exp $ 30 * $DragonFly: src/lib/libstand/zalloc.c,v 1.2 2003/06/17 04:26:51 dillon Exp $ 31 */ 32 33 /* 34 * LIB/MEMORY/ZALLOC.C - self contained low-overhead memory pool/allocation 35 * subsystem 36 * 37 * This subsystem implements memory pools and memory allocation 38 * routines. 39 * 40 * Pools are managed via a linked list of 'free' areas. 
Allocating
 * memory creates holes in the freelist, freeing memory fills them.
 * Since the freelist consists only of free memory areas, it is possible
 * to allocate the entire pool without incurring any structural overhead.
 *
 * The system works best when allocating similarly-sized chunks of
 * memory.  Care must be taken to avoid fragmentation when
 * allocating/deallocating dissimilar chunks.
 *
 * When a memory pool is first allocated, the entire pool is marked as
 * allocated.  This is done mainly because we do not want to modify any
 * portion of a pool's data area until we are given permission.  The
 * caller must explicitly deallocate portions of the pool to make them
 * available.
 *
 * z[n]xalloc() works like z[n]alloc() but the allocation is made from
 * within the specified address range.  If the segment could not be
 * allocated, NULL is returned.  WARNING!  The address range will be
 * aligned to an 8 or 16 byte boundary depending on the cpu so if you
 * give an unaligned address range, unexpected results may occur.
 *
 * If a standard allocation fails, the reclaim function will be called
 * to recover some space.  This usually causes other portions of the
 * same pool to be released.  Memory allocations at this low level
 * should not block but you can do that too in your reclaim function
 * if you want.  Reclaim does not function when z[n]xalloc() is used,
 * only for z[n]alloc().
 *
 * Allocation and frees of 0 bytes are valid operations.
 */

#include "zalloc_defs.h"

/*
 * znalloc() - allocate 'bytes' of memory (without zeroing) from pool 'mp'.
 *
 * Returns a pointer to the allocated region, (void *)-1 for a 0-byte
 * request (a distinct non-NULL sentinel, since 0-byte allocations are
 * documented as valid), or NULL if the pool cannot satisfy the request.
 *
 * The request is rounded up to the pool's minimum allocation granularity
 * (MEMNODE_SIZE_MASK + 1), which also guarantees every free fragment is
 * large enough to hold a MemNode header.
 *
 * First-fit search: if all objects are the same size this is effectively
 * a constant-time operation.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

    if (bytes == 0)
	return((void *)-1);	/* valid 0-byte allocation: unique non-NULL token */

    /*
     * locate freelist entry big enough to hold the object.  Skip the
     * scan entirely if the pool's accounting says it cannot possibly fit.
     */

    if (bytes <= mp->mp_Size - mp->mp_Used) {
	MemNode **pmn;
	MemNode *mn;

	for (pmn = &mp->mp_First; (mn=*pmn) != NULL; pmn = &mn->mr_Next) {
	    if (bytes > mn->mr_Bytes)
		continue;

	    /*
	     * Cut a chunk of memory out of the beginning of this
	     * block and fixup the link appropriately.  An exact fit
	     * unlinks the node; otherwise the node header is moved
	     * forward past the allocated chunk and relinked.
	     */

	    {
		char *ptr = (char *)mn;

		if (mn->mr_Bytes == bytes) {
		    *pmn = mn->mr_Next;
		} else {
		    /* shift the MemNode header past the carved-off chunk */
		    mn = (MemNode *)((char *)mn + bytes);
		    mn->mr_Next = ((MemNode *)ptr)->mr_Next;
		    mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
		    *pmn = mn;
		}
		mp->mp_Used += bytes;
		return(ptr);
	    }
	}
    }

    /*
     * Memory pool is full, return NULL.
     */

    return(NULL);
}

/*
 * zfree() - return 'bytes' of memory at 'ptr' to pool 'mp'.
 *
 * The caller must pass the same size that was allocated (the allocator
 * keeps no per-allocation header).  The freed region is inserted into the
 * address-ordered freelist and coalesced with the adjacent free areas on
 * either side when they abut.  Panics on pointers outside the pool,
 * misaligned pointers, or frees that overlap existing free regions.
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment, and must
     * mirror the rounding done in znalloc() so sizes match.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

    if (bytes == 0)
	return;

    /*
     * panic if illegal pointer: outside [mp_Base, mp_End) or not aligned
     * to the pool granularity.
     */

    if ((char *)ptr < (char *)mp->mp_Base ||
	(char *)ptr + bytes > (char *)mp->mp_End ||
	((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
	panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

    /*
     * free the segment
     */

    {
	MemNode **pmn;
	MemNode *mn;

	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
	    /*
	     * If area between last node and current node
	     *  - check range
	     *  - check merge with next area
	     *  - check merge with previous area
	     */
	    if ((char *)ptr <= (char *)mn) {
		/*
		 * range check: the freed region must not overlap the
		 * free node that follows it.
		 */
		if ((char *)ptr + bytes > (char *)mn) {
		    panic("zfree(%p,%ju): corrupt memlist1", ptr,
			(uintmax_t)bytes);
		}

		/*
		 * merge against next area or create independent area
		 */

		if ((char *)ptr + bytes == (char *)mn) {
		    ((MemNode *)ptr)->mr_Next = mn->mr_Next;
		    ((MemNode *)ptr)->mr_Bytes= bytes + mn->mr_Bytes;
		} else {
		    ((MemNode *)ptr)->mr_Next = mn;
		    ((MemNode *)ptr)->mr_Bytes= bytes;
		}
		*pmn = mn = (MemNode *)ptr;

		/*
		 * merge against previous area (if there is a previous
		 * area).
		 *
		 * NOTE(review): pmn points at the previous node's
		 * mr_Next field, so casting pmn to (MemNode *) treats
		 * the previous node itself - this is only correct if
		 * mr_Next is the first member of MemNode.  That layout
		 * lives in zalloc_defs.h; confirm before touching it.
		 */

		if (pmn != &mp->mp_First) {
		    if ((char*)pmn + ((MemNode*)pmn)->mr_Bytes == (char*)ptr) {
			((MemNode *)pmn)->mr_Next = mn->mr_Next;
			((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
			mn = (MemNode *)pmn;
		    }
		}
		return;
		/* NOT REACHED */
	    }
	    /* freed region must also not overlap the node it follows */
	    if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
		panic("zfree(%p,%ju): corrupt memlist2", ptr,
		    (uintmax_t)bytes);
	    }
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible (same mr_Next-at-offset-0 aliasing of
	 * pmn as above).
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
	) {
	    ((MemNode *)ptr)->mr_Next = NULL;
	    ((MemNode *)ptr)->mr_Bytes = bytes;
	    *pmn = (MemNode *)ptr;
	    mn = (MemNode *)ptr;
	} else {
	    ((MemNode *)pmn)->mr_Bytes += bytes;
	    mn = (MemNode *)pmn;
	}
    }
}

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *	'base'/'bytes' describe the new region; it may lie below mp_Base
 *	or above the current end, and the pool bounds and accounting are
 *	grown accordingly.
 *
 *	Note: the added memory starts out as allocated, you
 *	must free it to make it available to the memory subsystem.
 *
 *	Note: mp_Size may not reflect (mp_End - mp_Base) range
 *	due to other parts of the system doing their own sbrk()
 *	calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
    if (mp->mp_Size == 0) {
	/* first extension initializes the pool bounds outright */
	mp->mp_Base = base;
	mp->mp_Used = bytes;
	mp->mp_End = (char *)base + bytes;
	mp->mp_Size = bytes;
    } else {
	void *pend = (char *)mp->mp_Base + mp->mp_Size;

	if (base < mp->mp_Base) {
	    /* grow downward: widen Size/Used by the gap below mp_Base */
	    mp->mp_Size += (char *)mp->mp_Base - (char *)base;
	    mp->mp_Used += (char *)mp->mp_Base - (char *)base;
	    mp->mp_Base = base;
	}
	base = (char *)base + bytes;
	if (base > pend) {
	    /* grow upward: widen Size/Used by the span past the old end */
	    mp->mp_Size += (char *)base - (char *)pend;
	    mp->mp_Used += (char *)base - (char *)pend;
	    mp->mp_End = (char *)base;
	}
    }
}

#ifdef ZALLOCDEBUG

/*
 * zallocstats() - debug-only dump of pool usage.
 *
 *	Walks the freelist and prints bytes reserved, bytes allocated,
 *	and a count of free fragments (excluding the trailing free area
 *	that reaches mp_End, which is not fragmentation).
 *
 *	NOTE(review): counters are plain int while mr_Bytes/mp_Size are
 *	uintptr_t - fine for a debug printout on small pools, but would
 *	truncate on pools >= 2GB.
 */
void
zallocstats(MemPool *mp)
{
    int abytes = 0;	/* bytes currently allocated (holes in freelist) */
    int hbytes = 0;	/* bytes in free fragments (excluding tail area) */
    int fcount = 0;	/* number of free fragments */
    MemNode *mn;

    printf("%d bytes reserved", (int) mp->mp_Size);

    mn = mp->mp_First;

    /* space before the first free node is allocated */
    if ((void *)mn != (void *)mp->mp_Base) {
	abytes += (char *)mn - (char *)mp->mp_Base;
    }

    while (mn) {
	/* a free area not touching mp_End counts as a fragment */
	if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
	    hbytes += mn->mr_Bytes;
	    ++fcount;
	}
	/* gaps between consecutive free nodes are allocated space */
	if (mn->mr_Next)
	    abytes += (char *)mn->mr_Next - ((char *)mn + mn->mr_Bytes);
	mn = mn->mr_Next;
    }
    printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	abytes,
	fcount,
	hbytes
    );
}

#endif