xref: /openbsd/usr.sbin/nsd/region-allocator.c (revision 8d8f1862)
1 /*
2  * region-allocator.c -- region based memory allocator.
3  *
4  * Copyright (c) 2001-2006, NLnet Labs. All rights reserved.
5  *
6  * See LICENSE for the license.
7  *
8  */
9 
10 #include "config.h"
11 
12 #include <assert.h>
13 #include <stdlib.h>
14 #include <string.h>
15 
16 #include "region-allocator.h"
17 #include "util.h"
18 
/** This value is enough so that x*y does not overflow if both < than this */
#define REGION_NO_OVERFLOW ((size_t)1 << (sizeof(size_t) * 4))

#ifdef ALIGNMENT
#undef ALIGNMENT
#endif
/* Round X up to the next multiple of S.  S must be a power of two.
 * Both arguments are fully parenthesized so that arbitrary expressions
 * (e.g. a shift or conditional) can be passed safely. */
#define REGION_ALIGN_UP(x, s)     (((x) + (s) - 1) & (~((s) - 1)))
#if SIZEOF_OFF_T > SIZEOF_VOIDP
#define ALIGNMENT	(sizeof(off_t))
#else
#define ALIGNMENT	(sizeof(void *))
#endif
/* #define CHECK_DOUBLE_FREE 0 */ /* set to 1 to perform expensive check for double recycle() */
32 
/* A registered cleanup entry: ACTION is invoked with DATA when the
 * region is freed (region_free_all) or destroyed (region_destroy). */
typedef struct cleanup cleanup_type;
struct cleanup
{
	void (*action)(void *);	/* function to call on cleanup */
	void *data;		/* argument passed to action */
};
39 
/* Free-list node overlaid on a recycled small allocation; the memory
 * itself lives inside one of the region's chunks. */
struct recycle_elem {
	struct recycle_elem* next;
};

/* Header prefixed to every large allocation; links it into the
 * region's doubly-linked large_list so it can be freed individually. */
struct large_elem {
	struct large_elem* next;
	struct large_elem* prev;
};
48 
struct region
{
	size_t        total_allocated;	/* bytes handed out to callers */
	size_t        small_objects;	/* count of chunk-carved objects */
	size_t        large_objects;	/* count of individually malloc'ed objects */
	size_t        chunk_count;	/* number of chunks (>= 1) */
	size_t        unused_space; /* Unused space due to alignment, etc. */

	size_t        allocated;	/* bytes used in the current chunk */
	char         *initial_data;	/* first chunk, kept across free_all */
	char         *data;		/* current chunk being carved up */

	void         *(*allocator)(size_t);	/* underlying malloc-like hook */
	void          (*deallocator)(void *);	/* underlying free-like hook */

	size_t        maximum_cleanup_count;	/* capacity of cleanups[] */
	size_t        cleanup_count;		/* entries in use */
	cleanup_type *cleanups;			/* grows by doubling */
	struct large_elem* large_list;		/* live large allocations */

	size_t        chunk_size;		/* size of each chunk */
	size_t        large_object_size;	/* threshold for large objects */

	/* if not NULL recycling is enabled.
	 * It is an array of linked lists of parts held for recycle.
	 * The parts are all pointers to within the allocated chunks.
	 * Array [i] points to elements of size i. */
	struct recycle_elem** recycle_bin;
	/* amount of memory in recycle storage */
	size_t		recycle_size;
};
80 
81 
82 static region_type *
83 alloc_region_base(void *(*allocator)(size_t size),
84 		  void (*deallocator)(void *),
85 		  size_t initial_cleanup_count)
86 {
87 	region_type *result = (region_type *) allocator(sizeof(region_type));
88 	if (!result) return NULL;
89 
90 	result->total_allocated = 0;
91 	result->small_objects = 0;
92 	result->large_objects = 0;
93 	result->chunk_count = 1;
94 	result->unused_space = 0;
95 	result->recycle_bin = NULL;
96 	result->recycle_size = 0;
97 	result->large_list = NULL;
98 
99 	result->allocated = 0;
100 	result->data = NULL;
101 	result->initial_data = NULL;
102 
103 	result->allocator = allocator;
104 	result->deallocator = deallocator;
105 
106 	assert(initial_cleanup_count > 0);
107 	result->maximum_cleanup_count = initial_cleanup_count;
108 	result->cleanup_count = 0;
109 	result->cleanups = (cleanup_type *) allocator(
110 		result->maximum_cleanup_count * sizeof(cleanup_type));
111 	if (!result->cleanups) {
112 		deallocator(result);
113 		return NULL;
114 	}
115 
116 	result->chunk_size = DEFAULT_CHUNK_SIZE;
117 	result->large_object_size = DEFAULT_LARGE_OBJECT_SIZE;
118 	return result;
119 }
120 
121 region_type *
122 region_create(void *(*allocator)(size_t size),
123 	      void (*deallocator)(void *))
124 {
125 	region_type* result = alloc_region_base(allocator, deallocator,
126 		DEFAULT_INITIAL_CLEANUP_SIZE);
127 	if(!result)
128 		return NULL;
129 	result->data = (char *) allocator(result->chunk_size);
130 	if (!result->data) {
131 		deallocator(result->cleanups);
132 		deallocator(result);
133 		return NULL;
134 	}
135 	result->initial_data = result->data;
136 
137 	return result;
138 }
139 
140 
141 region_type *region_create_custom(void *(*allocator)(size_t),
142 				  void (*deallocator)(void *),
143 				  size_t chunk_size,
144 				  size_t large_object_size,
145 				  size_t initial_cleanup_size,
146 				  int recycle)
147 {
148 	region_type* result = alloc_region_base(allocator, deallocator,
149 		initial_cleanup_size);
150 	if(!result)
151 		return NULL;
152 	assert(large_object_size <= chunk_size);
153 	result->chunk_size = chunk_size;
154 	result->large_object_size = large_object_size;
155 	if(result->chunk_size > 0) {
156 		result->data = (char *) allocator(result->chunk_size);
157 		if (!result->data) {
158 			deallocator(result->cleanups);
159 			deallocator(result);
160 			return NULL;
161 		}
162 		result->initial_data = result->data;
163 	}
164 	if(recycle) {
165 		result->recycle_bin = allocator(sizeof(struct recycle_elem*)
166 			* result->large_object_size);
167 		if(!result->recycle_bin) {
168 			region_destroy(result);
169 			return NULL;
170 		}
171 		memset(result->recycle_bin, 0, sizeof(struct recycle_elem*)
172 			* result->large_object_size);
173 	}
174 	return result;
175 }
176 
177 
178 void
179 region_destroy(region_type *region)
180 {
181 	void (*deallocator)(void *);
182 	if (!region)
183 		return;
184 
185 	deallocator = region->deallocator;
186 
187 	region_free_all(region);
188 	deallocator(region->cleanups);
189 	deallocator(region->initial_data);
190 	if(region->recycle_bin)
191 		deallocator(region->recycle_bin);
192 	if(region->large_list) {
193 		struct large_elem* p = region->large_list, *np;
194 		while(p) {
195 			np = p->next;
196 			deallocator(p);
197 			p = np;
198 		}
199 	}
200 	deallocator(region);
201 }
202 
203 
204 size_t
205 region_add_cleanup(region_type *region, void (*action)(void *), void *data)
206 {
207 	assert(action);
208 
209 	if (region->cleanup_count >= region->maximum_cleanup_count) {
210 		cleanup_type *cleanups = (cleanup_type *) region->allocator(
211 			2 * region->maximum_cleanup_count * sizeof(cleanup_type));
212 		if (!cleanups)
213 			return 0;
214 
215 		memcpy(cleanups, region->cleanups,
216 		       region->cleanup_count * sizeof(cleanup_type));
217 		region->deallocator(region->cleanups);
218 
219 		region->cleanups = cleanups;
220 		region->maximum_cleanup_count *= 2;
221 	}
222 
223 	region->cleanups[region->cleanup_count].action = action;
224 	region->cleanups[region->cleanup_count].data = data;
225 
226 	++region->cleanup_count;
227 	return region->cleanup_count;
228 }
229 
230 void
231 region_remove_cleanup(region_type *region, void (*action)(void *), void *data)
232 {
233 	size_t i;
234 	for(i=0; i<region->cleanup_count; i++) {
235 		if(region->cleanups[i].action == action &&
236 		   region->cleanups[i].data == data) {
237 			region->cleanup_count--;
238 			region->cleanups[i] =
239 				region->cleanups[region->cleanup_count];
240 			return;
241 		}
242 	}
243 }
244 
245 void *
246 region_alloc(region_type *region, size_t size)
247 {
248 	size_t aligned_size;
249 	void *result;
250 
251 	if (size == 0) {
252 		size = 1;
253 	}
254 	aligned_size = REGION_ALIGN_UP(size, ALIGNMENT);
255 
256 	if (aligned_size >= region->large_object_size) {
257 		result = region->allocator(size + sizeof(struct large_elem));
258 		if (!result)
259 			return NULL;
260 		((struct large_elem*)result)->prev = NULL;
261 		((struct large_elem*)result)->next = region->large_list;
262 		if(region->large_list)
263 			region->large_list->prev = (struct large_elem*)result;
264 		region->large_list = (struct large_elem*)result;
265 
266 		region->total_allocated += size;
267 		++region->large_objects;
268 
269 		return result + sizeof(struct large_elem);
270 	}
271 
272 	if (region->recycle_bin && region->recycle_bin[aligned_size]) {
273 		result = (void*)region->recycle_bin[aligned_size];
274 		region->recycle_bin[aligned_size] = region->recycle_bin[aligned_size]->next;
275 		region->recycle_size -= aligned_size;
276 		region->unused_space += aligned_size - size;
277 		return result;
278 	}
279 
280 	if (region->allocated + aligned_size > region->chunk_size) {
281 		void *chunk = region->allocator(region->chunk_size);
282 		size_t wasted;
283 		if (!chunk)
284 			return NULL;
285 
286 		wasted = (region->chunk_size - region->allocated) & (~(ALIGNMENT-1));
287 		if(wasted >= ALIGNMENT) {
288 			/* put wasted part in recycle bin for later use */
289 			region->total_allocated += wasted;
290 			++region->small_objects;
291 			region_recycle(region, region->data+region->allocated, wasted);
292 			region->allocated += wasted;
293 		}
294 		++region->chunk_count;
295 		region->unused_space += region->chunk_size - region->allocated;
296 
297 		if(!region_add_cleanup(region, region->deallocator, chunk)) {
298 			region->deallocator(chunk);
299 			region->chunk_count--;
300 			region->unused_space -=
301                                 region->chunk_size - region->allocated;
302 			return NULL;
303 		}
304 		region->allocated = 0;
305 		region->data = (char *) chunk;
306 	}
307 
308 	result = region->data + region->allocated;
309 	region->allocated += aligned_size;
310 
311 	region->total_allocated += aligned_size;
312 	region->unused_space += aligned_size - size;
313 	++region->small_objects;
314 
315 	return result;
316 }
317 
318 void *
319 region_alloc_init(region_type *region, const void *init, size_t size)
320 {
321 	void *result = region_alloc(region, size);
322 	if (!result) return NULL;
323 	memcpy(result, init, size);
324 	return result;
325 }
326 
327 void *
328 region_alloc_zero(region_type *region, size_t size)
329 {
330 	void *result = region_alloc(region, size);
331 	if (!result) return NULL;
332 	memset(result, 0, size);
333 	return result;
334 }
335 
336 void *
337 region_alloc_array_init(region_type *region, const void *init, size_t num,
338 	size_t size)
339 {
340 	if((num >= REGION_NO_OVERFLOW || size >= REGION_NO_OVERFLOW) &&
341 		num > 0 && SIZE_MAX / num < size) {
342 		log_msg(LOG_ERR, "region_alloc_array_init failed because of integer overflow");
343 		exit(1);
344 	}
345 	return region_alloc_init(region, init, num*size);
346 }
347 
348 void *
349 region_alloc_array_zero(region_type *region, size_t num, size_t size)
350 {
351 	if((num >= REGION_NO_OVERFLOW || size >= REGION_NO_OVERFLOW) &&
352 		num > 0 && SIZE_MAX / num < size) {
353 		log_msg(LOG_ERR, "region_alloc_array_zero failed because of integer overflow");
354 		exit(1);
355 	}
356 	return region_alloc_zero(region, num*size);
357 }
358 
359 void *
360 region_alloc_array(region_type *region, size_t num, size_t size)
361 {
362 	if((num >= REGION_NO_OVERFLOW || size >= REGION_NO_OVERFLOW) &&
363 		num > 0 && SIZE_MAX / num < size) {
364 		log_msg(LOG_ERR, "region_alloc_array failed because of integer overflow");
365 		exit(1);
366 	}
367 	return region_alloc(region, num*size);
368 }
369 
/*
 * Release everything allocated in REGION but keep the region itself
 * (and its initial chunk) ready for new allocations.
 */
void
region_free_all(region_type *region)
{
	size_t i;
	assert(region);
	assert(region->cleanups);

	/* run cleanups in reverse registration order; this also frees
	 * every extra chunk, which was registered with the deallocator */
	i = region->cleanup_count;
	while (i > 0) {
		--i;
		assert(region->cleanups[i].action);
		region->cleanups[i].action(region->cleanups[i].data);
	}

	/* recycled elements pointed into the chunks just freed */
	if(region->recycle_bin) {
		memset(region->recycle_bin, 0, sizeof(struct recycle_elem*)
			* region->large_object_size);
		region->recycle_size = 0;
	}

	/* large objects are not in the cleanup table; free them here */
	if(region->large_list) {
		struct large_elem* p = region->large_list, *np;
		void (*deallocator)(void *) = region->deallocator;
		while(p) {
			np = p->next;
			deallocator(p);
			p = np;
		}
		region->large_list = NULL;
	}

	/* rewind to the (never freed) initial chunk and reset statistics */
	region->data = region->initial_data;
	region->cleanup_count = 0;
	region->allocated = 0;

	region->total_allocated = 0;
	region->small_objects = 0;
	region->large_objects = 0;
	region->chunk_count = 1;
	region->unused_space = 0;
}
411 
412 
413 char *
414 region_strdup(region_type *region, const char *string)
415 {
416 	return (char *) region_alloc_init(region, string, strlen(string) + 1);
417 }
418 
419 void
420 region_recycle(region_type *region, void *block, size_t size)
421 {
422 	size_t aligned_size;
423 
424 	if(!block || !region->recycle_bin)
425 		return;
426 
427 	if (size == 0) {
428 		size = 1;
429 	}
430 	aligned_size = REGION_ALIGN_UP(size, ALIGNMENT);
431 
432 	if(aligned_size < region->large_object_size) {
433 		struct recycle_elem* elem = (struct recycle_elem*)block;
434 		/* we rely on the fact that ALIGNMENT is void* so the next will fit */
435 		assert(aligned_size >= sizeof(struct recycle_elem));
436 
437 #ifdef CHECK_DOUBLE_FREE
438 		if(CHECK_DOUBLE_FREE) {
439 			/* make sure the same ptr is not freed twice. */
440 			struct recycle_elem *p = region->recycle_bin[aligned_size];
441 			while(p) {
442 				assert(p != elem);
443 				p = p->next;
444 			}
445 		}
446 #endif
447 
448 		elem->next = region->recycle_bin[aligned_size];
449 		region->recycle_bin[aligned_size] = elem;
450 		region->recycle_size += aligned_size;
451 		region->unused_space -= aligned_size - size;
452 		return;
453 	} else {
454 		struct large_elem* l;
455 
456 		/* a large allocation */
457 		region->total_allocated -= size;
458 		--region->large_objects;
459 
460 		l = (struct large_elem*)(block-sizeof(struct large_elem));
461 		if(l->prev)
462 			l->prev->next = l->next;
463 		else	region->large_list = l->next;
464 		if(l->next)
465 			l->next->prev = l->prev;
466 		region->deallocator(l);
467 	}
468 }
469 
470 void
471 region_dump_stats(region_type *region, FILE *out)
472 {
473 	fprintf(out, "%lu objects (%lu small/%lu large), %lu bytes allocated (%lu wasted) in %lu chunks, %lu cleanups, %lu in recyclebin",
474 		(unsigned long) (region->small_objects + region->large_objects),
475 		(unsigned long) region->small_objects,
476 		(unsigned long) region->large_objects,
477 		(unsigned long) region->total_allocated,
478 		(unsigned long) region->unused_space,
479 		(unsigned long) region->chunk_count,
480 		(unsigned long) region->cleanup_count,
481 		(unsigned long) region->recycle_size);
482 	if(1 && region->recycle_bin) {
483 		/* print details of the recycle bin */
484 		size_t i;
485 		for(i=0; i<region->large_object_size; i++) {
486 			size_t count = 0;
487 			struct recycle_elem* el = region->recycle_bin[i];
488 			while(el) {
489 				count++;
490 				el = el->next;
491 			}
492 			if(i%ALIGNMENT == 0 && i!=0)
493 				fprintf(out, " %lu", (unsigned long)count);
494 		}
495 	}
496 }
497 
498 size_t region_get_recycle_size(region_type* region)
499 {
500 	return region->recycle_size;
501 }
502 
503 size_t region_get_mem(region_type* region)
504 {
505 	return region->total_allocated;
506 }
507 
508 size_t region_get_mem_unused(region_type* region)
509 {
510 	return region->unused_space;
511 }
512 
/* debug routine: log the same statistics that region_dump_stats prints,
 * formatted into a bounded stack buffer and emitted via log_msg. */
void
region_log_stats(region_type *region)
{
	char buf[10240], *str=buf;	/* str/strl track the unwritten tail */
	int strl = sizeof(buf);
	int len;
	snprintf(str, strl, "%lu objects (%lu small/%lu large), %lu bytes allocated (%lu wasted) in %lu chunks, %lu cleanups, %lu in recyclebin",
		(unsigned long) (region->small_objects + region->large_objects),
		(unsigned long) region->small_objects,
		(unsigned long) region->large_objects,
		(unsigned long) region->total_allocated,
		(unsigned long) region->unused_space,
		(unsigned long) region->chunk_count,
		(unsigned long) region->cleanup_count,
		(unsigned long) region->recycle_size);
	/* advance past what snprintf actually wrote (truncation-safe:
	 * strlen of the written text is always < strl) */
	len = strlen(str);
	str+=len;
	strl-=len;
	if(1 && region->recycle_bin) {
		/* print details of the recycle bin */
		size_t i;
		for(i=0; i<region->large_object_size; i++) {
			size_t count = 0;
			struct recycle_elem* el = region->recycle_bin[i];
			while(el) {
				count++;
				el = el->next;
			}
			/* only aligned, non-zero sizes are real size classes */
			if(i%ALIGNMENT == 0 && i!=0) {
				snprintf(str, strl, " %lu", (unsigned long)count);
				len = strlen(str);
				str+=len;
				strl-=len;
			}
		}
	}
	log_msg(LOG_INFO, "memory: %s", buf);
}
552